//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass, which is only
// enabled for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

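// The cost tiers below roughly track how many instructions it takes to
// materialize an integer constant on PPC: one for anything reachable with a
// single li or lis, a lis/ori pair for other 32-bit values, and a
// conservative four units for full 64-bit constants (which can take up to
// five instructions to build).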
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                              TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

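// Note that stackmap/patchpoint immediates below are free because they are
// encoded into the side table emitted for the call rather than materialized
// into registers.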
int PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

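// The flags below mark operand positions whose immediates can be encoded
// directly into an instruction: RunFree corresponds to the rotate-and-mask
// instructions (a contiguous run of mask bits), ShiftedFree to the
// shifted-immediate forms such as addis/oris/xoris, and UnsignedFree to
// unsigned compares such as cmplwi.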
int PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

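// For example, a binary operation on v4i64 is legalized into two VSX
// registers on a 128-bit vector subtarget, so LT.first == 2 doubles the base
// cost computed below.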
unsigned
PPCTTIImpl::getUserCost(const User *U, ArrayRef<const Value *> Operands,
                        TTI::TargetCostKind CostKind) {
  // We already implement getCastInstrCost and getMemoryOpCost where we perform
  // the vector adjustment there.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    return BaseT::getUserCost(U, Operands, CostKind);

  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands, CostKind);
  }

  return BaseT::getUserCost(U, Operands, CostKind);
}

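// Conservatively determine whether anything in this block might expand to a
// call or otherwise clobber the CTR register: runtime-library and intrinsic
// calls, TLS address computations, 128-bit integer operations on 32-bit
// targets, fp128/ppc_fp128 arithmetic, indirect branches, and switches large
// enough to become jump tables.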
bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
                             SmallPtrSetImpl<const Value *> &Visited) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  // Determining the address of a TLS variable results in a function call in
  // certain TLS models.
  std::function<bool(const Value *)> memAddrUsesCTR =
      [&memAddrUsesCTR, &TM, &Visited](const Value *MemAddr) -> bool {
    // No need to traverse again if we already checked this operand.
    if (!Visited.insert(MemAddr).second)
      return false;
    const auto *GV = dyn_cast<GlobalValue>(MemAddr);
    if (!GV) {
      // Recurse to check for constants that refer to TLS global variables.
      if (const auto *CV = dyn_cast<Constant>(MemAddr))
        for (const auto &CO : CV->operands())
          if (memAddrUsesCTR(CO))
            return true;

      return false;
    }

    if (!GV->isThreadLocal())
      return false;
    TLSModel::Model Model = TM.getTLSModel(GV);
    return Model == TLSModel::GeneralDynamic ||
           Model == TLSModel::LocalDynamic;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to loop_decrement or set_loop_iterations,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::fma:                Opcode = ISD::FMA;        break;
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::lrint:              Opcode = ISD::LRINT;      break;
          case Intrinsic::llrint:             Opcode = ISD::LLRINT;     break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::lround:             Opcode = ISD::LROUND;     break;
          case Intrinsic::llround:            Opcode = ISD::LLROUND;    break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               (J->getType()->getScalarType()->isFP128Ty() ||
                J->getType()->getScalarType()->isPPC_FP128Ty())) {
      // Most operations on f128 or ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand))
        return true;
  }

  return false;
}

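// Decide whether a CTR-based hardware loop is worthwhile: reject loops whose
// constant trip count is too small to amortize the mtctr latency, loops that
// might clobber the CTR, and loops whose profile shows an exit edge taken
// more often than the backedge.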
bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small, short loops to CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  SmallPtrSet<const Value *, 4> Visited;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo, Visited))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non-coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes that
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

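// Allow memcmp expansion into inline loads and compares, greedily using
// 8-byte chunks first and narrower sizes for any remainder; the number of
// loads is capped by the target's memcmp expansion limit.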
PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && (Ty->getScalarType()->isFloatTy() ||
                  Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
                  Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  else if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
  switch (ClassID) {
  default:
    llvm_unreachable("unknown register class");
    return "PPC::unknown register class";
  case GPRRC: return "PPC::GPRRC";
  case FPRRC: return "PPC::FPRRC";
  case VRRC:  return "PPC::VRRC";
  case VSXRC: return "PPC::VSXRC";
  }
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // Starting with P7 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
  // Assume that future is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets where there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code wrt. scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

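// For example, on a subtarget where vectorsUseTwoUnits() is true, a v4i32 add
// that legalizes to a single vector register and is not expanded is reported
// at twice its base cost.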
int PPCTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                           Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted, so they are free in terms of
  // reciprocal throughput (the only cost kind reaching this point).
  return 0;
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I);
  Cost = vectorCostAdjustment(Cost, Opcode, Dst, Src);
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

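// The element extract costs below reflect the direct-move facilities on P9:
// mfvsrd (64-bit) and mfvsrwz (32-bit) read one fixed lane of a VSR cheaply,
// so exactly one index per element width costs 1; other lanes are modeled as
// a full vector operation.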
int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert. Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract. Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld). Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

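// The unaligned-access decomposition at the end of this function composes
// with scalarization: e.g. on a pre-VSX subtarget, a 16-byte Altivec store
// with 4-byte alignment pays the base cost, three additional scalar stores,
// and one extract per vector element.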
int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                TTI::TargetCostKind CostKind,
                                const Instruction *I) {
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                    CostKind);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;

  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // better.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  assert(Alignment);
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

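// An interleaved access is modeled as the cost of the wide load/store plus
// one shuffle per member vector for each additional legalized register.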
int PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  int Cost =
      getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                      CostKind);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

unsigned PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) {
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}

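// With LsrNoInsnsCost unset (the default), instruction count is compared
// ahead of register count and the remaining LSR terms; std::tie yields that
// lexicographic order.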
bool PPCTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // PowerPC default behaviour here is "instruction number 1st priority".
  // If LsrNoInsnsCost is set, call the default implementation instead.
  if (!LsrNoInsnsCost)
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);

  return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}