//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//
#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//

int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf:
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}
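
// Worked example of the tiers above (illustrative, not from the original
// source): 0x7fffffff fits isInt<32> and is loaded with a single lgfi
// (TCC_Basic); 0x123456789abcdef0 has nonzero bits in both halves, so it
// needs an instruction pair such as llihf + oilf (2 * TCC_Basic).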

int SystemZTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}
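
// Illustrative sketch (not from the original source): for "add i64 %x, 100"
// the immediate is free, since algfi takes a 32-bit unsigned immediate; for
// "add i64 %x, -100" it is also free, because the addition can be rewritten
// as slgfi with the negated value.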

int SystemZTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                        const APInt &Imm, Type *Ty,
                                        TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return SystemZTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call, what the machine instruction count
  // estimate is, and how many stores there are.
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, None, 0,
                                     TTI::TCK_RecipThroughput);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);
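
  // Illustrative sketch (not in the original source): with NumStores == 3,
  // Max becomes 4, so the unrolled loop body holds at most ~12 stores.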

  if (HasCall) {
    // Only allow full unrolling if the loop contains calls.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;
}

void SystemZTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                                   TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.SetupCost);
}
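
// Note: std::tie gives a lexicographic comparison, so Insns dominates;
// NumRegs only breaks ties in Insns, and so on down the member list.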

unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (!Vector)
    // Discount the stack pointer. Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}

unsigned SystemZTTIImpl::getMinPrefetchStride(unsigned NumMemAccesses,
                                              unsigned NumStridedMemAccesses,
                                              unsigned NumPrefetches,
                                              bool HasCall) const {
  // Don't prefetch a loop with many far apart accesses.
  if (NumPrefetches > 16)
    return UINT_MAX;

  // Emit prefetch instructions for smaller strides in cases where we think
  // the hardware prefetcher might not be able to keep up.
  if (NumStridedMemAccesses > 32 &&
      NumStridedMemAccesses == NumMemAccesses && !HasCall)
    return 1;

  return ST->hasMiscellaneousExtensions3() ? 8192 : 2048;
}

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}

// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
  unsigned Size =
    (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must have non-zero size.");
  return Size;
}

// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
// 3, which is the correct number of full vector registers for such a type.
static unsigned getNumVectorRegs(Type *Ty) {
  auto *VTy = cast<FixedVectorType>(Ty);
  unsigned WideBits = getScalarSizeInBits(Ty) * VTy->getNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}
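
// Worked example (a sketch): <6 x i64> is 384 bits wide; 384 % 128 == 0, so
// the function returns 384 / 128 == 3 vector registers.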

int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {

  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // There are three cases of division and remainder: Dividing with a register
  // needs a divide instruction. A divisor which is a power of two constant
  // can be implemented with a sequence of shifts. Any other constant needs a
  // multiply and shifts.
  const unsigned DivInstrCost = 20;
  const unsigned DivMulSeqCost = 10;
  const unsigned SDivPow2Cost = 4;

  bool SignedDivRem =
      Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
  bool UnsignedDivRem =
      Opcode == Instruction::UDiv || Opcode == Instruction::URem;

  // Check for a constant divisor.
  bool DivRemConst = false;
  bool DivRemConstPow2 = false;
  if ((SignedDivRem || UnsignedDivRem) && Args.size() == 2) {
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      const ConstantInt *CVal =
          (C->getType()->isVectorTy()
               ? dyn_cast_or_null<const ConstantInt>(C->getSplatValue())
               : dyn_cast<const ConstantInt>(C));
      if (CVal != nullptr &&
          (CVal->getValue().isPowerOf2() || (-CVal->getValue()).isPowerOf2()))
        DivRemConstPow2 = true;
      else
        DivRemConst = true;
    }
  }
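
  // Illustrative sketch (not in the original source): "sdiv i64 %x, 8" is
  // DivRemConstPow2 (SDivPow2Cost == 4), "udiv i64 %x, 10" is DivRemConst
  // (DivMulSeqCost == 10), and a division by a register costs DivInstrCost.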

  if (!Ty->isVectorTy()) {
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    // Give discount for some combined logical operations if supported.
    if (Args.size() == 2 && ST->hasMiscellaneousExtensions3()) {
      if (Opcode == Instruction::Xor) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() &&
                (I->getOpcode() == Instruction::And ||
                 I->getOpcode() == Instruction::Or ||
                 I->getOpcode() == Instruction::Xor))
              return 0;
        }
      }
      else if (Opcode == Instruction::Or || Opcode == Instruction::And) {
        for (const Value *A : Args) {
          if (const Instruction *I = dyn_cast<Instruction>(A))
            if (I->hasOneUse() && I->getOpcode() == Instruction::Xor)
              return 0;
        }
      }
    }

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1) {
      if (ST->hasLoadStoreOnCond2())
        return 5; // 2 * (li 0; loc 1); xor
      return 7; // 2 * ipm sequences ; xor ; shift ; compare
    }

    if (DivRemConstPow2)
      return (SignedDivRem ? SDivPow2Cost : 1);
    if (DivRemConst)
      return DivMulSeqCost;
    if (SignedDivRem || UnsignedDivRem)
      return DivInstrCost;
  }
  else if (ST->hasVector()) {
    auto *VTy = cast<FixedVectorType>(Ty);
    unsigned VF = VTy->getNumElements();
    unsigned NumVectors = getNumVectorRegs(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr) {
      return NumVectors;
    }

    if (DivRemConstPow2)
      return (NumVectors * (SignedDivRem ? SDivPow2Cost : 1));
    if (DivRemConst)
      return VF * DivMulSeqCost + getScalarizationOverhead(VTy, Args);
    if ((SignedDivRem || UnsignedDivRem) && VF > 4)
      // Temporary hack: disable high vectorization factors with integer
      // division/remainder, which will get scalarized and handled with
      // GR128 registers. The mischeduler is not clever enough to avoid
      // spilling yet.
      return 1000;

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocation plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost =
            getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(VTy, Args);
        // FIXME: VF 2 for these FP operations is currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(VTy, Args);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                   int Index, VectorType *SubTp) {
  if (ST->hasVector()) {
    unsigned NumVectors = getNumVectorRegs(Tp);

    // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

    // FP128 values are always in scalar registers, so there is no work
    // involved with a shuffle, except for broadcast. In that case register
    // moves are done with a single instruction per element.
    if (Tp->getScalarType()->isFP128Ty())
      return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

    switch (Kind) {
    case TargetTransformInfo::SK_ExtractSubvector:
      // ExtractSubvector Index indicates start offset.

      // Extracting a subvector from first index is a noop.
      return (Index == 0 ? 0 : NumVectors);

    case TargetTransformInfo::SK_Broadcast:
      // Loop vectorizer calls here to figure out the extra cost of
      // broadcasting a loaded value to all elements of a vector. Since vlrep
      // loads and replicates with a single instruction, adjust the returned
      // value.
      return NumVectors - 1;

    default:

      // SystemZ supports single instruction permutation / replication.
      return NumVectors;
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
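
// Illustrative sketch (not in the original source): broadcasting a loaded
// i32 into <4 x i32> reports 0 extra cost (NumVectors - 1 == 0), since a
// single vlrep both loads and replicates the value.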

// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert(SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
         "Packing must reduce size of vector type.");
  assert(cast<FixedVectorType>(SrcTy)->getNumElements() ==
             cast<FixedVectorType>(DstTy)->getNumElements() &&
         "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop. TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = cast<FixedVectorType>(SrcTy)->getNumElements();
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }

  // Currently, a general mix of permutes and pack instructions is output by
  // isel, which follow the cost computation above except for this case which
  // is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}
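
// Worked example (a sketch): truncating <16 x i32> to <16 x i8> starts with
// NumParts == 4 and Log2Diff == 2; the loop adds 2 + 1 packs, giving cost 3.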

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy() &&
         "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert(!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
    // be either scalar or already vectorized with a same or lesser VF.
    Type *ElTy = OpTy->getScalarType();
    return FixedVectorType::get(ElTy, VF);
  }

  return nullptr;
}

// Get the cost of converting a boolean vector to a vector with same width
// and element size as Dst, plus the cost of zero extending if needed.
unsigned SystemZTTIImpl::
getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                              const Instruction *I) {
  auto *DstVTy = cast<FixedVectorType>(Dst);
  unsigned VF = DstVTy->getNumElements();
  unsigned Cost = 0;
  // If we know the widths of the compared operands, get the cost of
  // converting the bitmask to match Dst. Otherwise assume same widths.
  Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
  if (CmpOpTy != nullptr)
    Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
  if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
    // One 'vn' per dst vector with an immediate mask.
    Cost += getNumVectorRegs(Dst);
  return Cost;
}

int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I) {
  // FIXME: Can the logic below also be used for these cost kinds?
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency) {
    int BaseCost = BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I);
    return BaseCost == 0 ? BaseCost : 1;
  }

  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (!Src->isVectorTy()) {
    assert(!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) {
      if (SrcScalarBits >= 32 ||
          (I != nullptr && isa<LoadInst>(I->getOperand(0))))
        return 1;
      return SrcScalarBits > 1 ? 2 /*i8/i16 extend*/ : 5 /*branch seq.*/;
    }

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      if (ST->hasLoadStoreOnCond2())
        return 2; // li 0; loc 1

      // This should be extension of a compare i1 result, which is done with
      // ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of an fp-type were compared, this costs +1.
        Cost++;
      return Cost;
    }
  }
  else if (ST->hasVector()) {
    auto *SrcVecTy = cast<FixedVectorType>(Src);
    auto *DstVecTy = cast<FixedVectorType>(Dst);
    unsigned VF = SrcVecTy->getNumElements();
    unsigned NumDstVectors = getNumVectorRegs(Dst);
    unsigned NumSrcVectors = getNumVectorRegs(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }

    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to setup the unpacking.
        unsigned NumSrcVectorOps =
            (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                            : (NumDstVectors / 2));

        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      }
      else if (SrcScalarBits == 1)
        return getBoolVecToIntConversionCost(Opcode, Dst, I);
    }
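
    // Worked example (a sketch): for zext <16 x i8> to <16 x i32>,
    // NumUnpacks == 2 and NumDstVectors == 4, with NumSrcVectorOps ==
    // 4 - 1 == 3, so the returned cost is 2 * 4 + 3 == 11.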

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix base implementation which could simplify things a bit here
      // (seems to miss on differentiating on scalar/vector types).

      // Only 64 bit vector conversions are natively supported before z15.
      if (DstScalarBits == 64 || ST->hasVectorEnhancements2()) {
        if (SrcScalarBits == DstScalarBits)
          return NumDstVectors;

        if (SrcScalarBits == 1)
          return getBoolVecToIntConversionCost(Opcode, Dst, I) + NumDstVectors;
      }

      // Return the cost of multiple scalar invocation plus the cost of
      // inserting and extracting the values. Base implementation does not
      // realize float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType(), CostKind);
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(SrcVecTy, false, NeedsExtracts);
      TotCost += getScalarizationOverhead(DstVecTy, NeedsInserts, false);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128) // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ +
               getScalarizationOverhead(DstVecTy, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128. VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(SrcVecTy, false, true);
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I);
}

// Scalar i8 / i16 operations will typically be made after first extending
// the operands to i32.
static unsigned getOperandsExtensionCost(const Instruction *I) {
  unsigned ExtCost = 0;
  for (Value *Op : I->operands())
    // A load of i8 or i16 sign/zero extends to i32.
    if (!isa<LoadInst>(Op) && !isa<ConstantInt>(Op))
      ExtCost++;

  return ExtCost;
}

int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy,
                                       TTI::TargetCostKind CostKind,
                                       const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind);

  if (!ValTy->isVectorTy()) {
    switch (Opcode) {
    case Instruction::ICmp: {
      // A loaded value compared with 0 with multiple users becomes Load and
      // Test. The load is then not foldable, so return 0 cost for the ICmp.
      unsigned ScalarBits = ValTy->getScalarSizeInBits();
      if (I != nullptr && ScalarBits >= 32)
        if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
          if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
            if (!Ld->hasOneUse() && Ld->getParent() == I->getParent() &&
                C->getZExtValue() == 0)
              return 0;

      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += (I != nullptr ? getOperandsExtensionCost(I) : 2);
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load on condition for FP - costs a conditional jump.
      return 1; // Load On Condition / Select Register.
    }
  }
  else if (ST->hasVector()) {
    unsigned VF = cast<FixedVectorType>(ValTy)->getNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
        switch (cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats. FIXME: <2 x float> generates same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumVectorRegs(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    }
    else { // Called with a select instruction.
      assert(Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
            getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind);
}

int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two grs into a vector register, so only count half the
  // number of instructions.
  if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->isIntOrIntVectorTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
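
// Illustrative sketch (not in the original source): extracting element 0 of
// a <2 x i64> costs 1 + 1 == 2 (the vlgv plus the FXU-transfer penalty),
// while inserting i64 elements 0 and 1 costs 1 + 0, since one vlvgp sets
// both halves at once.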

// Check if a load may be folded as a memory operand in its user.
bool SystemZTTIImpl::
isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
  if (!Ld->hasOneUse())
    return false;
  FoldedValue = Ld;
  const Instruction *UserI = cast<Instruction>(*Ld->user_begin());
  unsigned LoadedBits = getScalarSizeInBits(Ld->getType());
  unsigned TruncBits = 0;
  unsigned SExtBits = 0;
  unsigned ZExtBits = 0;
  if (UserI->hasOneUse()) {
    unsigned UserBits = UserI->getType()->getScalarSizeInBits();
    if (isa<TruncInst>(UserI))
      TruncBits = UserBits;
    else if (isa<SExtInst>(UserI))
      SExtBits = UserBits;
    else if (isa<ZExtInst>(UserI))
      ZExtBits = UserBits;
  }
  if (TruncBits || SExtBits || ZExtBits) {
    FoldedValue = UserI;
    UserI = cast<Instruction>(*UserI->user_begin());
    // Load (single use) -> trunc/extend (single use) -> UserI
  }
  if ((UserI->getOpcode() == Instruction::Sub ||
       UserI->getOpcode() == Instruction::SDiv ||
       UserI->getOpcode() == Instruction::UDiv) &&
      UserI->getOperand(1) != FoldedValue)
    return false; // Not commutative, only RHS foldable.
  // LoadOrTruncBits holds the number of effectively loaded bits, but 0 if an
  // extension was made of the load.
  unsigned LoadOrTruncBits =
      ((SExtBits || ZExtBits) ? 0 : (TruncBits ? TruncBits : LoadedBits));
  switch (UserI->getOpcode()) {
  case Instruction::Add: // SE: 16->32, 16/32->64, z14:16->64. ZE: 32->64
  case Instruction::Sub:
  case Instruction::ICmp:
    if (LoadedBits == 32 && ZExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
    if (UserI->getOpcode() != Instruction::ICmp) {
      if (LoadedBits == 16 &&
          (SExtBits == 32 ||
           (SExtBits == 64 && ST->hasMiscellaneousExtensions2())))
        return true;
      if (LoadOrTruncBits == 16)
        return true;
    }
    LLVM_FALLTHROUGH;
  case Instruction::SDiv: // SE: 32->64
    if (LoadedBits == 32 && SExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::UDiv:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // This also makes sense for float operations, but disabled for now due
    // to regressions.
    // case Instruction::FCmp:
    // case Instruction::FAdd:
    // case Instruction::FSub:
    // case Instruction::FMul:
    // case Instruction::FDiv:

    // All possible extensions of memory checked above.

    // Comparison between memory and immediate.
    if (UserI->getOpcode() == Instruction::ICmp)
      if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1)))
        if (isUInt<16>(CI->getZExtValue()))
          return true;
    return (LoadOrTruncBits == 32 || LoadOrTruncBits == 64);
  }
  return false;
}
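
// Illustrative sketch (not in the original source): a single-use i32 load
// feeding the RHS of an add can be folded into a memory-form instruction
// (e.g. "a" / "ay"), so getMemoryOpCost() below gives such a load 0 cost.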

static bool isBswapIntrinsicCall(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    if (auto *CI = dyn_cast<CallInst>(I))
      if (auto *F = CI->getCalledFunction())
        if (F->getIntrinsicID() == Intrinsic::bswap)
          return true;
  return false;
}

int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    MaybeAlign Alignment, unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
    // Store the load or its truncated or extended value in FoldedValue.
    const Instruction *FoldedValue = nullptr;
    if (isFoldableLoad(cast<LoadInst>(I), FoldedValue)) {
      const Instruction *UserI = cast<Instruction>(*FoldedValue->user_begin());
      assert(UserI->getNumOperands() == 2 && "Expected a binop.");

      // UserI can't fold two loads, so in that case return 0 cost only
      // half of the time.
      for (unsigned i = 0; i < 2; ++i) {
        if (UserI->getOperand(i) == FoldedValue)
          continue;

        if (Instruction *OtherOp = dyn_cast<Instruction>(UserI->getOperand(i))) {
          LoadInst *OtherLoad = dyn_cast<LoadInst>(OtherOp);
          if (!OtherLoad &&
              (isa<TruncInst>(OtherOp) || isa<SExtInst>(OtherOp) ||
               isa<ZExtInst>(OtherOp)))
            OtherLoad = dyn_cast<LoadInst>(OtherOp->getOperand(0));
          if (OtherLoad && isFoldableLoad(OtherLoad, FoldedValue /*dummy*/))
            return i == 0; // Both operands foldable.
        }
      }

      return 0; // Only I is foldable in user.
    }
  }

  unsigned NumOps =
      (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));

  // Store/Load reversed saves one instruction.
  if (((!Src->isVectorTy() && NumOps == 1) || ST->hasVectorEnhancements2()) &&
      I != nullptr) {
    if (Opcode == Instruction::Load && I->hasOneUse()) {
      const Instruction *LdUser = cast<Instruction>(*I->user_begin());
      // In case of load -> bswap -> store, return normal cost for the load.
      if (isBswapIntrinsicCall(LdUser) &&
          (!LdUser->hasOneUse() || !isa<StoreInst>(*LdUser->user_begin())))
        return 0;
    }
    else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
      const Value *StoredVal = SI->getValueOperand();
      if (StoredVal->hasOneUse() && isBswapIntrinsicCall(StoredVal))
        return 0;
    }
  }

  if (Src->getScalarSizeInBits() == 128)
    // 128 bit scalars are held in a pair of two 64 bit registers.
    NumOps *= 2;

  return NumOps;
}

// The generic implementation of getInterleavedMemoryOpCost() is based on
// adding costs of the memory operations plus all the extracts and inserts
// needed for using / defining the vector operands. The SystemZ version does
// roughly the same but bases the computations on vector permutations
// instead.
int SystemZTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Return the ceiling of dividing A by B.
  auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

  unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
  assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
  unsigned VF = NumElts / Factor;
  unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
  unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
  unsigned NumPermutes = 0;

  if (Opcode == Instruction::Load) {
    // Loading interleave groups may have gaps, which may mean fewer
    // loads. Find out how many vectors will be loaded in total, and in how
    // many of them each value will be in.
    BitVector UsedInsts(NumVectorMemOps, false);
    std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
    for (unsigned Index : Indices)
      for (unsigned Elt = 0; Elt < VF; ++Elt) {
        unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
        UsedInsts.set(Vec);
        ValueVecs[Index].set(Vec);
      }
    NumVectorMemOps = UsedInsts.count();

    for (unsigned Index : Indices) {
      // Estimate that each loaded source vector containing this Index
      // requires one operation, except that vperm can handle two input
      // registers first time for each dst vector.
      unsigned NumSrcVecs = ValueVecs[Index].count();
      unsigned NumDstVecs = ceil(VF * getScalarSizeInBits(VecTy), 128U);
      assert(NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
      NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
    }
  } else {
    // Estimate the permutes for each stored vector as the smaller of the
    // number of elements and the number of source vectors. Subtract one per
    // dst vector for vperm (S.A.).
    unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
    unsigned NumDstVecs = NumVectorMemOps;
    assert(NumSrcVecs > 1 && "Expected at least two source vectors.");
    NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
  }

  // Cost of load/store operations and the permutations needed.
  return NumVectorMemOps + NumPermutes;
}
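
// Worked example (a sketch): loading both interleaved values of a Factor-2
// group with VecTy <8 x i32> uses 2 vector loads; each of the two indices
// needs one vperm (NumSrcVecs 2, NumDstVecs 1), so the returned cost is 4.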

static int getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy) {
  if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
    return getNumVectorRegs(RetTy); // VPERM
  return -1;
}

int SystemZTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                          TTI::TargetCostKind CostKind) {
  int Cost = getVectorIntrinsicInstrCost(ICA.getID(), ICA.getReturnType());
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}