//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//

int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf:
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}

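// Return the cost of an immediate operand Imm at index Idx of an instruction
// with the given Opcode: TCC_Free when the immediate can be encoded directly
// in a SystemZ instruction, otherwise the cost of materializing it.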
int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nilh.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    return TTI::TCC_Basic;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

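// As above, but for an intrinsic call: immediates that the expanded intrinsic
// can encode directly are free.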
int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

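// Report fast hardware popcount when the population-count facility is
// available (for widths up to 64 bits).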
TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

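// Tune the unroller: mainly limit the unroll count so that the unrolled loop
// does not issue too many stores at once (see the z13 note below).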
void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call, what the machine instruction count
  // estimate is, and how many stores there are.
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, 0, 0);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);

  if (HasCall) {
    // Only allow full unrolling if the loop has any calls.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;
}

bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                                   TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
    std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
             C2.NumIVMuls, C2.NumBaseAdds,
             C2.ScaleCost, C2.SetupCost);
}

unsigned SystemZTTIImpl::getNumberOfRegisters(bool Vector) {
  if (!Vector)
    // Discount the stack pointer. Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}

// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
  unsigned Size =
    (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must have non-zero size.");
  return Size;
}

// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
// 3.
static unsigned getNumVectorRegs(Type *Ty) {
  assert(Ty->isVectorTy() && "Expected vector type");
  unsigned WideBits = getScalarSizeInBits(Ty) * Ty->getVectorNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}

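// Return the estimated cost of a scalar or vector arithmetic instruction,
// including the special handling of division/remainder and FP operations
// below.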
int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // There are three cases of division and remainder: Dividing with a register
  // needs a divide instruction. A divisor which is a power of two constant
  // can be implemented with a sequence of shifts. Any other constant needs a
  // multiply and shifts.
  const unsigned DivInstrCost = 20;
  const unsigned DivMulSeqCost = 10;
  const unsigned SDivPow2Cost = 4;

  bool SignedDivRem =
      Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
  bool UnsignedDivRem =
      Opcode == Instruction::UDiv || Opcode == Instruction::URem;

  // Check for a constant divisor.
  bool DivRemConst = false;
  bool DivRemConstPow2 = false;
  if ((SignedDivRem || UnsignedDivRem) && Args.size() == 2) {
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      const ConstantInt *CVal =
          (C->getType()->isVectorTy()
               ? dyn_cast_or_null<const ConstantInt>(C->getSplatValue())
               : dyn_cast<const ConstantInt>(C));
      if (CVal != nullptr &&
          (CVal->getValue().isPowerOf2() || (-CVal->getValue()).isPowerOf2()))
        DivRemConstPow2 = true;
      else
        DivRemConst = true;
    }
  }

  if (Ty->isVectorTy()) {
    assert(ST->hasVector() &&
           "getArithmeticInstrCost() called with vector type.");
    unsigned VF = Ty->getVectorNumElements();
    unsigned NumVectors = getNumVectorRegs(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr) {
      return NumVectors;
    }

    if (DivRemConstPow2)
      return (NumVectors * (SignedDivRem ? SDivPow2Cost : 1));
    if (DivRemConst)
      return VF * DivMulSeqCost + getScalarizationOverhead(Ty, Args);
    if ((SignedDivRem || UnsignedDivRem) && VF > 4)
      // Temporary hack: disable high vectorization factors with integer
      // division/remainder, which will get scalarized and handled with
      // GR128 registers. The mischeduler is not clever enough to avoid
      // spilling yet.
      return 1000;

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocation plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost =
            getArithmeticInstrCost(Opcode, Ty->getScalarType());
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args);
        // FIXME: VF 2 for these FP operations are currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }
  else { // Scalar:
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1) {
      if (ST->hasLoadStoreOnCond2())
        return 5; // 2 * (li 0; loc 1); xor
      return 7; // 2 * ipm sequences ; xor ; shift ; compare
    }

    if (DivRemConstPow2)
      return (SignedDivRem ? SDivPow2Cost : 1);
    if (DivRemConst)
      return DivMulSeqCost;
    if (SignedDivRem || UnsignedDivRem)
      return DivInstrCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}

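// Return the cost of a vector shuffle of the given kind; most shuffles map to
// a single permute/replicate instruction per vector register.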
int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                   Type *SubTp) {
  assert (Tp->isVectorTy());
  assert (ST->hasVector() && "getShuffleCost() called.");
  unsigned NumVectors = getNumVectorRegs(Tp);

  // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

  // FP128 values are always in scalar registers, so there is no work
  // involved with a shuffle, except for broadcast. In that case register
  // moves are done with a single instruction per element.
  if (Tp->getScalarType()->isFP128Ty())
    return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

  switch (Kind) {
  case TargetTransformInfo::SK_ExtractSubvector:
    // ExtractSubvector Index indicates start offset.

    // Extracting a subvector from first index is a noop.
    return (Index == 0 ? 0 : NumVectors);

  case TargetTransformInfo::SK_Broadcast:
    // Loop vectorizer calls here to figure out the extra cost of
    // broadcasting a loaded value to all elements of a vector. Since vlrep
    // loads and replicates with a single instruction, adjust the returned
    // value.
    return NumVectors - 1;

  default:

    // SystemZ supports single instruction permutation / replication.
    return NumVectors;
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
          "Packing must reduce size of vector type.");
  assert (SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() &&
          "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop. TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = SrcTy->getVectorNumElements();
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }

  // Currently, a general mix of permutes and pack instructions is output by
  // isel, which follows the cost computation above except for this case which
  // is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy() &&
          "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert (!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
    // be either scalar or already vectorized with a same or lesser VF.
    Type *ElTy = OpTy->getScalarType();
    return VectorType::get(ElTy, VF);
  }

  return nullptr;
}

// Get the cost of converting a boolean vector to a vector with same width
// and element size as Dst, plus the cost of zero extending if needed.
unsigned SystemZTTIImpl::
getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                              const Instruction *I) {
  assert (Dst->isVectorTy());
  unsigned VF = Dst->getVectorNumElements();
  unsigned Cost = 0;
  // If we know the widths of the compared operands, get any cost of
  // converting them to match Dst. Otherwise assume same widths.
  Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
  if (CmpOpTy != nullptr)
    Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
  if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
    // One 'vn' per dst vector with an immediate mask.
    Cost += getNumVectorRegs(Dst);
  return Cost;
}

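// Return the cost of a conversion instruction, for both the vector and the
// scalar cases.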
int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (Src->isVectorTy()) {
    assert (ST->hasVector() && "getCastInstrCost() called with vector type.");
    assert (Dst->isVectorTy());
    unsigned VF = Src->getVectorNumElements();
    unsigned NumDstVectors = getNumVectorRegs(Dst);
    unsigned NumSrcVectors = getNumVectorRegs(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }

    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to setup the unpacking.
        unsigned NumSrcVectorOps =
          (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                          : (NumDstVectors / 2));

        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      }
      else if (SrcScalarBits == 1)
        return getBoolVecToIntConversionCost(Opcode, Dst, I);
    }

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix base implementation which could simplify things a bit here
      // (seems to miss on differentiating on scalar/vector types).

      // Only 64 bit vector conversions are natively supported.
      if (DstScalarBits == 64) {
        if (SrcScalarBits == 64)
          return NumDstVectors;

        if (SrcScalarBits == 1)
          return getBoolVecToIntConversionCost(Opcode, Dst, I) + NumDstVectors;
      }

      // Return the cost of multiple scalar invocation plus the cost of
      // inserting and extracting the values. Base implementation does not
      // realize float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(Src, false, NeedsExtracts);
      TotCost += getScalarizationOverhead(Dst, NeedsInserts, false);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128)  // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128. VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(Src, false, true);
    }
  }
  else { // Scalar
    assert (!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) {
      if (SrcScalarBits >= 32 ||
          (I != nullptr && isa<LoadInst>(I->getOperand(0))))
        return 1;
      return SrcScalarBits > 1 ? 2 /*i8/i16 extend*/ : 5 /*branch seq.*/;
    }

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      if (ST->hasLoadStoreOnCond2())
        return 2; // li 0; loc 1

      // This should be extension of a compare i1 result, which is done with
      // ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of an fp-type were compared, this costs +1.
        Cost++;
      return Cost;
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}

// Scalar i8 / i16 operations will typically be made after first extending
// the operands to i32.
static unsigned getOperandsExtensionCost(const Instruction *I) {
  unsigned ExtCost = 0;
  for (Value *Op : I->operands())
    // A load of i8 or i16 sign/zero extends to i32.
    if (!isa<LoadInst>(Op) && !isa<ConstantInt>(Op))
      ExtCost++;

  return ExtCost;
}

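// Return the cost of a compare or select instruction, vector or scalar.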
int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, const Instruction *I) {
  if (ValTy->isVectorTy()) {
    assert (ST->hasVector() && "getCmpSelInstrCost() called with vector type.");
    unsigned VF = ValTy->getVectorNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
        switch (cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats. FIXME: <2 x float> generates same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumVectorRegs(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    }
    else { // Called with a select instruction.
      assert (Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
          getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
    }
  }
  else { // Scalar
    switch (Opcode) {
    case Instruction::ICmp: {
      // A loaded value compared with 0 with multiple users becomes Load and
      // Test. The load is then not foldable, so return 0 cost for the ICmp.
      unsigned ScalarBits = ValTy->getScalarSizeInBits();
      if (I != nullptr && ScalarBits >= 32)
        if (LoadInst *Ld = dyn_cast<LoadInst>(I->getOperand(0)))
          if (const ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
            if (!Ld->hasOneUse() && Ld->getParent() == I->getParent() &&
                C->getZExtValue() == 0)
              return 0;

      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += (I != nullptr ? getOperandsExtensionCost(I) : 2);
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load on condition for FP - costs a conditional jump.
      return 1; // Load On Condition.
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr);
}

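// Return the cost of inserting an element into or extracting an element from
// a vector register.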
int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two grs into a vector register, so only count half the
  // number of instructions.
  if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->isIntOrIntVectorTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

// Check if a load may be folded as a memory operand in its user.
bool SystemZTTIImpl::
isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {
  if (!Ld->hasOneUse())
    return false;
  FoldedValue = Ld;
  const Instruction *UserI = cast<Instruction>(*Ld->user_begin());
  unsigned LoadedBits = getScalarSizeInBits(Ld->getType());
  unsigned TruncBits = 0;
  unsigned SExtBits = 0;
  unsigned ZExtBits = 0;
  if (UserI->hasOneUse()) {
    unsigned UserBits = UserI->getType()->getScalarSizeInBits();
    if (isa<TruncInst>(UserI))
      TruncBits = UserBits;
    else if (isa<SExtInst>(UserI))
      SExtBits = UserBits;
    else if (isa<ZExtInst>(UserI))
      ZExtBits = UserBits;
  }
  if (TruncBits || SExtBits || ZExtBits) {
    FoldedValue = UserI;
    UserI = cast<Instruction>(*UserI->user_begin());
    // Load (single use) -> trunc/extend (single use) -> UserI
  }
  if ((UserI->getOpcode() == Instruction::Sub ||
       UserI->getOpcode() == Instruction::SDiv ||
       UserI->getOpcode() == Instruction::UDiv) &&
      UserI->getOperand(1) != FoldedValue)
    return false; // Not commutative, only RHS foldable.
  // LoadOrTruncBits holds the number of effectively loaded bits, but 0 if an
  // extension was made of the load.
  unsigned LoadOrTruncBits =
    ((SExtBits || ZExtBits) ? 0 : (TruncBits ? TruncBits : LoadedBits));
  switch (UserI->getOpcode()) {
  case Instruction::Add: // SE: 16->32, 16/32->64, z14:16->64. ZE: 32->64
  case Instruction::Sub:
  case Instruction::ICmp:
    if (LoadedBits == 32 && ZExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::Mul: // SE: 16->32, 32->64, z14:16->64
    if (UserI->getOpcode() != Instruction::ICmp) {
      if (LoadedBits == 16 &&
          (SExtBits == 32 ||
           (SExtBits == 64 && ST->hasMiscellaneousExtensions2())))
        return true;
      if (LoadOrTruncBits == 16)
        return true;
    }
    LLVM_FALLTHROUGH;
  case Instruction::SDiv:// SE: 32->64
    if (LoadedBits == 32 && SExtBits == 64)
      return true;
    LLVM_FALLTHROUGH;
  case Instruction::UDiv:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // This also makes sense for float operations, but disabled for now due
    // to regressions.
    // case Instruction::FCmp:
    // case Instruction::FAdd:
    // case Instruction::FSub:
    // case Instruction::FMul:
    // case Instruction::FDiv:

    // All possible extensions of memory checked above.

    // Comparison between memory and immediate.
    if (UserI->getOpcode() == Instruction::ICmp)
      if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1)))
        if (isUInt<16>(CI->getZExtValue()))
          return true;
    return (LoadOrTruncBits == 32 || LoadOrTruncBits == 64);
    break;
  }
  return false;
}

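// Return true if V is a call to the llvm.bswap intrinsic.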
static bool isBswapIntrinsicCall(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    if (auto *CI = dyn_cast<CallInst>(I))
      if (auto *F = CI->getCalledFunction())
        if (F->getIntrinsicID() == Intrinsic::bswap)
          return true;
  return false;
}

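// Return the cost of a load or store. A scalar load that will be folded into
// its single user as a memory operand is given a cost of 0.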
int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  if (!Src->isVectorTy() && Opcode == Instruction::Load && I != nullptr) {
    // Store the load or its truncated or extended value in FoldedValue.
    const Instruction *FoldedValue = nullptr;
    if (isFoldableLoad(cast<LoadInst>(I), FoldedValue)) {
      const Instruction *UserI = cast<Instruction>(*FoldedValue->user_begin());
      assert (UserI->getNumOperands() == 2 && "Expected a binop.");

      // UserI can't fold two loads, so in that case return 0 cost only
      // half of the time.
      for (unsigned i = 0; i < 2; ++i) {
        if (UserI->getOperand(i) == FoldedValue)
          continue;

        if (Instruction *OtherOp = dyn_cast<Instruction>(UserI->getOperand(i))){
          LoadInst *OtherLoad = dyn_cast<LoadInst>(OtherOp);
          if (!OtherLoad &&
              (isa<TruncInst>(OtherOp) || isa<SExtInst>(OtherOp) ||
               isa<ZExtInst>(OtherOp)))
            OtherLoad = dyn_cast<LoadInst>(OtherOp->getOperand(0));
          if (OtherLoad && isFoldableLoad(OtherLoad, FoldedValue/*dummy*/))
            return i == 0; // Both operands foldable.
        }
      }

      return 0; // Only I is foldable in user.
    }
  }

  unsigned NumOps =
    (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));

  // Store/Load reversed saves one instruction.
  if (!Src->isVectorTy() && NumOps == 1 && I != nullptr) {
    if (Opcode == Instruction::Load && I->hasOneUse()) {
      const Instruction *LdUser = cast<Instruction>(*I->user_begin());
      // In case of load -> bswap -> store, return normal cost for the load.
      if (isBswapIntrinsicCall(LdUser) &&
          (!LdUser->hasOneUse() || !isa<StoreInst>(*LdUser->user_begin())))
        return 0;
    }
    else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
      const Value *StoredVal = SI->getValueOperand();
      if (StoredVal->hasOneUse() && isBswapIntrinsicCall(StoredVal))
        return 0;
    }
  }

  if (Src->getScalarSizeInBits() == 128)
    // 128 bit scalars are held in a pair of two 64 bit registers.
    NumOps *= 2;

  return NumOps;
}

// The generic implementation of getInterleavedMemoryOpCost() is based on
// adding costs of the memory operations plus all the extracts and inserts
// needed for using / defining the vector operands. The SystemZ version does
// roughly the same but bases the computations on vector permutations
// instead.
int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               bool UseMaskForCond,
                                               bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Return the ceiling of dividing A by B.
  auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };

  unsigned NumElts = VecTy->getVectorNumElements();
  assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
  unsigned VF = NumElts / Factor;
  unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
  unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
  unsigned NumPermutes = 0;

  if (Opcode == Instruction::Load) {
    // Loading interleave groups may have gaps, which may mean fewer
    // loads. Find out how many vectors will be loaded in total, and in how
    // many of them each value will be in.
    BitVector UsedInsts(NumVectorMemOps, false);
    std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
    for (unsigned Index : Indices)
      for (unsigned Elt = 0; Elt < VF; ++Elt) {
        unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
        UsedInsts.set(Vec);
        ValueVecs[Index].set(Vec);
      }
    NumVectorMemOps = UsedInsts.count();

    for (unsigned Index : Indices) {
      // Estimate that each loaded source vector containing this Index
      // requires one operation, except that vperm can handle two input
      // registers first time for each dst vector.
      unsigned NumSrcVecs = ValueVecs[Index].count();
      unsigned NumDstVecs = ceil(VF * getScalarSizeInBits(VecTy), 128U);
      assert (NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
      NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
    }
  } else {
    // Estimate the permutes for each stored vector as the smaller of the
    // number of elements and the number of source vectors. Subtract one per
    // dst vector for vperm (S.A.).
    unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
    unsigned NumDstVecs = NumVectorMemOps;
    assert (NumSrcVecs > 1 && "Expected at least two source vectors.");
    NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
  }

  // Cost of load/store operations and the permutations needed.
  return NumVectorMemOps + NumPermutes;
}

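// Find the cost of a vectorized intrinsic that has a direct SystemZ lowering
// (currently only bswap, which becomes one vperm per vector register), or -1
// if there is none.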
static int getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy) {
  if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
    return getNumVectorRegs(RetTy); // VPERM
  return -1;
}

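// The two getIntrinsicInstrCost() overloads below defer to the base
// implementation unless getVectorIntrinsicInstrCost() knows a better cost.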
int SystemZTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                          ArrayRef<Value *> Args,
                                          FastMathFlags FMF, unsigned VF) {
  int Cost = getVectorIntrinsicInstrCost(ID, RetTy);
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
}

int SystemZTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                          ArrayRef<Type *> Tys,
                                          FastMathFlags FMF,
                                          unsigned ScalarizationCostPassed) {
  int Cost = getVectorIntrinsicInstrCost(ID, RetTy);
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ID, RetTy, Tys,
                                      FMF, ScalarizationCostPassed);
}