//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//

int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf:
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}
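
// Illustrative examples of the buckets above (instruction choices are
// approximate):
//   0x000000007fffffff : signed 32-bit field      -> lgfi            -> TCC_Basic
//   0x00000000ffffffff : unsigned 32-bit field    -> llilf           -> TCC_Basic
//   0xffffffff00000000 : low 32 bits are zero     -> llihf           -> TCC_Basic
//   0x0000000100000001 : needs two immediates, e.g. llihf + oilf     -> 2 * TCC_Basic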

int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks whose high 32 bits are all ones are supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks whose low 32 bits are all ones are supported by nihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}
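
// Illustrative example: xor i64 %x, 4294967295 fits xilf and is reported as
// TCC_Free, so constant hoisting leaves the immediate in place, whereas a mask
// like 0x0000000100000001 matches none of the cases above and is costed like a
// plain constant load (2 * TCC_Basic), making it a hoisting candidate.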

int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model for operations on integers larger than 64 bit implemented yet.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

void SystemZTTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call, what the machine instruction count
  // estimate is, and how many stores there are.
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, 0, 0);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);
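  // For example, with 3 stores per iteration Max = 12 / 3 = 4, so unrolling is
  // capped at 4 copies of the body; with no stores there is no cap.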

  if (HasCall) {
    // Only allow full unrolling if the loop has any calls.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;

  UP.Force = true;
}

unsigned SystemZTTIImpl::getNumberOfRegisters(bool Vector) {
  if (!Vector)
    // Discount the stack pointer. Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}

int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  if (Ty->isVectorTy()) {
    assert(ST->hasVector() && "getArithmeticInstrCost() called with vector type.");
    unsigned VF = Ty->getVectorNumElements();
    unsigned NumVectors = getNumberOfParts(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr) {
      return NumVectors;
    }

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // Return the cost of multiple scalar invocations plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args);
        // FIXME: VF 2 for these FP operations is currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }
  else { // Scalar:
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    if (Opcode == Instruction::LShr || Opcode == Instruction::AShr)
      return (ScalarBits >= 32 ? 1 : 2 /*ext*/);

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1)
      // 2 * ipm sequences ; xor ; shift ; compare
      return 7;

    // An extra extension for narrow types is needed.
    if ((Opcode == Instruction::SDiv || Opcode == Instruction::SRem))
      // sext of op(s) for narrow types
      return (ScalarBits < 32 ? 4 : (ScalarBits == 32 ? 2 : 1));

    if (Opcode == Instruction::UDiv || Opcode == Instruction::URem)
      // Clearing of low 64 bit reg + sext of op(s) for narrow types + dl[g]r
      return (ScalarBits < 32 ? 4 : 2);
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}
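
// Worked examples for the scalar SDiv case above: an i16 sdiv is costed 4,
// an i32 sdiv 2, and an i64 sdiv 1 (wider types need no extensions).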

int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                   Type *SubTp) {
  assert(Tp->isVectorTy());
  assert(ST->hasVector() && "getShuffleCost() called.");
  unsigned NumVectors = getNumberOfParts(Tp);

  // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

  // FP128 values are always in scalar registers, so there is no work
  // involved with a shuffle, except for broadcast. In that case register
  // moves are done with a single instruction per element.
  if (Tp->getScalarType()->isFP128Ty())
    return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

  switch (Kind) {
  case TargetTransformInfo::SK_ExtractSubvector:
    // ExtractSubvector Index indicates start offset.

    // Extracting a subvector from first index is a noop.
    return (Index == 0 ? 0 : NumVectors);

  case TargetTransformInfo::SK_Broadcast:
    // Loop vectorizer calls here to figure out the extra cost of
    // broadcasting a loaded value to all elements of a vector. Since vlrep
    // loads and replicates with a single instruction, adjust the returned
    // value.
    return NumVectors - 1;

  default:

    // SystemZ supports single instruction permutation / replication.
    return NumVectors;
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits0 < Bits1)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}
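
// For example, v16i8 vs. v16i32 gives Log2_32(32) - Log2_32(8) = 5 - 3 = 2.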

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert(SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
         "Packing must reduce size of vector type.");
  assert(SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() &&
         "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumberOfParts(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop. TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = SrcTy->getVectorNumElements();
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }

  // Currently, a general mix of permutes and pack instructions is output by
  // isel, which follows the cost computation above except for this case,
  // which is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}
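
// Worked example: truncating v8i64 to v8i8 starts with NumParts = 4 and
// Log2Diff = 3, so the loop accumulates 2 + 1 + 1 = 4, and the special case
// above brings it down to 3 instructions.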

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy() &&
         "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumberOfParts(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}
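
// For instance, widening a v4i32 compare mask for a v4i64-typed select
// (Log2Diff = 1, DstNumParts = 2) costs 1 * 2 unpacks plus 1 mask move = 3.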

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
      if (isa<CmpInst>(LogicI->getOperand(1)))
        OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert(!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
    // be either scalar or already vectorized with a same or lesser VF.
    Type *ElTy = OpTy->getScalarType();
    return VectorType::get(ElTy, VF);
  }

  return nullptr;
}

int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (Src->isVectorTy()) {
    assert(ST->hasVector() && "getCastInstrCost() called with vector type.");
    assert(Dst->isVectorTy());
    unsigned VF = Src->getVectorNumElements();
    unsigned NumDstVectors = getNumberOfParts(Dst);
    unsigned NumSrcVectors = getNumberOfParts(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }

    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to set up the unpacking.
        unsigned NumSrcVectorOps =
          (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                          : (NumDstVectors / 2));

        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      }
      else if (SrcScalarBits == 1) {
        // This should be extension of a compare i1 result.
        // If we know the widths of the compared operands, get the
        // cost of converting it to Dst. Otherwise assume same widths.
        unsigned Cost = 0;
        Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
        if (CmpOpTy != nullptr)
          Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
        if (Opcode == Instruction::ZExt)
          // One 'vn' per dst vector with an immediate mask.
          Cost += NumDstVectors;
        return Cost;
      }
    }

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix base implementation which could simplify things a bit here
      // (seems to miss on differentiating on scalar/vector types).

      // Only 64 bit vector conversions are natively supported.
      if (SrcScalarBits == 64 && DstScalarBits == 64)
        return NumDstVectors;

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values. Base implementation does not
      // realize float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(Dst, NeedsInserts, NeedsExtracts);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128) // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128. VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(Src, false, true);
    }
  }
  else { // Scalar
    assert(!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP)
      return (SrcScalarBits >= 32 ? 1 : 2 /*i8/i16 extend*/);

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      // This should be extension of a compare i1 result, which is done with
      // ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of an fp-type were compared, this costs +1.
        Cost++;
      return Cost;
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}
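
// Worked example for the vector ZExt/SExt path above: sext of v8i8 to v8i32
// (assuming v8i8 legalizes to a single vector register) has NumUnpacks = 2,
// NumSrcVectors = 1 and NumDstVectors = 2, giving 2 * 2 + (2 - 1) = 5.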

int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                       const Instruction *I) {
  if (ValTy->isVectorTy()) {
    assert(ST->hasVector() && "getCmpSelInstrCost() called with vector type.");
    assert(CondTy == nullptr || CondTy->isVectorTy());
    unsigned VF = ValTy->getVectorNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
        switch (dyn_cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats. FIXME: <2 x float> generates same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumberOfParts(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    }
    else { // Called with a select instruction.
      assert(Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
          getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumberOfParts(ValTy) /*vsel*/ + PackCost;
    }
  }
  else { // Scalar
    switch (Opcode) {
    case Instruction::ICmp: {
      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += 2; // extend both operands
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load on condition for FP, so this costs a conditional jump.
      return 1; // Load On Condition.
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr);
}

int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two grs into a vector register, so only count half the
  // number of instructions.
  if (Opcode == Instruction::InsertElement &&
      Val->getScalarType()->isIntegerTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((Val->getScalarSizeInBits() == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->getScalarType()->isIntegerTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  if (!Src->isVectorTy() && Opcode == Instruction::Load &&
      I != nullptr && I->hasOneUse()) {
    const Instruction *UserI = cast<Instruction>(*I->user_begin());
    unsigned Bits = Src->getScalarSizeInBits();
    bool FoldsLoad = false;
    switch (UserI->getOpcode()) {
    case Instruction::ICmp:
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      // This also makes sense for float operations, but disabled for now due
      // to regressions.
      // case Instruction::FCmp:
      // case Instruction::FAdd:
      // case Instruction::FSub:
      // case Instruction::FMul:
      // case Instruction::FDiv:
      FoldsLoad = (Bits == 32 || Bits == 64);
      break;
    }

    if (FoldsLoad) {
      assert(UserI->getNumOperands() == 2 &&
             "Expected to only handle binops.");

      // UserI can't fold two loads, so in that case return 0 cost only
      // half of the time.
      for (unsigned i = 0; i < 2; ++i) {
        if (UserI->getOperand(i) == I)
          continue;
        if (LoadInst *LI = dyn_cast<LoadInst>(UserI->getOperand(i))) {
          if (LI->hasOneUse())
            return i == 0 ? 0 : 1;
        }
      }

      return 0;
    }
  }

  unsigned NumOps = getNumberOfParts(Src);

  if (Src->getScalarSizeInBits() == 128)
    // 128 bit scalars are held in a pair of two 64 bit registers.
    NumOps *= 2;

  return NumOps;
}
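
// Example of the folding heuristic above: an i64 load whose only user is an
// add with a non-load second operand is costed 0, since it can be folded into
// a memory-form instruction such as ag.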

int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  unsigned WideBits = (VecTy->isPtrOrPtrVectorTy() ?
     (64U * VecTy->getVectorNumElements()) : VecTy->getPrimitiveSizeInBits());
  assert(WideBits > 0 && "Could not compute size of vector");
  int NumWideParts =
    ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));

  // How many source vectors are handled to produce a vectorized operand?
  int NumElsPerVector = (VecTy->getVectorNumElements() / NumWideParts);
  int NumSrcParts =
    ((NumWideParts > NumElsPerVector) ? NumElsPerVector : NumWideParts);

  // A Load group may have gaps.
  unsigned NumOperands =
    ((Opcode == Instruction::Load) ? Indices.size() : Factor);

  // Each needed permute takes two vectors as input.
  if (NumSrcParts > 1)
    NumSrcParts--;
  int NumPermutes = NumSrcParts * NumOperands;

  // Cost of load/store operations and the permutations needed.
  return NumWideParts + NumPermutes;
}
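
// Worked example: an interleaved load of <8 x i32> with Factor = 2 and both
// indices used has NumWideParts = 2, NumElsPerVector = 4, NumSrcParts = 2 - 1
// = 1 and NumOperands = 2, for a total cost of 2 loads + 2 permutes = 4.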