//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};
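
// For example: IPM places CC in bits 29:28 of its result (bit SystemZ::IPM_CC
// upwards) and the bits above them are zero, so testing "CC is 2 or 3" needs
// no XOR or add at all; bit 29 is already the answer, which getIPMConversion
// below encodes as IPMConversion(0, 0, SystemZ::IPM_CC + 1).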

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such.  In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner.  ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::ROTL);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
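
  // (Illustrative note, not a setting: with the limits above, even a 16-byte
  // memcpy reaches the target-specific lowering and becomes a single
  // "MVC 0(16,%r1),0(%r2)"-style instruction rather than two 8-byte
  // load/store pairs; the register choice here is only an example.)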
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AM, Type *Ty,
                                                  unsigned AS) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}
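
// For example, "base + index + 100000" fits a single z-style address (20-bit
// displacements cover -524288..524287 and one index register is free),
// whereas "base + 4 * index" does not: the scaled index has to be computed
// separately, which is why only Scale values of 0 and 1 are accepted above.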

bool SystemZTargetLowering::isFoldableMemAccessOffset(Instruction *I,
                                                      int64_t Offset) const {
  // This only applies to z13.
  if (!Subtarget.hasVector())
    return true;

  // * Use LDE instead of LE/LEY to avoid partial register
  //   dependencies (LDE only supports small offsets).
  // * Utilize the vector registers to hold floating point
  //   values (vector load / store instructions only support small
  //   offsets).

  assert (isa<LoadInst>(I) || isa<StoreInst>(I));
  Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                       I->getOperand(0)->getType());
  bool IsFPAccess = MemAccessTy->isFloatingPointTy();
  bool IsVectorAccess = MemAccessTy->isVectorTy();

  // A store of an extracted vector element will be combined into a VSTE type
  // instruction.
  if (!IsVectorAccess && isa<StoreInst>(I)) {
    Value *DataOp = I->getOperand(0);
    if (isa<ExtractElementInst>(DataOp))
      IsVectorAccess = true;
  }

  // A load which gets inserted into a vector element will be combined into a
  // VLE type instruction.
  if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
    User *LoadUser = *I->user_begin();
    if (isa<InsertElementInst>(LoadUser))
      IsVectorAccess = true;
  }

  if (!isUInt<12>(Offset) && (IsFPAccess || IsVectorAccess))
    return false;

  return true;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}
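
// For example, given RC = &SystemZ::GR64BitRegClass and Map =
// SystemZMC::GR64Regs, the constraint "{r5}" resolves to the internal
// 64-bit register name SystemZ::R5D.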

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}
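
// A concrete reading of the BCvt cases in the two helpers above, inferred
// from their asserts rather than stated in the code: a "short" vector such
// as <2 x float> has already been widened to a full 128-bit vector by type
// legalization, so passing it in an i64 location amounts to bitcasting to
// v2i64 and extracting element 0, with the receiving side rebuilding a
// v2i64 (undef in the other element) and bitcasting back.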

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getConstant(NumBytes, DL, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

SDValue SystemZTargetLowering::prepareVolatileOrAtomicLoad(
    SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const {
  return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}

// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
    Opcode = SystemZISD::PACKS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    Opcode = SystemZISD::PACKLS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
    Opcode = SystemZISD::VICMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
    Opcode = SystemZISD::VICMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
    Opcode = SystemZISD::VICMPHLS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vtm:
    Opcode = SystemZISD::VTM;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
    Opcode = SystemZISD::VFAE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
    Opcode = SystemZISD::VFAEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
    Opcode = SystemZISD::VFEE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
    Opcode = SystemZISD::VFEEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
    Opcode = SystemZISD::VFENE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
    Opcode = SystemZISD::VFENEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
    Opcode = SystemZISD::VISTR_CC;
    CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
    return true;

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
    Opcode = SystemZISD::VSTRC_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
    Opcode = SystemZISD::VSTRCZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfcedbs:
    Opcode = SystemZISD::VFCMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchdbs:
    Opcode = SystemZISD::VFCMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchedbs:
    Opcode = SystemZISD::VFCMPHES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vftcidb:
    Opcode = SystemZISD::VFTCI;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_tdc:
    Opcode = SystemZISD::TDC;
    CCValid = SystemZ::CCMASK_TDC;
    return true;

  default:
    return false;
  }
}

// Emit an intrinsic with chain with a glued value instead of its CC result.
static SDValue emitIntrinsicWithChainAndGlue(SelectionDAG &DAG, SDValue Op,
                                             unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  SDVTList RawVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
  SDValue OldChain = SDValue(Op.getNode(), 1);
  SDValue NewChain = SDValue(Intr.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr;
}

// Emit an intrinsic with a glued value instead of its CC result.
static SDValue emitIntrinsicWithGlue(SelectionDAG &DAG, SDValue Op,
                                     unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  if (Op->getNumValues() == 1)
    return DAG.getNode(Opcode, SDLoc(Op), MVT::Glue, Ops);
  assert(Op->getNumValues() == 2 && "Expected exactly one non-CC result");
  SDVTList RawVTs = DAG.getVTList(Op->getValueType(0), MVT::Glue);
  return DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}
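
// For example, CCMaskForCondCode(ISD::SETUGT) returns
// SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_GT: for floating point this
// means "greater or unordered", while for integer comparisons the UO bit
// merely marks the comparison as unsigned.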

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value.  Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
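  // To see why the CCMASK_0 case works: X has CC in bits 29:28 and zeros
  // above, so X < (1 << SystemZ::IPM_CC) exactly when CC is 0.  The
  // subtraction then borrows into bit 31 in precisely that case, and the
  // ">> 31" extracts it as a 0/1 result.  The cases below follow the same
  // pattern with different thresholds.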
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit.  0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All these can be
  // done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
  }
}
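
// For example, "x > -1" (Value == -1, CCMASK_CMP_GT) becomes "x >= 0":
// flipping the CCMASK_CMP_EQ bit turns GT into GE, and comparing against
// zero lets later code use a load-and-test or an already-set condition code.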
1645 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1646 // adjust the operands as necessary.
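// (CLI compares an unsigned byte in memory against an immediate, and
// CHHSI/CLHHSI do the same for signed/unsigned halfwords, so a matching
// single-use extending load can be folded into the comparison.)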
1647 static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
1648 Comparison &C) {
1649 // For us to make any changes, it must be a comparison between a single-use
1650 // load and a constant.
1651 if (!C.Op0.hasOneUse() ||
1652 C.Op0.getOpcode() != ISD::LOAD ||
1653 C.Op1.getOpcode() != ISD::Constant)
1654 return;
1656 // We must have an 8- or 16-bit load.
1657 auto *Load = cast<LoadSDNode>(C.Op0);
1658 unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
1659 if (NumBits != 8 && NumBits != 16)
1660 return;
1662 // The load must be an extending one and the constant must be within the
1663 // range of the unextended value.
1664 auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1665 uint64_t Value = ConstOp1->getZExtValue();
1666 uint64_t Mask = (1 << NumBits) - 1;
1667 if (Load->getExtensionType() == ISD::SEXTLOAD) {
1668 // Make sure that ConstOp1 is in range of C.Op0.
1669 int64_t SignedValue = ConstOp1->getSExtValue();
1670 if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
1671 return;
1672 if (C.ICmpType != SystemZICMP::SignedOnly) {
1673 // Unsigned comparison between two sign-extended values is equivalent
1674 // to unsigned comparison between two zero-extended values.
1675 Value &= Mask;
1676 } else if (NumBits == 8) {
1677 // Try to treat the comparison as unsigned, so that we can use CLI.
1678 // Adjust CCMask and Value as necessary.
1679 if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
1680 // Test whether the high bit of the byte is set.
1681 Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
1682 else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
1683 // Test whether the high bit of the byte is clear.
1684 Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
1685 else
1686 // No instruction exists for this combination.
1687 return;
1688 C.ICmpType = SystemZICMP::UnsignedOnly;
1689 }
1690 } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
1691 if (Value > Mask)
1692 return;
1693 // If the constant is in range, we can use any comparison.
1694 C.ICmpType = SystemZICMP::Any;
1695 } else
1696 return;
1698 // Make sure that the first operand is an i32 of the right extension type.
1699 ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
1700 ISD::SEXTLOAD :
1701 ISD::ZEXTLOAD);
1702 if (C.Op0.getValueType() != MVT::i32 ||
1703 Load->getExtensionType() != ExtType)
1704 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
1705 Load->getBasePtr(), Load->getPointerInfo(),
1706 Load->getMemoryVT(), Load->getAlignment(),
1707 Load->getMemOperand()->getFlags());
1709 // Make sure that the second operand is an i32 with the right value.
1710 if (C.Op1.getValueType() != MVT::i32 ||
1711 Value != ConstOp1->getZExtValue())
1712 C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
1713 }
1715 // Return true if Op is either an unextended load, or a load suitable
1716 // for integer register-memory comparisons of type ICmpType.
1717 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
1718 auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
1719 if (Load) {
1720 // There are no instructions to compare a register with a memory byte.
1721 if (Load->getMemoryVT() == MVT::i8)
1722 return false;
1723 // Otherwise decide on extension type.
1724 switch (Load->getExtensionType()) {
1725 case ISD::NON_EXTLOAD:
1726 return true;
1727 case ISD::SEXTLOAD:
1728 return ICmpType != SystemZICMP::UnsignedOnly;
1729 case ISD::ZEXTLOAD:
1730 return ICmpType != SystemZICMP::SignedOnly;
1731 default:
1732 break;
1733 }
1734 }
1735 return false;
1736 }
1738 // Return true if it is better to swap the operands of C.
1739 static bool shouldSwapCmpOperands(const Comparison &C) {
1740 // Leave f128 comparisons alone, since they have no memory forms.
1741 if (C.Op0.getValueType() == MVT::f128)
1742 return false;
1744 // Always keep a floating-point constant second, since comparisons with
1745 // zero can use LOAD TEST and comparisons with other constants make a
1746 // natural memory operand.
1747 if (isa<ConstantFPSDNode>(C.Op1))
1748 return false;
1750 // Never swap comparisons with zero since there are many ways to optimize
1751 // those later.
1752 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
1753 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
1754 return false;
1756 // Also keep natural memory operands second if the loaded value is
1757 // only used here. Several comparisons have memory forms.
1758 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
1759 return false;
1761 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
1762 // In that case we generally prefer the memory to be second.
1763 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
1764 // The only exceptions are when the second operand is a constant and
1765 // we can use things like CHHSI.
1766 if (!ConstOp1)
1767 return true;
1768 // The unsigned memory-immediate instructions can handle 16-bit
1769 // unsigned integers.
1770 if (C.ICmpType != SystemZICMP::SignedOnly &&
1771 isUInt<16>(ConstOp1->getZExtValue()))
1772 return false;
1773 // The signed memory-immediate instructions can handle 16-bit
1774 // signed integers.
1775 if (C.ICmpType != SystemZICMP::UnsignedOnly &&
1776 isInt<16>(ConstOp1->getSExtValue()))
1777 return false;
1778 return true;
1779 }
1781 // Try to promote the use of CGFR and CLGFR.
1782 unsigned Opcode0 = C.Op0.getOpcode();
1783 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
1784 return true;
1785 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
1786 return true;
1787 if (C.ICmpType != SystemZICMP::SignedOnly &&
1788 Opcode0 == ISD::AND &&
1789 C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
1790 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
1791 return true;
1793 return false;
1794 }
1796 // Return a version of comparison CC mask CCMask in which the LT and GT
1797 // actions are swapped.
1798 static unsigned reverseCCMask(unsigned CCMask) {
1799 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
1800 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
1801 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
1802 (CCMask & SystemZ::CCMASK_CMP_UO));
1803 }
1805 // Check whether C tests for equality between X and Y and whether X - Y
1806 // or Y - X is also computed. In that case it's better to compare the
1807 // result of the subtraction against zero.
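// For example, if both "x - y" and "x == y" are computed, testing
// "(x - y) == 0" instead lets the subtraction set CC itself.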
1808 static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
1809 Comparison &C) {
1810 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
1811 C.CCMask == SystemZ::CCMASK_CMP_NE) {
1812 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
1813 SDNode *N = *I;
1814 if (N->getOpcode() == ISD::SUB &&
1815 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
1816 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
1817 C.Op0 = SDValue(N, 0);
1818 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
1819 return;
1820 }
1821 }
1822 }
1823 }
1825 // Check whether C compares a floating-point value with zero and if that
1826 // floating-point value is also negated. In this case we can use the
1827 // negation to set CC, so avoiding separate LOAD AND TEST and
1828 // LOAD (NEGATIVE/COMPLEMENT) instructions.
1829 static void adjustForFNeg(Comparison &C) {
1830 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
1831 if (C1 && C1->isZero()) {
1832 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
1833 SDNode *N = *I;
1834 if (N->getOpcode() == ISD::FNEG) {
1835 C.Op0 = SDValue(N, 0);
1836 C.CCMask = reverseCCMask(C.CCMask);
1837 return;
1838 }
1839 }
1840 }
1841 }
1843 // Check whether C compares (shl X, 32) with 0 and whether X is
1844 // also sign-extended. In that case it is better to test the result
1845 // of the sign extension using LTGFR.
1847 // This case is important because InstCombine transforms a comparison
1848 // with (sext (trunc X)) into a comparison with (shl X, 32).
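// For example, "(sext (trunc X)) < 0" becomes "(shl X, 32) < 0"; both
// order the low 32 bits of X against zero, which is exactly what LTGFR
// computes from its sign-extended operand.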
1849 static void adjustForLTGFR(Comparison &C) {
1850 // Check for a comparison between (shl X, 32) and 0.
1851 if (C.Op0.getOpcode() == ISD::SHL &&
1852 C.Op0.getValueType() == MVT::i64 &&
1853 C.Op1.getOpcode() == ISD::Constant &&
1854 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1855 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
1856 if (C1 && C1->getZExtValue() == 32) {
1857 SDValue ShlOp0 = C.Op0.getOperand(0);
1858 // See whether X has any SIGN_EXTEND_INREG uses.
1859 for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
1860 SDNode *N = *I;
1861 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
1862 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
1863 C.Op0 = SDValue(N, 0);
1864 return;
1865 }
1866 }
1867 }
1868 }
1869 }
1871 // If C compares the truncation of an extending load, try to compare
1872 // the untruncated value instead. This exposes more opportunities to
1873 // use CLI and CLHHSI.
1874 static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
1875 Comparison &C) {
1876 if (C.Op0.getOpcode() == ISD::TRUNCATE &&
1877 C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
1878 C.Op1.getOpcode() == ISD::Constant &&
1879 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1880 auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
1881 if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
1882 unsigned Type = L->getExtensionType();
1883 if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
1884 (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
1885 C.Op0 = C.Op0.getOperand(0);
1886 C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
1887 }
1888 }
1889 }
1890 }
1892 // Return true if shift operation N has an in-range constant shift value.
1893 // Store it in ShiftVal if so.
1894 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
1895 auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
1896 if (!Shift)
1897 return false;
1899 uint64_t Amount = Shift->getZExtValue();
1900 if (Amount >= N.getValueSizeInBits())
1901 return false;
1903 ShiftVal = Amount;
1904 return true;
1905 }
1907 // Check whether an AND with Mask is suitable for a TEST UNDER MASK
1908 // instruction and whether the CC value is descriptive enough to handle
1909 // a comparison of type Opcode between the AND result and CmpVal.
1910 // CCMask says which comparison result is being tested and BitSize is
1911 // the number of bits in the operands. If TEST UNDER MASK can be used,
1912 // return the corresponding CC mask, otherwise return 0.
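// For example, "(x & 0x8000) != 0" maps to TMLL with mask 0x8000 and a
// returned CC mask of CCMASK_TM_SOME_1.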
1913 static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
1914 uint64_t Mask, uint64_t CmpVal,
1915 unsigned ICmpType) {
1916 assert(Mask != 0 && "ANDs with zero should have been removed by now");
1918 // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
1919 if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
1920 !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
1921 return 0;
1923 // Work out the masks for the lowest and highest bits.
1924 unsigned HighShift = 63 - countLeadingZeros(Mask);
1925 uint64_t High = uint64_t(1) << HighShift;
1926 uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
1928 // Signed ordered comparisons are effectively unsigned if the sign
1929 // bit is clear.
1930 bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
1932 // Check for equality comparisons with 0, or the equivalent.
1933 if (CmpVal == 0) {
1934 if (CCMask == SystemZ::CCMASK_CMP_EQ)
1935 return SystemZ::CCMASK_TM_ALL_0;
1936 if (CCMask == SystemZ::CCMASK_CMP_NE)
1937 return SystemZ::CCMASK_TM_SOME_1;
1938 }
1939 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
1940 if (CCMask == SystemZ::CCMASK_CMP_LT)
1941 return SystemZ::CCMASK_TM_ALL_0;
1942 if (CCMask == SystemZ::CCMASK_CMP_GE)
1943 return SystemZ::CCMASK_TM_SOME_1;
1944 }
1945 if (EffectivelyUnsigned && CmpVal < Low) {
1946 if (CCMask == SystemZ::CCMASK_CMP_LE)
1947 return SystemZ::CCMASK_TM_ALL_0;
1948 if (CCMask == SystemZ::CCMASK_CMP_GT)
1949 return SystemZ::CCMASK_TM_SOME_1;
1950 }
1952 // Check for equality comparisons with the mask, or the equivalent.
1953 if (CmpVal == Mask) {
1954 if (CCMask == SystemZ::CCMASK_CMP_EQ)
1955 return SystemZ::CCMASK_TM_ALL_1;
1956 if (CCMask == SystemZ::CCMASK_CMP_NE)
1957 return SystemZ::CCMASK_TM_SOME_0;
1958 }
1959 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
1960 if (CCMask == SystemZ::CCMASK_CMP_GT)
1961 return SystemZ::CCMASK_TM_ALL_1;
1962 if (CCMask == SystemZ::CCMASK_CMP_LE)
1963 return SystemZ::CCMASK_TM_SOME_0;
1964 }
1965 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
1966 if (CCMask == SystemZ::CCMASK_CMP_GE)
1967 return SystemZ::CCMASK_TM_ALL_1;
1968 if (CCMask == SystemZ::CCMASK_CMP_LT)
1969 return SystemZ::CCMASK_TM_SOME_0;
1970 }
1972 // Check for ordered comparisons with the top bit.
1973 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
1974 if (CCMask == SystemZ::CCMASK_CMP_LE)
1975 return SystemZ::CCMASK_TM_MSB_0;
1976 if (CCMask == SystemZ::CCMASK_CMP_GT)
1977 return SystemZ::CCMASK_TM_MSB_1;
1978 }
1979 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
1980 if (CCMask == SystemZ::CCMASK_CMP_LT)
1981 return SystemZ::CCMASK_TM_MSB_0;
1982 if (CCMask == SystemZ::CCMASK_CMP_GE)
1983 return SystemZ::CCMASK_TM_MSB_1;
1984 }
1986 // If there are just two bits, we can do equality checks for Low and High
1987 // as well.
1988 if (Mask == Low + High) {
1989 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
1990 return SystemZ::CCMASK_TM_MIXED_MSB_0;
1991 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
1992 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
1993 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
1994 return SystemZ::CCMASK_TM_MIXED_MSB_1;
1995 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
1996 return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
1997 }
1999 // Looks like we've exhausted our options.
2000 return 0;
2001 }
2003 // See whether C can be implemented as a TEST UNDER MASK instruction.
2004 // Update the arguments with the TM version if so.
2005 static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
2006 Comparison &C) {
2007 // Check that we have a comparison with a constant.
2008 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2009 if (!ConstOp1)
2010 return;
2011 uint64_t CmpVal = ConstOp1->getZExtValue();
2013 // Check whether the nonconstant input is an AND with a constant mask.
2014 Comparison NewC(C);
2015 uint64_t MaskVal;
2016 ConstantSDNode *Mask = nullptr;
2017 if (C.Op0.getOpcode() == ISD::AND) {
2018 NewC.Op0 = C.Op0.getOperand(0);
2019 NewC.Op1 = C.Op0.getOperand(1);
2020 Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2021 if (!Mask)
2022 return;
2023 MaskVal = Mask->getZExtValue();
2024 } else {
2025 // There is no instruction to compare with a 64-bit immediate
2026 // so use TMHH instead if possible. We need an unsigned ordered
2027 // comparison with an i64 immediate.
2028 if (NewC.Op0.getValueType() != MVT::i64 ||
2029 NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2030 NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
2031 NewC.ICmpType == SystemZICMP::SignedOnly)
2032 return;
2033 // Convert LE and GT comparisons into LT and GE.
2034 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2035 NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
2036 if (CmpVal == uint64_t(-1))
2037 return;
2038 CmpVal += 1;
2039 NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2040 }
2041 // If the low N bits of Op1 are zero then the low N bits of Op0 can
2042 // be masked off without changing the result.
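// For example, an unsigned "x >= 0x100" sets MaskVal to ~0xff, since
// "(x & ~0xff) >= 0x100" gives the same result.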
2043 MaskVal = -(CmpVal & -CmpVal);
2044 NewC.ICmpType = SystemZICMP::UnsignedOnly;
2045 }
2046 if (MaskVal == 0)
2047 return;
2049 // Check whether the combination of mask, comparison value and comparison
2050 // type are suitable.
2051 unsigned BitSize = NewC.Op0.getValueSizeInBits();
2052 unsigned NewCCMask, ShiftVal;
2053 if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2054 NewC.Op0.getOpcode() == ISD::SHL &&
2055 isSimpleShift(NewC.Op0, ShiftVal) &&
2056 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2057 MaskVal >> ShiftVal,
2058 CmpVal >> ShiftVal,
2059 SystemZICMP::Any))) {
2060 NewC.Op0 = NewC.Op0.getOperand(0);
2061 MaskVal >>= ShiftVal;
2062 } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2063 NewC.Op0.getOpcode() == ISD::SRL &&
2064 isSimpleShift(NewC.Op0, ShiftVal) &&
2065 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2066 MaskVal << ShiftVal,
2067 CmpVal << ShiftVal,
2068 SystemZICMP::UnsignedOnly))) {
2069 NewC.Op0 = NewC.Op0.getOperand(0);
2070 MaskVal <<= ShiftVal;
2071 } else {
2072 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
2073 NewC.ICmpType);
2074 if (!NewCCMask)
2075 return;
2076 }
2078 // Go ahead and make the change.
2079 C.Opcode = SystemZISD::TM;
2080 C.Op0 = NewC.Op0;
2081 if (Mask && Mask->getZExtValue() == MaskVal)
2082 C.Op1 = SDValue(Mask, 0);
2083 else
2084 C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
2085 C.CCValid = SystemZ::CCMASK_TM;
2086 C.CCMask = NewCCMask;
2087 }
2089 // Return a Comparison that tests the condition-code result of intrinsic
2090 // node Call against constant integer CC using comparison code Cond.
2091 // Opcode is the opcode of the SystemZISD operation for the intrinsic
2092 // and CCValid is the set of possible condition-code results.
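// For example, testing "CC == 2" with Cond == ISD::SETEQ yields
// CCMask == 1 << (3 - 2), which is SystemZ::CCMASK_2.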
2093 static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
2094 SDValue Call, unsigned CCValid, uint64_t CC,
2095 ISD::CondCode Cond) {
2096 Comparison C(Call, SDValue());
2097 C.Opcode = Opcode;
2098 C.CCValid = CCValid;
2099 if (Cond == ISD::SETEQ)
2100 // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
2101 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2102 else if (Cond == ISD::SETNE)
2103 // ...and the inverse of that.
2104 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2105 else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2106 // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2107 // always true for CC>3.
2108 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2109 else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2110 // ...and the inverse of that.
2111 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2112 else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2113 // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2114 // always true for CC>3.
2115 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2116 else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2117 // ...and the inverse of that.
2118 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2119 else
2120 llvm_unreachable("Unexpected integer comparison type");
2121 C.CCMask &= CCValid;
2122 return C;
2123 }
2125 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2126 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
2127 ISD::CondCode Cond, const SDLoc &DL) {
2128 if (CmpOp1.getOpcode() == ISD::Constant) {
2129 uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2130 unsigned Opcode, CCValid;
2131 if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
2132 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
2133 isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
2134 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2135 if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2136 CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
2137 isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
2138 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2139 }
2140 Comparison C(CmpOp0, CmpOp1);
2141 C.CCMask = CCMaskForCondCode(Cond);
2142 if (C.Op0.getValueType().isFloatingPoint()) {
2143 C.CCValid = SystemZ::CCMASK_FCMP;
2144 C.Opcode = SystemZISD::FCMP;
2145 adjustForFNeg(C);
2146 } else {
2147 C.CCValid = SystemZ::CCMASK_ICMP;
2148 C.Opcode = SystemZISD::ICMP;
2149 // Choose the type of comparison. Equality and inequality tests can
2150 // use either signed or unsigned comparisons. The choice also doesn't
2151 // matter if both sign bits are known to be clear. In those cases we
2152 // want to give the main isel code the freedom to choose whichever
2153 // form fits best.
2154 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2155 C.CCMask == SystemZ::CCMASK_CMP_NE ||
2156 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
2157 C.ICmpType = SystemZICMP::Any;
2158 else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
2159 C.ICmpType = SystemZICMP::UnsignedOnly;
2160 else
2161 C.ICmpType = SystemZICMP::SignedOnly;
2162 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
2163 adjustZeroCmp(DAG, DL, C);
2164 adjustSubwordCmp(DAG, DL, C);
2165 adjustForSubtraction(DAG, DL, C);
2166 adjustForLTGFR(C);
2167 adjustICmpTruncate(DAG, DL, C);
2168 }
2170 if (shouldSwapCmpOperands(C)) {
2171 std::swap(C.Op0, C.Op1);
2172 C.CCMask = reverseCCMask(C.CCMask);
2173 }
2175 adjustForTestUnderMask(DAG, DL, C);
2176 return C;
2177 }
2179 // Emit the comparison instruction described by C.
2180 static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
2181 if (!C.Op1.getNode()) {
2182 SDValue Op;
2183 switch (C.Op0.getOpcode()) {
2184 case ISD::INTRINSIC_W_CHAIN:
2185 Op = emitIntrinsicWithChainAndGlue(DAG, C.Op0, C.Opcode);
2186 break;
2187 case ISD::INTRINSIC_WO_CHAIN:
2188 Op = emitIntrinsicWithGlue(DAG, C.Op0, C.Opcode);
2189 break;
2190 default:
2191 llvm_unreachable("Invalid comparison operands");
2192 }
2193 return SDValue(Op.getNode(), Op->getNumValues() - 1);
2194 }
2195 if (C.Opcode == SystemZISD::ICMP)
2196 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1,
2197 DAG.getConstant(C.ICmpType, DL, MVT::i32));
2198 if (C.Opcode == SystemZISD::TM) {
2199 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
2200 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
2201 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1,
2202 DAG.getConstant(RegisterOnly, DL, MVT::i32));
2203 }
2204 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1);
2205 }
2207 // Implement a 32-bit *MUL_LOHI operation by extending both operands to
2208 // 64 bits. Extend is the extension type to use. Store the high part
2209 // in Hi and the low part in Lo.
2210 static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
2211 SDValue Op0, SDValue Op1, SDValue &Hi,
2212 SDValue &Lo) {
2213 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
2214 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
2215 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
2216 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
2217 DAG.getConstant(32, DL, MVT::i64));
2218 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
2219 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
2220 }
2222 // Lower a binary operation that produces two VT results, one in each
2223 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
2224 // Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
2225 // on the extended Op0 and (unextended) Op1. Store the even register result
2226 // in Even and the odd register result in Odd.
2227 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2228 unsigned Extend, unsigned Opcode, SDValue Op0,
2229 SDValue Op1, SDValue &Even, SDValue &Odd) {
2230 SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
2231 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
2232 SDValue(In128, 0), Op1);
2233 bool Is32Bit = is32Bit(VT);
2234 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
2235 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
2236 }
2238 // Return an i32 value that is 1 if the CC value produced by Glue is
2239 // in the mask CCMask and 0 otherwise. CC is known to have a value
2240 // in CCValid, so other values can be ignored.
2241 static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue Glue,
2242 unsigned CCValid, unsigned CCMask) {
2243 IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
2244 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
2246 if (Conversion.XORValue)
2247 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
2248 DAG.getConstant(Conversion.XORValue, DL, MVT::i32));
2250 if (Conversion.AddValue)
2251 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
2252 DAG.getConstant(Conversion.AddValue, DL, MVT::i32));
2254 // The SHR/AND sequence should get optimized to an RISBG.
2255 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
2256 DAG.getConstant(Conversion.Bit, DL, MVT::i32));
2257 if (Conversion.Bit != 31)
2258 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
2259 DAG.getConstant(1, DL, MVT::i32));
2260 return Result;
2261 }
2263 // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2264 // be done directly. IsFP is true if CC is for a floating-point rather than
2265 // integer comparison.
2266 static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
2267 switch (CC) {
2268 case ISD::SETOEQ:
2269 case ISD::SETEQ:
2270 return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;
2272 case ISD::SETOGE:
2273 case ISD::SETGE:
2274 return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);
2276 case ISD::SETOGT:
2277 case ISD::SETGT:
2278 return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;
2280 case ISD::SETUGT:
2281 return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;
2283 default:
2284 return 0;
2285 }
2286 }
2288 // Return the SystemZISD vector comparison operation for CC or its inverse,
2289 // or 0 if neither can be done directly. Indicate in Invert whether the
2290 // result is for the inverse of CC. IsFP is true if CC is for a
2291 // floating-point rather than integer comparison.
2292 static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
2293 bool &Invert) {
2294 if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2295 Invert = false;
2296 return Opcode;
2297 }
2299 CC = ISD::getSetCCInverse(CC, !IsFP);
2300 if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2301 Invert = true;
2302 return Opcode;
2303 }
2305 return 0;
2306 }
2308 // Return a v2f64 that contains the extended form of elements Start and Start+1
2309 // of v4f32 value Op.
2310 static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
2311 SDValue Op) {
2312 int Mask[] = { Start, -1, Start + 1, -1 };
2313 Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
2314 return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
2315 }
2317 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2318 // producing a result of type VT.
2319 static SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &DL,
2320 EVT VT, SDValue CmpOp0, SDValue CmpOp1) {
2321 // There is no hardware support for v4f32, so extend the vector into
2322 // two v2f64s and compare those.
2323 if (CmpOp0.getValueType() == MVT::v4f32) {
2324 SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
2325 SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
2326 SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
2327 SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
2328 SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
2329 SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
2330 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2331 }
2332 return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
2333 }
2335 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2336 // an integer mask of type VT.
2337 static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2338 ISD::CondCode CC, SDValue CmpOp0,
2339 SDValue CmpOp1) {
2340 bool IsFP = CmpOp0.getValueType().isFloatingPoint();
2341 bool Invert = false;
2342 SDValue Cmp;
2343 switch (CC) {
2344 // Handle tests for order using (or (ogt y x) (oge x y)).
2345 case ISD::SETUO:
2346 Invert = true;
2347 case ISD::SETO: {
2348 assert(IsFP && "Unexpected integer comparison");
2349 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
2350 SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1);
2351 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
2352 break;
2353 }
2355 // Handle <> tests using (or (ogt y x) (ogt x y)).
2356 case ISD::SETUEQ:
2357 Invert = true;
2358 case ISD::SETONE: {
2359 assert(IsFP && "Unexpected integer comparison");
2360 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
2361 SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1);
2362 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
2363 break;
2364 }
2366 // Otherwise a single comparison is enough. It doesn't really
2367 // matter whether we try the inversion or the swap first, since
2368 // there are no cases where both work.
2369 default:
2370 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
2371 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
2372 else {
2373 CC = ISD::getSetCCSwappedOperands(CC);
2374 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
2375 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
2376 else
2377 llvm_unreachable("Unhandled comparison");
2378 }
2379 break;
2380 }
2381 if (Invert) {
2382 SDValue Mask = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
2383 DAG.getConstant(65535, DL, MVT::i32));
2384 Mask = DAG.getNode(ISD::BITCAST, DL, VT, Mask);
2385 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
2386 }
2387 return Cmp;
2388 }
2390 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
2391 SelectionDAG &DAG) const {
2392 SDValue CmpOp0 = Op.getOperand(0);
2393 SDValue CmpOp1 = Op.getOperand(1);
2394 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2395 SDLoc DL(Op);
2396 EVT VT = Op.getValueType();
2397 if (VT.isVector())
2398 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
2400 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2401 SDValue Glue = emitCmp(DAG, DL, C);
2402 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
2403 }
2405 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2406 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2407 SDValue CmpOp0 = Op.getOperand(2);
2408 SDValue CmpOp1 = Op.getOperand(3);
2409 SDValue Dest = Op.getOperand(4);
2410 SDLoc DL(Op);
2412 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2413 SDValue Glue = emitCmp(DAG, DL, C);
2414 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
2415 Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
2416 DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, Glue);
2417 }
2419 // Return true if Pos is CmpOp and Neg is the negative of CmpOp,
2420 // allowing Pos and Neg to be wider than CmpOp.
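// For example, TrueOp == x with FalseOp == (sub 0, x) matches when the
// comparison operand is x itself, as in the selects that compute abs(x).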
2421 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
2422 return (Neg.getOpcode() == ISD::SUB &&
2423 Neg.getOperand(0).getOpcode() == ISD::Constant &&
2424 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
2425 Neg.getOperand(1) == Pos &&
2426 (Pos == CmpOp ||
2427 (Pos.getOpcode() == ISD::SIGN_EXTEND &&
2428 Pos.getOperand(0) == CmpOp)));
2429 }
2431 // Return the absolute or negative absolute of Op; IsNegative decides which.
2432 static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
2433 bool IsNegative) {
2434 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
2435 if (IsNegative)
2436 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
2437 DAG.getConstant(0, DL, Op.getValueType()), Op);
2438 return Op;
2439 }
2441 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
2442 SelectionDAG &DAG) const {
2443 SDValue CmpOp0 = Op.getOperand(0);
2444 SDValue CmpOp1 = Op.getOperand(1);
2445 SDValue TrueOp = Op.getOperand(2);
2446 SDValue FalseOp = Op.getOperand(3);
2447 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2448 SDLoc DL(Op);
2450 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2452 // Check for absolute and negative-absolute selections, including those
2453 // where the comparison value is sign-extended (for LPGFR and LNGFR).
2454 // This check supplements the one in DAGCombiner.
2455 if (C.Opcode == SystemZISD::ICMP &&
2456 C.CCMask != SystemZ::CCMASK_CMP_EQ &&
2457 C.CCMask != SystemZ::CCMASK_CMP_NE &&
2458 C.Op1.getOpcode() == ISD::Constant &&
2459 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2460 if (isAbsolute(C.Op0, TrueOp, FalseOp))
2461 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
2462 if (isAbsolute(C.Op0, FalseOp, TrueOp))
2463 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
2464 }
2466 SDValue Glue = emitCmp(DAG, DL, C);
2468 // Special case for handling -1/0 results. The shifts we use here
2469 // should get optimized with the IPM conversion sequence.
2470 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp);
2471 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp);
2472 if (TrueC && FalseC) {
2473 int64_t TrueVal = TrueC->getSExtValue();
2474 int64_t FalseVal = FalseC->getSExtValue();
2475 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) {
2476 // Invert the condition if we want -1 on false.
2477 if (TrueVal == 0)
2478 C.CCMask ^= C.CCValid;
2479 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
2480 EVT VT = Op.getValueType();
2481 // Extend the result to VT. Upper bits are ignored.
2482 if (!is32Bit(VT))
2483 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result);
2484 // Sign-extend from the low bit.
2485 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32);
2486 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt);
2487 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt);
2488 }
2489 }
2491 SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32),
2492 DAG.getConstant(C.CCMask, DL, MVT::i32), Glue};
2494 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
2495 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
2496 }
2498 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
2499 SelectionDAG &DAG) const {
2500 SDLoc DL(Node);
2501 const GlobalValue *GV = Node->getGlobal();
2502 int64_t Offset = Node->getOffset();
2503 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2504 CodeModel::Model CM = DAG.getTarget().getCodeModel();
2506 SDValue Result;
2507 if (Subtarget.isPC32DBLSymbol(GV, CM)) {
2508 // Assign anchors at 1<<12 byte boundaries.
2509 uint64_t Anchor = Offset & ~uint64_t(0xfff);
2510 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
2511 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2513 // The offset can be folded into the address if it is aligned to a halfword.
2514 Offset -= Anchor;
2515 if (Offset != 0 && (Offset & 1) == 0) {
2516 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
2517 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
2518 Offset = 0;
2519 }
2520 } else {
2521 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
2522 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2523 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
2524 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2525 }
2527 // If there was a non-zero offset that we didn't fold, create an explicit
2528 // addition for it.
2529 if (Offset != 0)
2530 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
2531 DAG.getConstant(Offset, DL, PtrVT));
2533 return Result;
2534 }
2536 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
2537 SelectionDAG &DAG,
2538 unsigned Opcode,
2539 SDValue GOTOffset) const {
2540 SDLoc DL(Node);
2541 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2542 SDValue Chain = DAG.getEntryNode();
2543 SDValue Glue;
2545 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
2546 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
2547 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
2548 Glue = Chain.getValue(1);
2549 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
2550 Glue = Chain.getValue(1);
2552 // The first call operand is the chain and the second is the TLS symbol.
2553 SmallVector<SDValue, 8> Ops;
2554 Ops.push_back(Chain);
2555 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
2556 Node->getValueType(0),
2557 0));
2559 // Add argument registers to the end of the list so that they are
2560 // known live into the call.
2561 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
2562 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
2564 // Add a register mask operand representing the call-preserved registers.
2565 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2566 const uint32_t *Mask =
2567 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
2568 assert(Mask && "Missing call preserved mask for calling convention");
2569 Ops.push_back(DAG.getRegisterMask(Mask));
2571 // Glue the call to the argument copies.
2572 Ops.push_back(Glue);
2574 // Emit the call.
2575 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2576 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
2577 Glue = Chain.getValue(1);
2579 // Copy the return value from %r2.
2580 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
2581 }
2583 SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
2584 SelectionDAG &DAG) const {
2585 SDValue Chain = DAG.getEntryNode();
2586 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2588 // The high part of the thread pointer is in access register 0.
2589 SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
2590 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
2592 // The low part of the thread pointer is in access register 1.
2593 SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
2594 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
2596 // Merge them into a single 64-bit address.
2597 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
2598 DAG.getConstant(32, DL, PtrVT));
2599 return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
2600 }
2602 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
2603 SelectionDAG &DAG) const {
2604 if (DAG.getTarget().Options.EmulatedTLS)
2605 return LowerToTLSEmulatedModel(Node, DAG);
2607 const GlobalValue *GV = Node->getGlobal();
2608 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2609 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
2610 SDLoc DL(Node);
2611 SDValue TP = lowerThreadPointer(DL, DAG);
2613 // Get the offset of GA from the thread pointer, based on the TLS model.
2614 SDValue Offset;
2615 switch (model) {
2616 case TLSModel::GeneralDynamic: {
2617 // Load the GOT offset of the tls_index (module ID / per-symbol offset).
2618 SystemZConstantPoolValue *CPV =
2619 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
2621 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2622 Offset = DAG.getLoad(
2623 PtrVT, DL, DAG.getEntryNode(), Offset,
2624 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2626 // Call __tls_get_offset to retrieve the offset.
2627 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
2628 break;
2629 }
2631 case TLSModel::LocalDynamic: {
2632 // Load the GOT offset of the module ID.
2633 SystemZConstantPoolValue *CPV =
2634 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
2636 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2637 Offset = DAG.getLoad(
2638 PtrVT, DL, DAG.getEntryNode(), Offset,
2639 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2641 // Call __tls_get_offset to retrieve the module base offset.
2642 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
2644 // Note: The SystemZLDCleanupPass will remove redundant computations
2645 // of the module base offset. Count total number of local-dynamic
2646 // accesses to trigger execution of that pass.
2647 SystemZMachineFunctionInfo* MFI =
2648 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
2649 MFI->incNumLocalDynamicTLSAccesses();
2651 // Add the per-symbol offset.
2652 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
2654 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
2655 DTPOffset = DAG.getLoad(
2656 PtrVT, DL, DAG.getEntryNode(), DTPOffset,
2657 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2659 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
2660 break;
2661 }
2663 case TLSModel::InitialExec: {
2664 // Load the offset from the GOT.
2665 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2666 SystemZII::MO_INDNTPOFF);
2667 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
2668 Offset =
2669 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
2670 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2671 break;
2672 }
2674 case TLSModel::LocalExec: {
2675 // Force the offset into the constant pool and load it from there.
2676 SystemZConstantPoolValue *CPV =
2677 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
2679 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2680 Offset = DAG.getLoad(
2681 PtrVT, DL, DAG.getEntryNode(), Offset,
2682 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2683 break;
2684 }
2685 }
2687 // Add the base and offset together.
2688 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
2689 }
2691 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
2692 SelectionDAG &DAG) const {
2693 SDLoc DL(Node);
2694 const BlockAddress *BA = Node->getBlockAddress();
2695 int64_t Offset = Node->getOffset();
2696 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2698 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
2699 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2700 return Result;
2701 }
2703 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
2704 SelectionDAG &DAG) const {
2705 SDLoc DL(JT);
2706 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2707 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2709 // Use LARL to load the address of the table.
2710 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2711 }
2713 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
2714 SelectionDAG &DAG) const {
2715 SDLoc DL(CP);
2716 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2718 SDValue Result;
2719 if (CP->isMachineConstantPoolEntry())
2720 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
2721 CP->getAlignment());
2723 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
2724 CP->getAlignment(), CP->getOffset());
2726 // Use LARL to load the address of the constant pool entry.
2727 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2728 }
2730 SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
2731 SelectionDAG &DAG) const {
2732 MachineFunction &MF = DAG.getMachineFunction();
2733 MachineFrameInfo &MFI = MF.getFrameInfo();
2734 MFI.setFrameAddressIsTaken(true);
2736 SDLoc DL(Op);
2737 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2738 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2740 // If the back chain frame index has not been allocated yet, do so.
2741 SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
2742 int BackChainIdx = FI->getFramePointerSaveIndex();
2743 if (!BackChainIdx) {
2744 // By definition, the frame address is the address of the back chain.
2745 BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
2746 FI->setFramePointerSaveIndex(BackChainIdx);
2748 SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);
2750 // FIXME The frontend should detect this case.
2751 if (Depth > 0)
2752 report_fatal_error("Unsupported stack frame traversal count");
2754 return BackChain;
2755 }
2758 SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
2759 SelectionDAG &DAG) const {
2760 MachineFunction &MF = DAG.getMachineFunction();
2761 MachineFrameInfo &MFI = MF.getFrameInfo();
2762 MFI.setReturnAddressIsTaken(true);
2764 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2765 return SDValue();
2767 SDLoc DL(Op);
2768 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2769 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2771 // FIXME The frontend should detect this case.
2772 if (Depth > 0)
2773 report_fatal_error("Unsupported stack frame traversal count");
2776 // Return R14D, which has the return address. Mark it an implicit live-in.
2777 unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
2778 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
2779 }
2781 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
2782 SelectionDAG &DAG) const {
2783 SDLoc DL(Op);
2784 SDValue In = Op.getOperand(0);
2785 EVT InVT = In.getValueType();
2786 EVT ResVT = Op.getValueType();
2788 // Convert loads directly. This is normally done by DAGCombiner,
2789 // but we need this case for bitcasts that are created during lowering
2790 // and which are then lowered themselves.
2791 if (auto *LoadN = dyn_cast<LoadSDNode>(In))
2792 return DAG.getLoad(ResVT, DL, LoadN->getChain(), LoadN->getBasePtr(),
2793 LoadN->getMemOperand());
2795 if (InVT == MVT::i32 && ResVT == MVT::f32) {
2796 SDValue In64;
2797 if (Subtarget.hasHighWord()) {
2798 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
2799 MVT::i64);
2800 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
2801 MVT::i64, SDValue(U64, 0), In);
2802 } else {
2803 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
2804 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
2805 DAG.getConstant(32, DL, MVT::i64));
2806 }
2807 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
2808 return DAG.getTargetExtractSubreg(SystemZ::subreg_r32,
2809 DL, MVT::f32, Out64);
2810 }
2811 if (InVT == MVT::f32 && ResVT == MVT::i32) {
2812 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
2813 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_r32, DL,
2814 MVT::f64, SDValue(U64, 0), In);
2815 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
2816 if (Subtarget.hasHighWord())
2817 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
2818 MVT::i32, Out64);
2819 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
2820 DAG.getConstant(32, DL, MVT::i64));
2821 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
2822 }
2823 llvm_unreachable("Unexpected bitcast combination");
2824 }
2826 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
2827 SelectionDAG &DAG) const {
2828 MachineFunction &MF = DAG.getMachineFunction();
2829 SystemZMachineFunctionInfo *FuncInfo =
2830 MF.getInfo<SystemZMachineFunctionInfo>();
2831 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2833 SDValue Chain = Op.getOperand(0);
2834 SDValue Addr = Op.getOperand(1);
2835 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2836 SDLoc DL(Op);
2838 // The initial values of each field.
2839 const unsigned NumFields = 4;
2840 SDValue Fields[NumFields] = {
2841 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
2842 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
2843 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
2844 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
2845 };
2847 // Store each field into its respective slot.
2848 SDValue MemOps[NumFields];
2849 unsigned Offset = 0;
2850 for (unsigned I = 0; I < NumFields; ++I) {
2851 SDValue FieldAddr = Addr;
2852 if (Offset != 0)
2853 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
2854 DAG.getIntPtrConstant(Offset, DL));
2855 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
2856 MachinePointerInfo(SV, Offset));
2857 Offset += 8;
2858 }
2859 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
2860 }
2862 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
2863 SelectionDAG &DAG) const {
2864 SDValue Chain = Op.getOperand(0);
2865 SDValue DstPtr = Op.getOperand(1);
2866 SDValue SrcPtr = Op.getOperand(2);
2867 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
2868 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2869 SDLoc DL(Op);
2871 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
2872 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
2873 /*isTailCall*/false,
2874 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
2875 }
2877 SDValue SystemZTargetLowering::
2878 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
2879 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
2880 MachineFunction &MF = DAG.getMachineFunction();
2881 bool RealignOpt = !MF.getFunction()->hasFnAttribute("no-realign-stack");
2882 bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
2884 SDValue Chain = Op.getOperand(0);
2885 SDValue Size = Op.getOperand(1);
2886 SDValue Align = Op.getOperand(2);
2887 SDLoc DL(Op);
2889 // If user has set the no alignment function attribute, ignore
2890 // alloca alignments.
2891 uint64_t AlignVal = (RealignOpt ?
2892 dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);
2894 uint64_t StackAlign = TFI->getStackAlignment();
2895 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
2896 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
2898 unsigned SPReg = getStackPointerRegisterToSaveRestore();
2899 SDValue NeededSpace = Size;
2901 // Get a reference to the stack pointer.
2902 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
2904 // If we need a backchain, save it now.
2905 SDValue Backchain;
2906 if (StoreBackchain)
2907 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
2909 // Add extra space for alignment if needed.
2910 if (ExtraAlignSpace)
2911 NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
2912 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
2914 // Get the new stack pointer value.
2915 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
2917 // Copy the new stack pointer back.
2918 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
2920 // The allocated data lives above the 160 bytes allocated for the standard
2921 // frame, plus any outgoing stack arguments. We don't know how much that
2922 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
2923 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
2924 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
2926 // Dynamically realign if needed.
2927 if (RequiredAlign > StackAlign) {
2928 Result =
2929 DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
2930 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
2931 Result =
2932 DAG.getNode(ISD::AND, DL, MVT::i64, Result,
2933 DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
2934 }
2936 if (StoreBackchain)
2937 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
2939 SDValue Ops[2] = { Result, Chain };
2940 return DAG.getMergeValues(Ops, DL);
2941 }
2943 SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
2944 SDValue Op, SelectionDAG &DAG) const {
2945 SDLoc DL(Op);
2947 return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
2948 }
2950 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
2951 SelectionDAG &DAG) const {
2952 EVT VT = Op.getValueType();
2953 SDLoc DL(Op);
2954 SDValue Ops[2];
2955 if (is32Bit(VT))
2956 // Just do a normal 64-bit multiplication and extract the results.
2957 // We define this so that it can be used for constant division.
2958 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
2959 Op.getOperand(1), Ops[1], Ops[0]);
2960 else {
2961 // Do a full 128-bit multiplication based on UMUL_LOHI64:
2963 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
2965 // but using the fact that the upper halves are either all zeros
2966 // or all ones:
2968 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
2970 // and grouping the right terms together since they are quicker than the
2971 // multiplication:
2973 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
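// (lh is either 0 or -1 here, so (lh * rl) is either 0 or -rl, which
// equals -(lh & rl); the same argument applies to (ll * rh).)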
2974 SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
2975 SDValue LL = Op.getOperand(0);
2976 SDValue RL = Op.getOperand(1);
2977 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
2978 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
2979 // UMUL_LOHI64 returns the low result in the odd register and the high
2980 // result in the even register. SMUL_LOHI is defined to return the
2981 // low half first, so the results are in reverse order.
2982 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
2983 LL, RL, Ops[1], Ops[0]);
2984 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
2985 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
2986 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
2987 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
2988 }
2989 return DAG.getMergeValues(Ops, DL);
2990 }
2992 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
2993 SelectionDAG &DAG) const {
2994 EVT VT = Op.getValueType();
2995 SDLoc DL(Op);
2996 SDValue Ops[2];
2997 if (is32Bit(VT))
2998 // Just do a normal 64-bit multiplication and extract the results.
2999 // We define this so that it can be used for constant division.
3000 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
3001 Op.getOperand(1), Ops[1], Ops[0]);
3002 else
3003 // UMUL_LOHI64 returns the low result in the odd register and the high
3004 // result in the even register. UMUL_LOHI is defined to return the
3005 // low half first, so the results are in reverse order.
3006 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
3007 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3008 return DAG.getMergeValues(Ops, DL);
3009 }
3011 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
3012 SelectionDAG &DAG) const {
3013 SDValue Op0 = Op.getOperand(0);
3014 SDValue Op1 = Op.getOperand(1);
3015 EVT VT = Op.getValueType();
3016 SDLoc DL(Op);
3017 unsigned Opcode;
3019 // We use DSGF for 32-bit division.
3020 if (is32Bit(VT)) {
3021 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
3022 Opcode = SystemZISD::SDIVREM32;
3023 } else if (DAG.ComputeNumSignBits(Op1) > 32) {
3024 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
3025 Opcode = SystemZISD::SDIVREM32;
3026 } else
3027 Opcode = SystemZISD::SDIVREM64;
3029 // DSG(F) takes a 64-bit dividend, so the even register in the GR128
3030 // input is "don't care". The instruction returns the remainder in
3031 // the even register and the quotient in the odd register.
3032 SDValue Ops[2];
3033 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
3034 Op0, Op1, Ops[1], Ops[0]);
3035 return DAG.getMergeValues(Ops, DL);
3036 }
3038 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
3039 SelectionDAG &DAG) const {
3040 EVT VT = Op.getValueType();
3041 SDLoc DL(Op);
3043 // DL(G) uses a double-width dividend, so we need to clear the even
3044 // register in the GR128 input. The instruction returns the remainder
3045 // in the even register and the quotient in the odd register.
3046 SDValue Ops[2];
3047 if (is32Bit(VT))
3048 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
3049 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3050 else
3051 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
3052 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3053 return DAG.getMergeValues(Ops, DL);
3054 }
3056 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
3057 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
3059 // Get the known-zero masks for each operand.
3060 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
3061 APInt KnownZero[2], KnownOne[2];
3062 DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]);
3063 DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]);
3065 // See if the upper 32 bits of one operand and the lower 32 bits of the
3066 // other are known zero. They are the low and high operands respectively.
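// For example, in (or (and x, 0xffffffff00000000), (zext y)) the AND is
// the high operand and the zero-extended y is the low operand.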
3067 uint64_t Masks[] = { KnownZero[0].getZExtValue(),
3068 KnownZero[1].getZExtValue() };
3069 unsigned High, Low;
3070 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
3071 High = 1, Low = 0;
3072 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
3073 High = 0, Low = 1;
3074 else
3075 return Op;
3077 SDValue LowOp = Ops[Low];
3078 SDValue HighOp = Ops[High];
3080 // If the high part is a constant, we're better off using IILH.
3081 if (HighOp.getOpcode() == ISD::Constant)
3082 return Op;
3084 // If the low part is a constant that is outside the range of LHI,
3085 // then we're better off using IILF.
3086 if (LowOp.getOpcode() == ISD::Constant) {
3087 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
3088 if (!isInt<16>(Value))
3089 return Op;
3090 }
3092 // Check whether the high part is an AND that doesn't change the
3093 // high 32 bits and just masks out low bits. We can skip it if so.
3094 if (HighOp.getOpcode() == ISD::AND &&
3095 HighOp.getOperand(1).getOpcode() == ISD::Constant) {
3096 SDValue HighOp0 = HighOp.getOperand(0);
3097 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
3098 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
3099 HighOp = HighOp0;
3100 }
3102 // Take advantage of the fact that all GR32 operations only change the
3103 // low 32 bits by truncating Low to an i32 and inserting it directly
3104 // using a subreg. The interesting cases are those where the truncation
3105 // can be folded.
3106 SDLoc DL(Op);
3107 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
3108 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
3109 MVT::i64, HighOp, Low32);
3110 }
3112 SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
3113 SelectionDAG &DAG) const {
3114 EVT VT = Op.getValueType();
3115 SDLoc DL(Op);
3116 Op = Op.getOperand(0);
3118 // Handle vector types via VPOPCT.
3119 if (VT.isVector()) {
3120 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
3121 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
3122 switch (VT.getScalarSizeInBits()) {
3123 case 8:
3124 break;
3125 case 16: {
3126 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
3127 SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
3128 SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
3129 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3130 Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
3131 break;
3132 }
3133 case 32: {
3134 SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
3135 DAG.getConstant(0, DL, MVT::i32));
3136 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3137 break;
3138 }
3139 case 64: {
3140 SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
3141 DAG.getConstant(0, DL, MVT::i32));
3142 Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
3143 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3144 break;
3145 }
3146 default:
3147 llvm_unreachable("Unexpected type");
3148 }
3149 return Op;
3150 }
3152 // Get the known-zero mask for the operand.
3153 APInt KnownZero, KnownOne;
3154 DAG.computeKnownBits(Op, KnownZero, KnownOne);
3155 unsigned NumSignificantBits = (~KnownZero).getActiveBits();
3156 if (NumSignificantBits == 0)
3157 return DAG.getConstant(0, DL, VT);
3159 // Skip known-zero high parts of the operand.
3160 int64_t OrigBitSize = VT.getSizeInBits();
3161 int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
3162 BitSize = std::min(BitSize, OrigBitSize);
3164 // The POPCNT instruction counts the number of bits in each byte.
3165 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
3166 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
3167 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
3169 // Add up per-byte counts in a binary tree. All bits of Op at
3170 // position larger than BitSize remain zero throughout.
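// For example, a 32-bit operand needs Op += Op << 16 and Op += Op << 8,
// after which the most significant byte holds the total count.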
3171 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
3172 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
3173 if (BitSize != OrigBitSize)
3174 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
3175 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
3176 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3177 }
3179 // Extract overall result from high byte.
3180 if (BitSize > 8)
3181 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
3182 DAG.getConstant(BitSize - 8, DL, VT));
3184 return Op;
3185 }
3187 SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
3188 SelectionDAG &DAG) const {
3189 SDLoc DL(Op);
3190 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
3191 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
3192 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
3193 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
3195 // The only fence that needs an instruction is a sequentially-consistent
3196 // cross-thread fence.
3197 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
3198 FenceScope == CrossThread) {
3199 return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
3200 Op.getOperand(0)),
3201 0);
3202 }
3204 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
3205 return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
3206 }
3208 // Op is an atomic load. Lower it into a normal volatile load.
3209 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
3210 SelectionDAG &DAG) const {
3211 auto *Node = cast<AtomicSDNode>(Op.getNode());
3212 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
3213 Node->getChain(), Node->getBasePtr(),
3214 Node->getMemoryVT(), Node->getMemOperand());
3215 }
3217 // Op is an atomic store. Lower it into a normal volatile store followed
3218 // by a serialization.
3219 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
3220 SelectionDAG &DAG) const {
3221 auto *Node = cast<AtomicSDNode>(Op.getNode());
3222 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
3223 Node->getBasePtr(), Node->getMemoryVT(),
3224 Node->getMemOperand());
3225 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other,
3226 Chain), 0);
3227 }
3229 // Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation. Lower the first
3230 // two into the fullword ATOMIC_LOADW_* operation given by Opcode.
3231 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
3232 SelectionDAG &DAG,
3233 unsigned Opcode) const {
3234 auto *Node = cast<AtomicSDNode>(Op.getNode());
3236 // 32-bit operations need no code outside the main loop.
3237 EVT NarrowVT = Node->getMemoryVT();
3238 EVT WideVT = MVT::i32;
3239 if (NarrowVT == WideVT)
3240 return Op;
3242 int64_t BitSize = NarrowVT.getSizeInBits();
3243 SDValue ChainIn = Node->getChain();
3244 SDValue Addr = Node->getBasePtr();
3245 SDValue Src2 = Node->getVal();
3246 MachineMemOperand *MMO = Node->getMemOperand();
3247 SDLoc DL(Node);
3248 EVT PtrVT = Addr.getValueType();
3250 // Convert atomic subtracts of constants into additions.
3251 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
3252 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
3253 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
3254 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
3255 }
3257 // Get the address of the containing word.
3258 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3259 DAG.getConstant(-4, DL, PtrVT));
3261 // Get the number of bits that the word must be rotated left in order
3262 // to bring the field to the top bits of a GR32.
3263 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3264 DAG.getConstant(3, DL, PtrVT));
3265 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3267 // Get the complementing shift amount, for rotating a field in the top
3268 // bits back to its proper position.
3269 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3270 DAG.getConstant(0, DL, WideVT), BitShift);
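// For example, a halfword at byte offset 2 of its word gives BitShift = 16:
// rotating the loaded word left by 16 brings the field to the top of the
// GR32, and NegBitShift undoes that rotation afterwards.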
3272 // Extend the source operand to 32 bits and prepare it for the inner loop.
3273 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
3274 // operations require the source to be shifted in advance. (This shift
3275 // can be folded if the source is constant.) For AND and NAND, the lower
3276 // bits must be set, while for other opcodes they should be left clear.
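// E.g. for an 8-bit AND, Src2 is shifted into bits 31-24 and ORed with
// 0x00ffffff, so that the bytes outside the field are left unchanged by
// the AND performed inside the loop.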
3277 if (Opcode != SystemZISD::ATOMIC_SWAPW)
3278 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
3279 DAG.getConstant(32 - BitSize, DL, WideVT));
3280 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
3281 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
3282 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
3283 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
3285 // Construct the ATOMIC_LOADW_* node.
3286 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3287 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
3288 DAG.getConstant(BitSize, DL, WideVT) };
3289 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
3290 NarrowVT, MMO);
3292 // Rotate the result of the final CS so that the field is in the lower
3293 // bits of a GR32, then truncate it.
3294 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
3295 DAG.getConstant(BitSize, DL, WideVT));
3296 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
3298 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
3299 return DAG.getMergeValues(RetOps, DL);
3300 }
3302 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
3303 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
3304 // operations into additions.
3305 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
3306 SelectionDAG &DAG) const {
3307 auto *Node = cast<AtomicSDNode>(Op.getNode());
3308 EVT MemVT = Node->getMemoryVT();
3309 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
3310 // A full-width operation.
3311 assert(Op.getValueType() == MemVT && "Mismatched VTs");
3312 SDValue Src2 = Node->getVal();
3313 SDValue NegSrc2;
3314 SDLoc DL(Src2);
3316 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
3317 // Use an addition if the operand is constant and either LAA(G) is
3318 // available or the negative value is in the range of A(G)FHI.
3319 int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
3320 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
3321 NegSrc2 = DAG.getConstant(Value, DL, MemVT);
3322 } else if (Subtarget.hasInterlockedAccess1())
3323 // Use LAA(G) if available.
3324 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
3325 Src2);
3327 if (NegSrc2.getNode())
3328 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
3329 Node->getChain(), Node->getBasePtr(), NegSrc2,
3330 Node->getMemOperand());
3332 // Use the node as-is.
3333 return Op;
3334 }
3336 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
3337 }
3339 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two
3340 // into a fullword ATOMIC_CMP_SWAPW operation.
3341 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
3342 SelectionDAG &DAG) const {
3343 auto *Node = cast<AtomicSDNode>(Op.getNode());
3345 // We have native support for 32-bit compare and swap.
3346 EVT NarrowVT = Node->getMemoryVT();
3347 EVT WideVT = MVT::i32;
3348 if (NarrowVT == WideVT)
3349 return Op;
3351 int64_t BitSize = NarrowVT.getSizeInBits();
3352 SDValue ChainIn = Node->getOperand(0);
3353 SDValue Addr = Node->getOperand(1);
3354 SDValue CmpVal = Node->getOperand(2);
3355 SDValue SwapVal = Node->getOperand(3);
3356 MachineMemOperand *MMO = Node->getMemOperand();
3357 SDLoc DL(Node);
3358 EVT PtrVT = Addr.getValueType();
3360 // Get the address of the containing word.
3361 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3362 DAG.getConstant(-4, DL, PtrVT));
3364 // Get the number of bits that the word must be rotated left in order
3365 // to bring the field to the top bits of a GR32.
3366 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3367 DAG.getConstant(3, DL, PtrVT));
3368 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3370 // Get the complementing shift amount, for rotating a field in the top
3371 // bits back to its proper position.
3372 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3373 DAG.getConstant(0, DL, WideVT), BitShift);
3375 // Construct the ATOMIC_CMP_SWAPW node.
3376 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3377 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
3378 NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
3379 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
3380 VTList, Ops, NarrowVT, MMO);
3381 return AtomicOp;
3382 }
3384 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
3385 SelectionDAG &DAG) const {
3386 MachineFunction &MF = DAG.getMachineFunction();
3387 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3388 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
3389 SystemZ::R15D, Op.getValueType());
3390 }
3392 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
3393 SelectionDAG &DAG) const {
3394 MachineFunction &MF = DAG.getMachineFunction();
3395 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3396 bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
3398 SDValue Chain = Op.getOperand(0);
3399 SDValue NewSP = Op.getOperand(1);
3400 SDValue Backchain;
3401 SDLoc DL(Op);
3403 if (StoreBackchain) {
3404 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
3405 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3406 }
3408 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
3410 if (StoreBackchain)
3411 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3413 return Chain;
3414 }
3416 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
3417 SelectionDAG &DAG) const {
3418 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3419 if (!IsData)
3420 // Just preserve the chain.
3421 return Op.getOperand(0);
3423 SDLoc DL(Op);
3424 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3425 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
3426 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
3427 SDValue Ops[] = {
3428 Op.getOperand(0),
3429 DAG.getConstant(Code, DL, MVT::i32),
3430 Op.getOperand(1)
3431 };
3432 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
3433 Node->getVTList(), Ops,
3434 Node->getMemoryVT(), Node->getMemOperand());
3435 }
3437 // Return an i32 that contains the value of CC immediately after After,
3438 // whose final operand must be MVT::Glue.
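// (IPM places the condition code in bits 28-29 of its GR32 result, so the
// shift below moves it into the two low bits; SystemZ::IPM_CC is that
// shift amount.)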
3439 static SDValue getCCResult(SelectionDAG &DAG, SDNode *After) {
3440 SDLoc DL(After);
3441 SDValue Glue = SDValue(After, After->getNumValues() - 1);
3442 SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
3443 return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
3444 DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
3445 }
3447 SDValue
3448 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
3449 SelectionDAG &DAG) const {
3450 unsigned Opcode, CCValid;
3451 if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
3452 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
3453 SDValue Glued = emitIntrinsicWithChainAndGlue(DAG, Op, Opcode);
3454 SDValue CC = getCCResult(DAG, Glued.getNode());
3455 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
3456 return SDValue(Glued.getNode(), 0);
3457 }
3459 return SDValue();
3460 }
3462 SDValue
3463 SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
3464 SelectionDAG &DAG) const {
3465 unsigned Opcode, CCValid;
3466 if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
3467 SDValue Glued = emitIntrinsicWithGlue(DAG, Op, Opcode);
3468 SDValue CC = getCCResult(DAG, Glued.getNode());
3469 if (Op->getNumValues() == 1)
3470 return CC;
3471 assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
3472 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), Glued,
3473 CC);
3474 }
3476 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3477 switch (Id) {
3478 case Intrinsic::thread_pointer:
3479 return lowerThreadPointer(SDLoc(Op), DAG);
3481 case Intrinsic::s390_vpdi:
3482 return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
3483 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3485 case Intrinsic::s390_vperm:
3486 return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
3487 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3489 case Intrinsic::s390_vuphb:
3490 case Intrinsic::s390_vuphh:
3491 case Intrinsic::s390_vuphf:
3492 return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
3493 Op.getOperand(1));
3495 case Intrinsic::s390_vuplhb:
3496 case Intrinsic::s390_vuplhh:
3497 case Intrinsic::s390_vuplhf:
3498 return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
3499 Op.getOperand(1));
3501 case Intrinsic::s390_vuplb:
3502 case Intrinsic::s390_vuplhw:
3503 case Intrinsic::s390_vuplf:
3504 return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
3505 Op.getOperand(1));
3507 case Intrinsic::s390_vupllb:
3508 case Intrinsic::s390_vupllh:
3509 case Intrinsic::s390_vupllf:
3510 return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
3511 Op.getOperand(1));
3513 case Intrinsic::s390_vsumb:
3514 case Intrinsic::s390_vsumh:
3515 case Intrinsic::s390_vsumgh:
3516 case Intrinsic::s390_vsumgf:
3517 case Intrinsic::s390_vsumqf:
3518 case Intrinsic::s390_vsumqg:
3519 return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
3520 Op.getOperand(1), Op.getOperand(2));
3521 }
3523 return SDValue();
3524 }
3526 namespace {
3527 // Says that SystemZISD operation Opcode can be used to perform the equivalent
3528 // of a VPERM with permute vector Bytes. If Opcode takes three operands,
3529 // Operand is the constant third operand, otherwise it is the number of
3530 // bytes in each element of the result.
3531 struct Permute {
3532 unsigned Opcode;
3533 unsigned Operand;
3534 unsigned char Bytes[SystemZ::VectorBytes];
3535 };
3536 }
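// In each Bytes array below, indices 0-15 select bytes of the first operand
// and 16-31 select bytes of the second, matching the VPERM convention.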
3538 static const Permute PermuteForms[] = {
3539 // VMRHG
3540 { SystemZISD::MERGE_HIGH, 8,
3541 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
3542 // VMRHF
3543 { SystemZISD::MERGE_HIGH, 4,
3544 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
3545 // VMRHH
3546 { SystemZISD::MERGE_HIGH, 2,
3547 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
3548 // VMRHB
3549 { SystemZISD::MERGE_HIGH, 1,
3550 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
3551 // VMRLG
3552 { SystemZISD::MERGE_LOW, 8,
3553 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
3554 // VMRLF
3555 { SystemZISD::MERGE_LOW, 4,
3556 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
3557 // VMRLH
3558 { SystemZISD::MERGE_LOW, 2,
3559 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
3560 // VMRLB
3561 { SystemZISD::MERGE_LOW, 1,
3562 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
3563 // VPKG
3564 { SystemZISD::PACK, 4,
3565 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
3566 // VPKF
3567 { SystemZISD::PACK, 2,
3568 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
3569 // VPKH
3570 { SystemZISD::PACK, 1,
3571 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
3572 // VPDI V1, V2, 4 (low half of V1, high half of V2)
3573 { SystemZISD::PERMUTE_DWORDS, 4,
3574 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
3575 // VPDI V1, V2, 1 (high half of V1, low half of V2)
3576 { SystemZISD::PERMUTE_DWORDS, 1,
3577 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
3578 };
3580 // Called after matching a vector shuffle against a particular pattern.
3581 // Both the original shuffle and the pattern have two vector operands.
3582 // OpNos[0] is the operand of the original shuffle that should be used for
3583 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
3584 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
3585 // set OpNo0 and OpNo1 to the shuffle operands that should actually be used
3586 // for operands 0 and 1 of the pattern.
3587 static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
3588 if (OpNos[0] < 0) {
3589 if (OpNos[1] < 0)
3590 return false;
3591 OpNo0 = OpNo1 = OpNos[1];
3592 } else if (OpNos[1] < 0) {
3593 OpNo0 = OpNo1 = OpNos[0];
3594 } else {
3595 OpNo0 = OpNos[0];
3596 OpNo1 = OpNos[1];
3597 }
3598 return true;
3599 }
3601 // Bytes is a VPERM-like permute vector, except that -1 is used for
3602 // undefined bytes. Return true if the VPERM can be implemented using P.
3603 // When returning true set OpNo0 to the VPERM operand that should be
3604 // used for operand 0 of P and likewise OpNo1 for operand 1 of P.
3606 // For example, if swapping the VPERM operands allows P to match, OpNo0
3607 // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
3608 // operand, but rewriting it to use two duplicated operands allows it to
3609 // match P, then OpNo0 and OpNo1 will be the same.
3610 static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
3611 unsigned &OpNo0, unsigned &OpNo1) {
3612 int OpNos[] = { -1, -1 };
3613 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
3614 int Elt = Bytes[I];
3615 if (Elt >= 0) {
3616 // Make sure that the two permute vectors use the same suboperand
3617 // byte number. Only the operand numbers (the high bits) are
3618 // allowed to differ.
3619 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
3620 return false;
3621 int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
3622 int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
3623 // Make sure that the operand mappings are consistent with previous
3624 // elements.
3625 if (OpNos[ModelOpNo] == 1 - RealOpNo)
3626 return false;
3627 OpNos[ModelOpNo] = RealOpNo;
3628 }
3629 }
3630 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
3631 }
3633 // As above, but search for a matching permute.
3634 static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
3635 unsigned &OpNo0, unsigned &OpNo1) {
3636 for (auto &P : PermuteForms)
3637 if (matchPermute(Bytes, P, OpNo0, OpNo1))
3638 return &P;
3639 return nullptr;
3640 }
3642 // Bytes is a VPERM-like permute vector, except that -1 is used for
3643 // undefined bytes. This permute is an operand of an outer permute.
3644 // See whether redistributing the -1 bytes gives a shuffle that can be
3645 // implemented using P. If so, set Transform to a VPERM-like permute vector
3646 // that, when applied to the result of P, gives the original permute in Bytes.
3647 static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
3648 const Permute &P,
3649 SmallVectorImpl<int> &Transform) {
3650 unsigned To = 0;
3651 for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
3652 int Elt = Bytes[From];
3653 if (Elt < 0)
3654 // Byte number From of the result is undefined.
3655 Transform[From] = -1;
3656 else {
3657 while (P.Bytes[To] != Elt) {
3658 To += 1;
3659 if (To == SystemZ::VectorBytes)
3660 return false;
3661 }
3662 Transform[From] = To;
3663 }
3664 }
3665 return true;
3666 }
3668 // As above, but search for a matching permute.
3669 static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
3670 SmallVectorImpl<int> &Transform) {
3671 for (auto &P : PermuteForms)
3672 if (matchDoublePermute(Bytes, P, Transform))
3673 return &P;
3674 return nullptr;
3675 }
3677 // Convert the mask of the given VECTOR_SHUFFLE into a byte-level mask,
3678 // as if it had type vNi8.
3679 static void getVPermMask(ShuffleVectorSDNode *VSN,
3680 SmallVectorImpl<int> &Bytes) {
3681 EVT VT = VSN->getValueType(0);
3682 unsigned NumElements = VT.getVectorNumElements();
3683 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3684 Bytes.resize(NumElements * BytesPerElement, -1);
3685 for (unsigned I = 0; I < NumElements; ++I) {
3686 int Index = VSN->getMaskElt(I);
3687 if (Index >= 0)
3688 for (unsigned J = 0; J < BytesPerElement; ++J)
3689 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
3690 }
3691 }
3693 // Bytes is a VPERM-like permute vector, except that -1 is used for
3694 // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
3695 // the result come from a contiguous sequence of bytes from one input.
3696 // Set Base to the selector for the first byte if so.
3697 static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
3698 unsigned BytesPerElement, int &Base) {
3699 Base = -1;
3700 for (unsigned I = 0; I < BytesPerElement; ++I) {
3701 if (Bytes[Start + I] >= 0) {
3702 unsigned Elem = Bytes[Start + I];
3703 if (Base < 0) {
3704 Base = Elem - I;
3705 // Make sure the bytes would come from one input operand.
3706 if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
3707 return false;
3708 } else if (unsigned(Base) != Elem - I)
3709 return false;
3710 }
3711 }
3712 return true;
3713 }
3715 // Bytes is a VPERM-like permute vector, except that -1 is used for
3716 // undefined bytes. Return true if it can be performed using VSLDI.
3717 // When returning true, set StartIndex to the shift amount and OpNo0
3718 // and OpNo1 to the VPERM operands that should be used as the first
3719 // and second shift operand respectively.
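// (VSLDI concatenates its two operands and takes 16 consecutive bytes
// starting at StartIndex, so e.g. Bytes == <2,3,...,16,17> matches with
// StartIndex = 2.)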
3720 static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
3721 unsigned &StartIndex, unsigned &OpNo0,
3722 unsigned &OpNo1) {
3723 int OpNos[] = { -1, -1 };
3724 int Shift = -1;
3725 for (unsigned I = 0; I < 16; ++I) {
3726 int Index = Bytes[I];
3727 if (Index >= 0) {
3728 int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
3729 int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
3730 int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
3731 if (Shift < 0)
3732 Shift = ExpectedShift;
3733 else if (Shift != ExpectedShift)
3734 return false;
3735 // Make sure that the operand mappings are consistent with previous
3736 // elements.
3737 if (OpNos[ModelOpNo] == 1 - RealOpNo)
3738 return false;
3739 OpNos[ModelOpNo] = RealOpNo;
3740 }
3741 }
3742 StartIndex = Shift;
3743 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
3744 }
3746 // Create a node that performs P on operands Op0 and Op1, casting the
3747 // operands to the appropriate type. The type of the result is determined by P.
3748 static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
3749 const Permute &P, SDValue Op0, SDValue Op1) {
3750 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
3751 // elements of a PACK are twice as wide as the outputs.
3752 unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
3753 P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
3754 P.Operand);
3755 // Cast both operands to the appropriate type.
3756 MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
3757 SystemZ::VectorBytes / InBytes);
3758 Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
3759 Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
3760 SDValue Op;
3761 if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
3762 SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32);
3763 Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
3764 } else if (P.Opcode == SystemZISD::PACK) {
3765 MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
3766 SystemZ::VectorBytes / P.Operand);
3767 Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
3768 } else {
3769 Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
3770 }
3771 return Op;
3772 }
3774 // Bytes is a VPERM-like permute vector, except that -1 is used for
3775 // undefined bytes. Implement it on operands Ops[0] and Ops[1] using
3776 // VSLDI or VPERM.
3777 static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
3778 SDValue *Ops,
3779 const SmallVectorImpl<int> &Bytes) {
3780 for (unsigned I = 0; I < 2; ++I)
3781 Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);
3783 // First see whether VSLDI can be used.
3784 unsigned StartIndex, OpNo0, OpNo1;
3785 if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
3786 return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
3787 Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32));
3789 // Fall back on VPERM. Construct an SDNode for the permute vector.
3790 SDValue IndexNodes[SystemZ::VectorBytes];
3791 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
3792 if (Bytes[I] >= 0)
3793 IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
3794 else
3795 IndexNodes[I] = DAG.getUNDEF(MVT::i32);
3796 SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
3797 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
3798 }
3800 namespace {
3801 // Describes a general N-operand vector shuffle.
3802 struct GeneralShuffle {
3803 GeneralShuffle(EVT vt) : VT(vt) {}
3804 void addUndef();
3805 void add(SDValue, unsigned);
3806 SDValue getNode(SelectionDAG &, const SDLoc &);
3808 // The operands of the shuffle.
3809 SmallVector<SDValue, SystemZ::VectorBytes> Ops;
3811 // Index I is -1 if byte I of the result is undefined. Otherwise the
3812 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
3813 // Bytes[I] / SystemZ::VectorBytes.
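// For example, with 16-byte operands, Bytes[I] == 20 means byte 4 of
// operand 1.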
3814 SmallVector<int, SystemZ::VectorBytes> Bytes;
3816 // The type of the shuffle result.
3817 EVT VT;
3818 };
3819 }
3821 // Add an extra undefined element to the shuffle.
3822 void GeneralShuffle::addUndef() {
3823 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3824 for (unsigned I = 0; I < BytesPerElement; ++I)
3825 Bytes.push_back(-1);
3826 }
3828 // Add an extra element to the shuffle, taking it from element Elem of Op.
3829 // A null Op indicates a vector input whose value will be calculated later;
3830 // there is at most one such input per shuffle and it always has the same
3831 // type as the result.
3832 void GeneralShuffle::add(SDValue Op, unsigned Elem) {
3833 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3835 // The source vector can have wider elements than the result,
3836 // either through an explicit TRUNCATE or because of type legalization.
3837 // We want the least significant part.
3838 EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
3839 unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();
3840 assert(FromBytesPerElement >= BytesPerElement &&
3841 "Invalid EXTRACT_VECTOR_ELT");
3842 unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
3843 (FromBytesPerElement - BytesPerElement));
3845 // Look through things like shuffles and bitcasts.
3846 while (Op.getNode()) {
3847 if (Op.getOpcode() == ISD::BITCAST)
3848 Op = Op.getOperand(0);
3849 else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
3850 // See whether the bytes we need come from a contiguous part of one
3851 // operand.
3852 SmallVector<int, SystemZ::VectorBytes> OpBytes;
3853 getVPermMask(cast<ShuffleVectorSDNode>(Op), OpBytes);
3854 int NewByte;
3855 if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
3856 break;
3857 if (NewByte < 0) {
3858 addUndef();
3859 return;
3860 }
3861 Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
3862 Byte = unsigned(NewByte) % SystemZ::VectorBytes;
3863 } else if (Op.isUndef()) {
3864 addUndef();
3865 return;
3866 } else
3867 break;
3868 }
3870 // Make sure that the source of the extraction is in Ops.
3871 unsigned OpNo = 0;
3872 for (; OpNo < Ops.size(); ++OpNo)
3873 if (Ops[OpNo] == Op)
3874 break;
3875 if (OpNo == Ops.size())
3876 Ops.push_back(Op);
3878 // Add the element to Bytes.
3879 unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
3880 for (unsigned I = 0; I < BytesPerElement; ++I)
3881 Bytes.push_back(Base + I);
3882 }
3884 // Return SDNodes for the completed shuffle.
3885 SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
3886 assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");
3888 if (Ops.size() == 0)
3889 return DAG.getUNDEF(VT);
3891 // Make sure that there are at least two shuffle operands.
3892 if (Ops.size() == 1)
3893 Ops.push_back(DAG.getUNDEF(MVT::v16i8));
3895 // Create a tree of shuffles, deferring root node until after the loop.
3896 // Try to redistribute the undefined elements of non-root nodes so that
3897 // the non-root shuffles match something like a pack or merge, then adjust
3898 // the parent node's permute vector to compensate for the new order.
3899 // Among other things, this copes with vectors like <2 x i16> that were
3900 // padded with undefined elements during type legalization.
3902 // In the best case this redistribution will lead to the whole tree
3903 // using packs and merges. It should rarely be a loss in other cases.
3904 unsigned Stride = 1;
3905 for (; Stride * 2 < Ops.size(); Stride *= 2) {
3906 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
3907 SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
3909 // Create a mask for just these two operands.
3910 SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
3911 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
3912 unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
3913 unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
3914 if (OpNo == I)
3915 NewBytes[J] = Byte;
3916 else if (OpNo == I + Stride)
3917 NewBytes[J] = SystemZ::VectorBytes + Byte;
3918 else
3919 NewBytes[J] = -1;
3920 }
3921 // See if it would be better to reorganize NewMask to avoid using VPERM.
3922 SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
3923 if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
3924 Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
3925 // Applying NewBytesMap to Ops[I] gets back to NewBytes.
3926 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
3927 if (NewBytes[J] >= 0) {
3928 assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
3929 "Invalid double permute");
3930 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
3931 } else
3932 assert(NewBytesMap[J] < 0 && "Invalid double permute");
3933 }
3934 } else {
3935 // Just use NewBytes on the operands.
3936 Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
3937 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
3938 if (NewBytes[J] >= 0)
3939 Bytes[J] = I * SystemZ::VectorBytes + J;
3940 }
3941 }
3942 }
3944 // Now we just have 2 inputs. Put the second operand in Ops[1].
3945 if (Stride > 1) {
3946 Ops[1] = Ops[Stride];
3947 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
3948 if (Bytes[I] >= int(SystemZ::VectorBytes))
3949 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
3950 }
3952 // Look for an instruction that can do the permute without resorting
3953 // to VPERM.
3954 unsigned OpNo0, OpNo1;
3955 SDValue Op;
3956 if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
3957 Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
3959 Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
3960 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
3961 }
3963 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
3964 static bool isScalarToVector(SDValue Op) {
3965 for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
3966 if (!Op.getOperand(I).isUndef())
3967 return false;
3968 return true;
3969 }
3971 // Return a vector of type VT that contains Value in the first element.
3972 // The other elements don't matter.
3973 static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
3974 SDValue Value) {
3975 // If we have a constant, replicate it to all elements and let the
3976 // BUILD_VECTOR lowering take care of it.
3977 if (Value.getOpcode() == ISD::Constant ||
3978 Value.getOpcode() == ISD::ConstantFP) {
3979 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
3980 return DAG.getBuildVector(VT, DL, Ops);
3981 }
3982 if (Value.isUndef())
3983 return DAG.getUNDEF(VT);
3984 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
3985 }
3987 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in
3988 // element 1. Used for cases in which replication is cheap.
3989 static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
3990 SDValue Op0, SDValue Op1) {
3991 if (Op0.isUndef()) {
3992 if (Op1.isUndef())
3993 return DAG.getUNDEF(VT);
3994 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
3995 }
3996 if (Op1.isUndef())
3997 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
3998 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
3999 buildScalarToVector(DAG, DL, VT, Op0),
4000 buildScalarToVector(DAG, DL, VT, Op1));
4001 }
4003 // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
4004 // vector for them.
4005 static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
4006 SDValue Op1) {
4007 if (Op0.isUndef() && Op1.isUndef())
4008 return DAG.getUNDEF(MVT::v2i64);
4009 // If one of the two inputs is undefined then replicate the other one,
4010 // in order to avoid using another register unnecessarily.
4011 if (Op0.isUndef())
4012 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4013 else if (Op1.isUndef())
4014 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4015 else {
4016 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4017 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4018 }
4019 return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
4020 }
4022 // Try to represent constant BUILD_VECTOR node BVN using a
4023 // SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask
4024 // if successful.
4025 static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) {
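// (VECTOR GENERATE BYTE MASK expands each of its 16 immediate bits to a
// 0x00 or 0xff result byte, so every constant byte here must be one of
// those two values.)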
4026 EVT ElemVT = BVN->getValueType(0).getVectorElementType();
4027 unsigned BytesPerElement = ElemVT.getStoreSize();
4028 for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) {
4029 SDValue Op = BVN->getOperand(I);
4030 if (!Op.isUndef()) {
4031 uint64_t Value;
4032 if (Op.getOpcode() == ISD::Constant)
4033 Value = dyn_cast<ConstantSDNode>(Op)->getZExtValue();
4034 else if (Op.getOpcode() == ISD::ConstantFP)
4035 Value = (dyn_cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
4036 .getZExtValue());
4037 else
4038 return false;
4039 for (unsigned J = 0; J < BytesPerElement; ++J) {
4040 uint64_t Byte = (Value >> (J * 8)) & 0xff;
4041 if (Byte == 0xff)
4042 Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J);
4043 else if (Byte != 0)
4044 return false;
4045 }
4046 }
4047 }
4048 return true;
4049 }
4051 // Try to load a vector constant in which BitsPerElement-bit value Value
4052 // is replicated to fill the vector. VT is the type of the resulting
4053 // constant, which may have elements of a different size from BitsPerElement.
4054 // Return the SDValue of the constant on success, otherwise return
4055 // an empty value.
4056 static SDValue tryBuildVectorReplicate(SelectionDAG &DAG,
4057 const SystemZInstrInfo *TII,
4058 const SDLoc &DL, EVT VT, uint64_t Value,
4059 unsigned BitsPerElement) {
4060 // Signed 16-bit values can be replicated using VREPI.
4061 int64_t SignedValue = SignExtend64(Value, BitsPerElement);
4062 if (isInt<16>(SignedValue)) {
4063 MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
4064 SystemZ::VectorBits / BitsPerElement);
4065 SDValue Op = DAG.getNode(SystemZISD::REPLICATE, DL, VecVT,
4066 DAG.getConstant(SignedValue, DL, MVT::i32));
4067 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4069 // See whether rotating the constant left some N places gives a value that
4070 // is one less than a power of 2 (i.e. all zeros followed by all ones).
4071 // If so we can use VGM.
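// (For example, the v4i32 splat of 0x0000ffff is such a run of ones and
// can be generated with VGMF 16, 31.)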
4072 unsigned Start, End;
4073 if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) {
4074 // isRxSBGMask returns the bit numbers for a full 64-bit value,
4075 // with 0 denoting 1 << 63 and 63 denoting 1. Convert them to
4076 // bit numbers for a BitsPerElement value, so that 0 denotes
4077 // 1 << (BitsPerElement-1).
4078 Start -= 64 - BitsPerElement;
4079 End -= 64 - BitsPerElement;
4080 MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
4081 SystemZ::VectorBits / BitsPerElement);
4082 SDValue Op = DAG.getNode(SystemZISD::ROTATE_MASK, DL, VecVT,
4083 DAG.getConstant(Start, DL, MVT::i32),
4084 DAG.getConstant(End, DL, MVT::i32));
4085 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4086 }
4087 return SDValue();
4088 }
4090 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
4091 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
4092 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
4093 // would benefit from this representation and return it if so.
4094 static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
4095 BuildVectorSDNode *BVN) {
4096 EVT VT = BVN->getValueType(0);
4097 unsigned NumElements = VT.getVectorNumElements();
4099 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
4100 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
4101 // need a BUILD_VECTOR, add an additional placeholder operand for that
4102 // BUILD_VECTOR and store its operands in ResidueOps.
4103 GeneralShuffle GS(VT);
4104 SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
4105 bool FoundOne = false;
4106 for (unsigned I = 0; I < NumElements; ++I) {
4107 SDValue Op = BVN->getOperand(I);
4108 if (Op.getOpcode() == ISD::TRUNCATE)
4109 Op = Op.getOperand(0);
4110 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4111 Op.getOperand(1).getOpcode() == ISD::Constant) {
4112 unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4113 GS.add(Op.getOperand(0), Elem);
4114 FoundOne = true;
4115 } else if (Op.isUndef()) {
4116 GS.addUndef();
4117 } else {
4118 GS.add(SDValue(), ResidueOps.size());
4119 ResidueOps.push_back(BVN->getOperand(I));
4120 }
4121 }
4123 // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
4124 if (!FoundOne)
4125 return SDValue();
4127 // Create the BUILD_VECTOR for the remaining elements, if any.
4128 if (!ResidueOps.empty()) {
4129 while (ResidueOps.size() < NumElements)
4130 ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
4131 for (auto &Op : GS.Ops) {
4132 if (!Op.getNode()) {
4133 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
4134 break;
4135 }
4136 }
4137 }
4138 return GS.getNode(DAG, SDLoc(BVN));
4139 }
4141 // Combine GPR scalar values Elems into a vector of type VT.
4142 static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4143 SmallVectorImpl<SDValue> &Elems) {
4144 // See whether there is a single replicated value.
4145 SDValue Single;
4146 unsigned int NumElements = Elems.size();
4147 unsigned int Count = 0;
4148 for (auto Elem : Elems) {
4149 if (!Elem.isUndef()) {
4150 if (!Single.getNode())
4151 Single = Elem;
4152 else if (Elem != Single) {
4153 Single = SDValue();
4154 break;
4155 }
4156 Count += 1;
4157 }
4158 }
4159 // There are three cases here:
4161 // - if the only defined element is a loaded one, the best sequence
4162 // is a replicating load.
4164 // - otherwise, if the only defined element is an i64 value, we will
4165 // end up with the same VLVGP sequence regardless of whether we short-cut
4166 // for replication or fall through to the later code.
4168 // - otherwise, if the only defined element is an i32 or smaller value,
4169 // we would need 2 instructions to replicate it: VLVGP followed by VREPx.
4170 // This is only a win if the single defined element is used more than once.
4171 // In other cases we're better off using a single VLVGx.
4172 if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD))
4173 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);
4175 // The best way of building a v2i64 from two i64s is to use VLVGP.
4176 if (VT == MVT::v2i64)
4177 return joinDwords(DAG, DL, Elems[0], Elems[1]);
4179 // Use a 64-bit merge high to combine two doubles.
4180 if (VT == MVT::v2f64)
4181 return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4183 // Build v4f32 values directly from the FPRs:
4185 // <Axxx> <Bxxx> <Cxxx> <Dxxx>
4186 //    V      V            VMRHF
4187 //  <ABxx>  <CDxx>
4188 //       V                VMRHG
4189 //    <ABCD>
4190 if (VT == MVT::v4f32) {
4191 SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4192 SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
4193 // Avoid unnecessary undefs by reusing the other operand.
4194 if (Op01.isUndef())
4195 Op01 = Op23;
4196 else if (Op23.isUndef())
4197 Op23 = Op01;
4198 // Merging identical replications is a no-op.
4199 if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
4200 return Op01;
4201 Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
4202 Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
4203 SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
4204 DL, MVT::v2i64, Op01, Op23);
4205 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4208 // Collect the constant terms.
4209 SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
4210 SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);
4212 unsigned NumConstants = 0;
4213 for (unsigned I = 0; I < NumElements; ++I) {
4214 SDValue Elem = Elems[I];
4215 if (Elem.getOpcode() == ISD::Constant ||
4216 Elem.getOpcode() == ISD::ConstantFP) {
4217 NumConstants += 1;
4218 Constants[I] = Elem;
4219 Done[I] = true;
4220 }
4221 }
4222 // If there was at least one constant, fill in the other elements of
4223 // Constants with undefs to get a full vector constant and use that
4224 // as the starting point.
4225 SDValue Result;
4226 if (NumConstants > 0) {
4227 for (unsigned I = 0; I < NumElements; ++I)
4228 if (!Constants[I].getNode())
4229 Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
4230 Result = DAG.getBuildVector(VT, DL, Constants);
4231 } else {
4232 // Otherwise try to use VLVGP to start the sequence in order to
4233 // avoid a false dependency on any previous contents of the vector
4234 // register. This only makes sense if one of the associated elements
4235 // is defined.
4236 unsigned I1 = NumElements / 2 - 1;
4237 unsigned I2 = NumElements - 1;
4238 bool Def1 = !Elems[I1].isUndef();
4239 bool Def2 = !Elems[I2].isUndef();
4240 if (Def1 || Def2) {
4241 SDValue Elem1 = Elems[Def1 ? I1 : I2];
4242 SDValue Elem2 = Elems[Def2 ? I2 : I1];
4243 Result = DAG.getNode(ISD::BITCAST, DL, VT,
4244 joinDwords(DAG, DL, Elem1, Elem2));
4245 Done[I1] = true;
4246 Done[I2] = true;
4247 } else
4248 Result = DAG.getUNDEF(VT);
4249 }
4251 // Use VLVGx to insert the other elements.
4252 for (unsigned I = 0; I < NumElements; ++I)
4253 if (!Done[I] && !Elems[I].isUndef())
4254 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
4255 DAG.getConstant(I, DL, MVT::i32));
4257 return Result;
4258 }
4259 SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
4260 SelectionDAG &DAG) const {
4261 const SystemZInstrInfo *TII =
4262 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
4263 auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
4264 SDLoc DL(Op);
4265 EVT VT = Op.getValueType();
4267 if (BVN->isConstant()) {
4268 // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
4269 // preferred way of creating all-zero and all-one vectors so give it
4270 // priority over other methods below.
4271 uint64_t Mask = 0;
4272 if (tryBuildVectorByteMask(BVN, Mask)) {
4273 SDValue Op = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
4274 DAG.getConstant(Mask, DL, MVT::i32));
4275 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4276 }
4278 // Try using some form of replication.
4279 APInt SplatBits, SplatUndef;
4280 unsigned SplatBitSize;
4281 bool HasAnyUndefs;
4282 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4283 8, true) &&
4284 SplatBitSize <= 64) {
4285 // First try assuming that any undefined bits above the highest set bit
4286 // and below the lowest set bit are 1s. This increases the likelihood of
4287 // being able to use a sign-extended element value in VECTOR REPLICATE
4288 // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
4289 uint64_t SplatBitsZ = SplatBits.getZExtValue();
4290 uint64_t SplatUndefZ = SplatUndef.getZExtValue();
4291 uint64_t Lower = (SplatUndefZ
4292 & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
4293 uint64_t Upper = (SplatUndefZ
4294 & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
4295 uint64_t Value = SplatBitsZ | Upper | Lower;
4296 SDValue Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value,
4297 SplatBitSize);
4298 if (Op.getNode())
4299 return Op;
4301 // Now try assuming that any undefined bits between the first and
4302 // last defined set bits are set. This increases the chances of
4303 // using a non-wraparound mask.
4304 uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
4305 Value = SplatBitsZ | Middle;
4306 Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, SplatBitSize);
4307 if (Op.getNode())
4308 return Op;
4309 }
4311 // Fall back to loading it from memory.
4312 return SDValue();
4313 }
4315 // See if we should use shuffles to construct the vector from other vectors.
4316 if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
4317 return Res;
4319 // Detect SCALAR_TO_VECTOR conversions.
4320 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
4321 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));
4323 // Otherwise use buildVector to build the vector up from GPRs.
4324 unsigned NumElements = Op.getNumOperands();
4325 SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
4326 for (unsigned I = 0; I < NumElements; ++I)
4327 Ops[I] = Op.getOperand(I);
4328 return buildVector(DAG, DL, VT, Ops);
4329 }
4331 SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4332 SelectionDAG &DAG) const {
4333 auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
4334 SDLoc DL(Op);
4335 EVT VT = Op.getValueType();
4336 unsigned NumElements = VT.getVectorNumElements();
4338 if (VSN->isSplat()) {
4339 SDValue Op0 = Op.getOperand(0);
4340 unsigned Index = VSN->getSplatIndex();
4341 assert(Index < VT.getVectorNumElements() &&
4342 "Splat index should be defined and in first operand");
4343 // See whether the value we're splatting is directly available as a scalar.
4344 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4345 Op0.getOpcode() == ISD::BUILD_VECTOR)
4346 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
4347 // Otherwise keep it as a vector-to-vector operation.
4348 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
4349 DAG.getConstant(Index, DL, MVT::i32));
4350 }
4352 GeneralShuffle GS(VT);
4353 for (unsigned I = 0; I < NumElements; ++I) {
4354 int Elt = VSN->getMaskElt(I);
4355 if (Elt < 0)
4356 GS.addUndef();
4357 else
4358 GS.add(Op.getOperand(unsigned(Elt) / NumElements),
4359 unsigned(Elt) % NumElements);
4361 return GS.getNode(DAG, SDLoc(VSN));
4362 }
4364 SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
4365 SelectionDAG &DAG) const {
4366 SDLoc DL(Op);
4367 // Just insert the scalar into element 0 of an undefined vector.
4368 return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
4369 Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
4370 Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
4371 }
4373 SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4374 SelectionDAG &DAG) const {
4375 // Handle insertions of floating-point values.
4376 SDLoc DL(Op);
4377 SDValue Op0 = Op.getOperand(0);
4378 SDValue Op1 = Op.getOperand(1);
4379 SDValue Op2 = Op.getOperand(2);
4380 EVT VT = Op.getValueType();
4382 // Insertions into constant indices of a v2f64 can be done using VPDI.
4383 // However, if the inserted value is a bitcast or a constant then it's
4384 // better to use GPRs, as below.
4385 if (VT == MVT::v2f64 &&
4386 Op1.getOpcode() != ISD::BITCAST &&
4387 Op1.getOpcode() != ISD::ConstantFP &&
4388 Op2.getOpcode() == ISD::Constant) {
4389 uint64_t Index = dyn_cast<ConstantSDNode>(Op2)->getZExtValue();
4390 unsigned Mask = VT.getVectorNumElements() - 1;
4391 if (Index <= Mask)
4392 return Op;
4393 }
4395 // Otherwise bitcast to the equivalent integer form and insert via a GPR.
4396 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
4397 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
4398 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
4399 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
4400 DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
4401 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4402 }
4404 SDValue
4405 SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4406 SelectionDAG &DAG) const {
4407 // Handle extractions of floating-point values.
4408 SDLoc DL(Op);
4409 SDValue Op0 = Op.getOperand(0);
4410 SDValue Op1 = Op.getOperand(1);
4411 EVT VT = Op.getValueType();
4412 EVT VecVT = Op0.getValueType();
4414 // Extractions of constant indices can be done directly.
4415 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
4416 uint64_t Index = CIndexN->getZExtValue();
4417 unsigned Mask = VecVT.getVectorNumElements() - 1;
4418 if (Index <= Mask)
4419 return Op;
4420 }
4422 // Otherwise bitcast to the equivalent integer form and extract via a GPR.
4423 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
4424 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
4425 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
4426 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
4427 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4428 }
4430 SDValue
4431 SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
4432 unsigned UnpackHigh) const {
4433 SDValue PackedOp = Op.getOperand(0);
4434 EVT OutVT = Op.getValueType();
4435 EVT InVT = PackedOp.getValueType();
4436 unsigned ToBits = OutVT.getScalarSizeInBits();
4437 unsigned FromBits = InVT.getScalarSizeInBits();
4438 do {
4439 FromBits *= 2;
4440 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
4441 SystemZ::VectorBits / FromBits);
4442 PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp);
4443 } while (FromBits != ToBits);
4444 return PackedOp;
4445 }
4447 SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
4448 unsigned ByScalar) const {
4449 // Look for cases where a vector shift can use the *_BY_SCALAR form.
4450 SDValue Op0 = Op.getOperand(0);
4451 SDValue Op1 = Op.getOperand(1);
4452 SDLoc DL(Op);
4453 EVT VT = Op.getValueType();
4454 unsigned ElemBitSize = VT.getScalarSizeInBits();
4456 // See whether the shift vector is a splat represented as BUILD_VECTOR.
4457 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
4458 APInt SplatBits, SplatUndef;
4459 unsigned SplatBitSize;
4460 bool HasAnyUndefs;
4461 // Check for constant splats. Use ElemBitSize as the minimum element
4462 // width and reject splats that need wider elements.
4463 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4464 ElemBitSize, true) &&
4465 SplatBitSize == ElemBitSize) {
4466 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
4467 DL, MVT::i32);
4468 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4469 }
4470 // Check for variable splats.
4471 BitVector UndefElements;
4472 SDValue Splat = BVN->getSplatValue(&UndefElements);
4473 if (Splat) {
4474 // Since i32 is the smallest legal type, we either need a no-op
4475 // or a truncation.
4476 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
4477 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4478 }
4479 }
4481 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
4482 // and the shift amount is directly available in a GPR.
4483 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
4484 if (VSN->isSplat()) {
4485 SDValue VSNOp0 = VSN->getOperand(0);
4486 unsigned Index = VSN->getSplatIndex();
4487 assert(Index < VT.getVectorNumElements() &&
4488 "Splat index should be defined and in first operand");
4489 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4490 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
4491 // Since i32 is the smallest legal type, we either need a no-op
4492 // or a truncation.
4493 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
4494 VSNOp0.getOperand(Index));
4495 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4496 }
4497 }
4498 }
4500 // Otherwise just treat the current form as legal.
4501 return Op;
4502 }
4504 SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
4505 SelectionDAG &DAG) const {
4506 switch (Op.getOpcode()) {
4507 case ISD::FRAMEADDR:
4508 return lowerFRAMEADDR(Op, DAG);
4509 case ISD::RETURNADDR:
4510 return lowerRETURNADDR(Op, DAG);
4511 case ISD::BR_CC:
4512 return lowerBR_CC(Op, DAG);
4513 case ISD::SELECT_CC:
4514 return lowerSELECT_CC(Op, DAG);
4515 case ISD::SETCC:
4516 return lowerSETCC(Op, DAG);
4517 case ISD::GlobalAddress:
4518 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
4519 case ISD::GlobalTLSAddress:
4520 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
4521 case ISD::BlockAddress:
4522 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
4523 case ISD::JumpTable:
4524 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
4525 case ISD::ConstantPool:
4526 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
4527 case ISD::BITCAST:
4528 return lowerBITCAST(Op, DAG);
4529 case ISD::VASTART:
4530 return lowerVASTART(Op, DAG);
4531 case ISD::VACOPY:
4532 return lowerVACOPY(Op, DAG);
4533 case ISD::DYNAMIC_STACKALLOC:
4534 return lowerDYNAMIC_STACKALLOC(Op, DAG);
4535 case ISD::GET_DYNAMIC_AREA_OFFSET:
4536 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
4537 case ISD::SMUL_LOHI:
4538 return lowerSMUL_LOHI(Op, DAG);
4539 case ISD::UMUL_LOHI:
4540 return lowerUMUL_LOHI(Op, DAG);
4541 case ISD::SDIVREM:
4542 return lowerSDIVREM(Op, DAG);
4543 case ISD::UDIVREM:
4544 return lowerUDIVREM(Op, DAG);
4545 case ISD::OR:
4546 return lowerOR(Op, DAG);
4547 case ISD::CTPOP:
4548 return lowerCTPOP(Op, DAG);
4549 case ISD::ATOMIC_FENCE:
4550 return lowerATOMIC_FENCE(Op, DAG);
4551 case ISD::ATOMIC_SWAP:
4552 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
4553 case ISD::ATOMIC_STORE:
4554 return lowerATOMIC_STORE(Op, DAG);
4555 case ISD::ATOMIC_LOAD:
4556 return lowerATOMIC_LOAD(Op, DAG);
4557 case ISD::ATOMIC_LOAD_ADD:
4558 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
4559 case ISD::ATOMIC_LOAD_SUB:
4560 return lowerATOMIC_LOAD_SUB(Op, DAG);
4561 case ISD::ATOMIC_LOAD_AND:
4562 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
4563 case ISD::ATOMIC_LOAD_OR:
4564 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
4565 case ISD::ATOMIC_LOAD_XOR:
4566 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
4567 case ISD::ATOMIC_LOAD_NAND:
4568 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
4569 case ISD::ATOMIC_LOAD_MIN:
4570 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
4571 case ISD::ATOMIC_LOAD_MAX:
4572 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
4573 case ISD::ATOMIC_LOAD_UMIN:
4574 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
4575 case ISD::ATOMIC_LOAD_UMAX:
4576 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
4577 case ISD::ATOMIC_CMP_SWAP:
4578 return lowerATOMIC_CMP_SWAP(Op, DAG);
4579 case ISD::STACKSAVE:
4580 return lowerSTACKSAVE(Op, DAG);
4581 case ISD::STACKRESTORE:
4582 return lowerSTACKRESTORE(Op, DAG);
4583 case ISD::PREFETCH:
4584 return lowerPREFETCH(Op, DAG);
4585 case ISD::INTRINSIC_W_CHAIN:
4586 return lowerINTRINSIC_W_CHAIN(Op, DAG);
4587 case ISD::INTRINSIC_WO_CHAIN:
4588 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
4589 case ISD::BUILD_VECTOR:
4590 return lowerBUILD_VECTOR(Op, DAG);
4591 case ISD::VECTOR_SHUFFLE:
4592 return lowerVECTOR_SHUFFLE(Op, DAG);
4593 case ISD::SCALAR_TO_VECTOR:
4594 return lowerSCALAR_TO_VECTOR(Op, DAG);
4595 case ISD::INSERT_VECTOR_ELT:
4596 return lowerINSERT_VECTOR_ELT(Op, DAG);
4597 case ISD::EXTRACT_VECTOR_ELT:
4598 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4599 case ISD::SIGN_EXTEND_VECTOR_INREG:
4600 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
4601 case ISD::ZERO_EXTEND_VECTOR_INREG:
4602 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
4603 case ISD::SHL:
4604 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
4605 case ISD::SRL:
4606 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
4607 case ISD::SRA:
4608 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
4609 default:
4610 llvm_unreachable("Unexpected node to lower");
4611 }
4612 }
4614 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
4615 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
4616 switch ((SystemZISD::NodeType)Opcode) {
4617 case SystemZISD::FIRST_NUMBER: break;
4623 OPCODE(PCREL_WRAPPER);
4624 OPCODE(PCREL_OFFSET);
4630 OPCODE(SELECT_CCMASK);
4631 OPCODE(ADJDYNALLOC);
4633 OPCODE(UMUL_LOHI64);
4650 OPCODE(SEARCH_STRING);
4655 OPCODE(TBEGIN_NOFLOAT);
4658 OPCODE(ROTATE_MASK);
4660 OPCODE(JOIN_DWORDS);
4665 OPCODE(PERMUTE_DWORDS);
4670 OPCODE(UNPACK_HIGH);
4671 OPCODE(UNPACKL_HIGH);
4673 OPCODE(UNPACKL_LOW);
4674 OPCODE(VSHL_BY_SCALAR);
4675 OPCODE(VSRL_BY_SCALAR);
4676 OPCODE(VSRA_BY_SCALAR);
4704 OPCODE(ATOMIC_SWAPW);
4705 OPCODE(ATOMIC_LOADW_ADD);
4706 OPCODE(ATOMIC_LOADW_SUB);
4707 OPCODE(ATOMIC_LOADW_AND);
4708 OPCODE(ATOMIC_LOADW_OR);
4709 OPCODE(ATOMIC_LOADW_XOR);
4710 OPCODE(ATOMIC_LOADW_NAND);
4711 OPCODE(ATOMIC_LOADW_MIN);
4712 OPCODE(ATOMIC_LOADW_MAX);
4713 OPCODE(ATOMIC_LOADW_UMIN);
4714 OPCODE(ATOMIC_LOADW_UMAX);
4715 OPCODE(ATOMIC_CMP_SWAPW);
4716 OPCODE(LRV);
4717 OPCODE(STRV);
4718 OPCODE(PREFETCH);
4719 }
4720 return nullptr;
4721 #undef OPCODE
4722 }
4724 // Return true if VT is a vector whose elements are a whole number of bytes
4725 // in width.
4726 static bool canTreatAsByteVector(EVT VT) {
4727 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0;
4728 }
4730 // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
4731 // producing a result of type ResVT. Op is a possibly bitcast version
4732 // of the input vector and Index is the index (based on type VecVT) that
4733 // should be extracted. Return the new extraction if a simplification
4734 // was possible or if Force is true.
4735 SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
4736 EVT VecVT, SDValue Op,
4737 unsigned Index,
4738 DAGCombinerInfo &DCI,
4739 bool Force) const {
4740 SelectionDAG &DAG = DCI.DAG;
4742 // The number of bytes being extracted.
4743 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
4745 for (;;) {
4746 unsigned Opcode = Op.getOpcode();
4747 if (Opcode == ISD::BITCAST)
4748 // Look through bitcasts.
4749 Op = Op.getOperand(0);
4750 else if (Opcode == ISD::VECTOR_SHUFFLE &&
4751 canTreatAsByteVector(Op.getValueType())) {
4752 // Get a VPERM-like permute mask and see whether the bytes covered
4753 // by the extracted element are a contiguous sequence from one
4754 // input vector.
4755 SmallVector<int, SystemZ::VectorBytes> Bytes;
4756 getVPermMask(cast<ShuffleVectorSDNode>(Op), Bytes);
4757 int First;
4758 if (!getShuffleInput(Bytes, Index * BytesPerElement,
4759 BytesPerElement, First))
4760 break;
4761 if (First < 0)
4762 return DAG.getUNDEF(ResVT);
4763 // Make sure the contiguous sequence starts at a multiple of the
4764 // original element size.
4765 unsigned Byte = unsigned(First) % Bytes.size();
4766 if (Byte % BytesPerElement != 0)
4767 break;
4768 // We can get the extracted value directly from an input.
4769 Index = Byte / BytesPerElement;
4770 Op = Op.getOperand(unsigned(First) / Bytes.size());
4771 Force = true;
4772 } else if (Opcode == ISD::BUILD_VECTOR &&
4773 canTreatAsByteVector(Op.getValueType())) {
4774 // We can only optimize this case if the BUILD_VECTOR elements are
4775 // at least as wide as the extracted value.
4776 EVT OpVT = Op.getValueType();
4777 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
4778 if (OpBytesPerElement < BytesPerElement)
4779 break;
4780 // Make sure that the least-significant bit of the extracted value
4781 // is the least significant bit of an input.
4782 unsigned End = (Index + 1) * BytesPerElement;
4783 if (End % OpBytesPerElement != 0)
4784 break;
4785 // We're extracting the low part of one operand of the BUILD_VECTOR.
4786 Op = Op.getOperand(End / OpBytesPerElement - 1);
4787 if (!Op.getValueType().isInteger()) {
4788 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
4789 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
4790 DCI.AddToWorklist(Op.getNode());
4791 }
4792 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
4793 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
4794 if (VT != ResVT) {
4795 DCI.AddToWorklist(Op.getNode());
4796 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
4797 }
4798 return Op;
4799 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
4800 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
4801 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
4802 canTreatAsByteVector(Op.getValueType()) &&
4803 canTreatAsByteVector(Op.getOperand(0).getValueType())) {
4804 // Make sure that only the unextended bits are significant.
4805 EVT ExtVT = Op.getValueType();
4806 EVT OpVT = Op.getOperand(0).getValueType();
4807 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
4808 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
4809 unsigned Byte = Index * BytesPerElement;
4810 unsigned SubByte = Byte % ExtBytesPerElement;
4811 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
4812 if (SubByte < MinSubByte ||
4813 SubByte + BytesPerElement > ExtBytesPerElement)
4814 break;
4815 // Get the byte offset of the unextended element
4816 Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
4817 // ...then add the byte offset relative to that element.
4818 Byte += SubByte - MinSubByte;
4819 if (Byte % BytesPerElement != 0)
4820 break;
4821 Op = Op.getOperand(0);
4822 Index = Byte / BytesPerElement;
4823 Force = true;
4824 } else
4825 break;
4826 }
4827 if (Force) {
4828 if (Op.getValueType() != VecVT) {
4829 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
4830 DCI.AddToWorklist(Op.getNode());
4832 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
4833 DAG.getConstant(Index, DL, MVT::i32));
4834 }
4835 return SDValue();
4836 }
4838 // Optimize vector operations in scalar value Op on the basis that Op
4839 // is truncated to TruncVT.
4840 SDValue SystemZTargetLowering::combineTruncateExtract(
4841 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
4842 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
4843 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
4844 // of type TruncVT.
4845 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4846 TruncVT.getSizeInBits() % 8 == 0) {
4847 SDValue Vec = Op.getOperand(0);
4848 EVT VecVT = Vec.getValueType();
4849 if (canTreatAsByteVector(VecVT)) {
4850 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
4851 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
4852 unsigned TruncBytes = TruncVT.getStoreSize();
4853 if (BytesPerElement % TruncBytes == 0) {
4854 // Calculate the value of Y' in the above description. We are
4855 // splitting the original elements into Scale equal-sized pieces
4856 // and for truncation purposes want the last (least-significant)
4857 // of these pieces for IndexN. This is easiest to do by calculating
4858 // the start index of the following element and then subtracting 1.
4859 unsigned Scale = BytesPerElement / TruncBytes;
4860 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
4862 // Defer the creation of the bitcast from X to combineExtract,
4863 // which might be able to optimize the extraction.
4864 VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
4865 VecVT.getStoreSize() / TruncBytes);
4866 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
4867 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
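// For example, with (trunc:i8 (extract_vector_elt:i32 %X:v4i32, 1)):
// BytesPerElement = 4 and TruncBytes = 1, so Scale = 4.  Element 1 of the
// v4i32 covers bytes 4-7 of the equivalent v16i8 and, big-endian, its
// least-significant piece is byte NewIndex = (1 + 1) * 4 - 1 = 7, so the
// result is an extraction of element 7 of (bitcast %X to v16i8).
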
SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext (ashr (shl X, C1), C2)) to
  // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
  // cheap as narrower ones.
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    SDValue Inner = N0.getOperand(0);
    if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
      if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
        unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
        unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
        unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
        EVT ShiftVT = N0.getOperand(1).getValueType();
        SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
                                  Inner.getOperand(0));
        SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
                                  DAG.getConstant(NewShlAmt, SDLoc(Inner),
                                                  ShiftVT));
        return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
                           DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
      }
    }
  }
  return SDValue();
}

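// For example, (sext:i64 (ashr:i32 (shl:i32 %X, 24), 24)) becomes
// (ashr:i64 (shl:i64 (anyext:i64 %X), 56), 56): Extra = 64 - 32 = 32 is
// added to both shift amounts, so the sign bits produced by the 64-bit
// SRA subsume the sign extension.
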
SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opcode = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op0.getOpcode() == SystemZISD::BYTE_MASK &&
      cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
    // (z_merge_* 0, 0) -> 0.  This is mostly useful for using VLLEZF
    // for v4f32.
    if (Op1 == N->getOperand(0))
      return Op1;

    // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
    EVT VT = Op1.getValueType();
    unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
    if (ElemBytes <= 4) {
      Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
                SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
      EVT InVT = VT.changeVectorElementTypeToInteger();
      EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
                                   SystemZ::VectorBytes / ElemBytes / 2);
      if (VT != InVT) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
      DCI.AddToWorklist(Op.getNode());
      return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
    }
  }
  return SDValue();
}

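// For example, merging a zero BYTE_MASK with %X:v16i8 becomes a logical
// unpack: ElemBytes = 1, so OutVT = v8i16 (16 / 1 / 2 elements of
// ElemBytes * 16 = 16 bits), and each input byte is widened to an i16
// whose high byte is zero, which is exactly what the merge with zeros
// would have produced.
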
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();
  // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
  // for the extraction to be done on a vMiN value, so that we can use VSTE.
  // If X has wider elements then convert it to:
  // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
  if (MemVT.isInteger()) {
    if (SDValue Value =
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());

      // Rewrite the store with the new form of stored value.
      return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());
    }
  }
  // Combine STORE (BSWAP) into STRVH/STRV/STRVG.
  // See comment in combineBSWAP about volatile accesses.
  if (!SN->isVolatile() &&
      Op1.getOpcode() == ISD::BSWAP &&
      Op1.getNode()->hasOneUse() &&
      (Op1.getValueType() == MVT::i16 ||
       Op1.getValueType() == MVT::i32 ||
       Op1.getValueType() == MVT::i64)) {

    SDValue BSwapOp = Op1.getOperand(0);

    if (BSwapOp.getValueType() == MVT::i16)
      BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);

    SDValue Ops[] = {
      N->getOperand(0), BSwapOp, N->getOperand(2),
      DAG.getValueType(Op1.getValueType())
    };

    return
      DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N),
                              DAG.getVTList(MVT::Other),
                              Ops, MemVT, SN->getMemOperand());
  }
  return SDValue();
}

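// For example, (truncstorei8 (extract_vector_elt %X:v4i32, 1), %Ptr) is
// rewritten so that the extraction happens on (bitcast %X to v16i8) at
// byte index 7, which can then be matched to a single vector store
// element (VSTEB) instead of moving the element through a
// general-purpose register.
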
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Try to simplify a vector extraction.
  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    SDValue Op0 = N->getOperand(0);
    EVT VecVT = Op0.getValueType();
    return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                          IndexN->getZExtValue(), DCI, false);
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // (join_dwords X, X) == (replicate X)
  if (N->getOperand(0) == N->getOperand(1))
    return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
                       N->getOperand(0));
  return SDValue();
}

SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // (fpround (extract_vector_elt X 0))
  // (fpround (extract_vector_elt X 1)) ->
  // (extract_vector_elt (VROUND X) 0)
  // (extract_vector_elt (VROUND X) 1)
  //
  // This is a special case since the target doesn't really support v2f32s.
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op0 = N->getOperand(0);
  if (N->getValueType(0) == MVT::f32 &&
      Op0.hasOneUse() &&
      Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Op0.getOperand(0).getValueType() == MVT::v2f64 &&
      Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
    SDValue Vec = Op0.getOperand(0);
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() &&
          U->hasOneUse() &&
          U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          U->getOperand(0) == Vec &&
          U->getOperand(1).getOpcode() == ISD::Constant &&
          cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
        SDValue OtherRound = SDValue(*U->use_begin(), 0);
        if (OtherRound.getOpcode() == ISD::FP_ROUND &&
            OtherRound.getOperand(0) == SDValue(U, 0) &&
            OtherRound.getValueType() == MVT::f32) {
          SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
                                       MVT::v4f32, Vec);
          DCI.AddToWorklist(VRound.getNode());
          SDValue Extract1 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
                        VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
          DCI.AddToWorklist(Extract1.getNode());
          DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
          SDValue Extract0 =
            DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
                        VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
          DCI.AddToWorklist(Extract0.getNode());
          return Extract0;
        }
      }
    }
  }
  return SDValue();
}

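// Note that the two rounded results are taken from elements 0 and 2 of
// the v4f32 produced by VROUND, not elements 0 and 1: the underlying
// VLEDB instruction writes each shortened result to the left word of the
// corresponding f64 element.
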
SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine BSWAP (LOAD) into LRVH/LRV/LRVG.
  // These loads are allowed to access memory multiple times, so we must
  // check that the loads are not volatile before performing the combine.
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 ||
       N->getValueType(0) == MVT::i64) &&
      !cast<LoadSDNode>(N->getOperand(0))->isVolatile()) {
    SDValue Load = N->getOperand(0);
    LoadSDNode *LD = cast<LoadSDNode>(Load);

    // Create the byte-swapping load.
    SDValue Ops[] = {
      LD->getChain(),    // Chain
      LD->getBasePtr(),  // Ptr
      DAG.getValueType(N->getValueType(0)) // VT
    };
    SDValue BSLoad =
      DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
                              DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                            MVT::i64 : MVT::i32, MVT::Other),
                              Ops, LD->getMemoryVT(), LD->getMemOperand());

    // If this is an i16 load, insert the truncate.
    SDValue ResVal = BSLoad;
    if (N->getValueType(0) == MVT::i16)
      ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);

    // First, combine the bswap away.  This makes the value produced by the
    // load dead.
    DCI.CombineTo(N, ResVal);

    // Next, combine the load away, we give it a bogus result value but a real
    // chain result.  The result value is dead because the bswap is dead.
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

    // Return N so it doesn't get rechecked!
    return SDValue(N, 0);
  }
  return SDValue();
}

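// For example, (bswap (load:i32 %Ptr)) becomes a single byte-reversing
// load (LRV of %Ptr).  For i16 the replacement node produces an i32
// result, so a TRUNCATE back to i16 is inserted afterwards.
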
SDValue SystemZTargetLowering::combineSHIFTROT(
    SDNode *N, DAGCombinerInfo &DCI) const {

  SelectionDAG &DAG = DCI.DAG;

  // Shift/rotate instructions only use the last 6 bits of the second operand
  // register.  If the second operand is the result of an AND with an immediate
  // value that has its last 6 bits set, we can safely remove the AND operation.
  //
  // If the AND operation doesn't have the last 6 bits set, we can't remove it
  // entirely, but we can still truncate it to a 16-bit value.  This prevents
  // us from ending up with a NILL with a signed operand, which will cause the
  // instruction printer to abort.
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    SDValue AndMaskOp = N1->getOperand(1);
    auto *AndMask = dyn_cast<ConstantSDNode>(AndMaskOp);

    // The AND mask is constant
    if (AndMask) {
      auto AmtVal = AndMask->getZExtValue();

      // Bottom 6 bits are set
      if ((AmtVal & 0x3f) == 0x3f) {
        SDValue AndOp = N1->getOperand(0);

        // This is the only use, so remove the node
        if (N1.hasOneUse()) {
          // Combine the AND away
          DCI.CombineTo(N1.getNode(), AndOp);

          // Return N so it isn't rechecked
          return SDValue(N, 0);

        // The node will be reused, so create a new node for this one use
        } else {
          SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
                                        N->getValueType(0), N->getOperand(0),
                                        AndOp);
          DCI.AddToWorklist(Replace.getNode());

          return Replace;
        }

      // We can't remove the AND, but we can use NILL here (normally we would
      // use NILF).  Only keep the last 16 bits of the mask.  The actual
      // transformation will be handled by .td definitions.
      } else if (AmtVal >> 16 != 0) {
        SDValue AndOp = N1->getOperand(0);

        auto NewMask = DAG.getConstant(AndMask->getZExtValue() & 0x0000ffff,
                                       SDLoc(AndMaskOp),
                                       AndMaskOp.getValueType());

        auto NewAnd = DAG.getNode(N1.getOpcode(), SDLoc(N1), N1.getValueType(),
                                  AndOp, NewMask);

        SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
                                      N->getValueType(0), N->getOperand(0),
                                      NewAnd);
        DCI.AddToWorklist(Replace.getNode());

        return Replace;
      }
    }
  }

  return SDValue();
}

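// For example, (shl %X, (and %Y, 63)) becomes (shl %X, %Y) outright,
// since the shift ignores all but the low 6 bits of the amount anyway.
// With a wider mask such as 0xf001f, only the low 16 bits (0x001f) are
// kept; the low 6 bits are unchanged, so the shift amount is unaffected.
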
SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  switch(N->getOpcode()) {
  default: break;
  case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI);
  case SystemZISD::MERGE_HIGH:
  case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI);
  case ISD::STORE: return combineSTORE(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
  case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
  case ISD::FP_ROUND: return combineFP_ROUND(N, DCI);
  case ISD::BSWAP: return combineBSWAP(N, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL: return combineSHIFTROT(N, DCI);
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Create a new basic block after MBB.
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
  MachineFunction &MF = *MBB->getParent();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
  MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
  return NewMBB;
}

// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
                                          MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Split MBB before MI and return the new block (the one that contains MI).
static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
                                           MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Force base value Base into a register before MI.  Return the register.
static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
    .addOperand(Base).addImm(0).addReg(0);
  return Reg;
}

// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
                                  MachineBasicBlock *MBB,
                                  unsigned LOCROpcode) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned TrueReg = MI.getOperand(1).getReg();
  unsigned FalseReg = MI.getOperand(2).getReg();
  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
  DebugLoc DL = MI.getDebugLoc();

  // Use LOCROpcode if possible.
  if (LOCROpcode && Subtarget.hasLoadStoreOnCond()) {
    BuildMI(*MBB, MI, DL, TII->get(LOCROpcode), DestReg)
      .addReg(FalseReg).addReg(TrueReg)
      .addImm(CCValid).addImm(CCMask);
    MI.eraseFromParent();
    return MBB;
  }

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  MBB->addSuccessor(JoinMBB);

  //  JoinMBB:
  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
  //  ...
  MBB = JoinMBB;
  BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
    .addReg(TrueReg).addMBB(StartMBB)
    .addReg(FalseReg).addMBB(FalseMBB);

  MI.eraseFromParent();
  return JoinMBB;
}

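// When the load/store-on-condition facility is available, the whole
// diamond above collapses into one LOCR/LOCGR: %DestReg starts out as
// %FalseReg and is overwritten with %TrueReg only when CC is in CCMask,
// mirroring the PHI in JoinMBB.
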
// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.  If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
                                                        MachineBasicBlock *MBB,
                                                        unsigned StoreOpcode,
                                                        unsigned STOCOpcode,
                                                        bool Invert) const {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());

  unsigned SrcReg = MI.getOperand(0).getReg();
  MachineOperand Base = MI.getOperand(1);
  int64_t Disp = MI.getOperand(2).getImm();
  unsigned IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  DebugLoc DL = MI.getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // Use STOCOpcode if possible.  We could use different store patterns in
  // order to avoid matching the index register, but the performance trade-offs
  // might be more complicated in that case.
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    if (Invert)
      CCMask ^= CCValid;
    BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
      .addReg(SrcReg).addOperand(Base).addImm(Disp)
      .addImm(CCValid).addImm(CCMask);
    MI.eraseFromParent();
    return MBB;
  }

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask ^= CCValid;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  //  FalseMBB:
  //   store %SrcReg, %Disp(%Index,%Base)
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  BuildMI(MBB, DL, TII->get(StoreOpcode))
    .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
  MBB->addSuccessor(JoinMBB);

  MI.eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands.  Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
    unsigned BitSize, bool Invert) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  // Src2 can be a register or immediate.
  unsigned Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
  unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned NewVal = (BinOpcode || IsSubWord ?
                     MRI.createVirtualRegister(RC) : Src2.getReg());
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    unsigned Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
      .addReg(RotatedOldVal).addOperand(Src2);
    if (BitSize <= 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(-1U << (32 - BitSize));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      unsigned Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
      .addReg(RotatedOldVal).addOperand(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

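// For a partword NAND (ATOMIC_LOADW_NRi) on an 8-bit field, the loop body
// ANDs the rotated word and then flips just the field: BitSize = 8 gives
// the XILF immediate -1U << 24 = 0xff000000, inverting only the field
// held in the top byte of the rotated value.
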
// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
    unsigned KeepOldMask, unsigned BitSize) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  unsigned Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  unsigned Src2 = MI.getOperand(3).getReg();
  unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned NewVal = MRI.createVirtualRegister(RC);
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  //  UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  //  UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  unsigned Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  unsigned OrigCmpVal = MI.getOperand(3).getReg();
  unsigned OrigSwapVal = MI.getOperand(4).getReg();
  unsigned BitShift = MI.getOperand(5).getReg();
  unsigned NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  DebugLoc DL = MI.getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned CmpVal = MRI.createVirtualRegister(RC);
  unsigned SwapVal = MRI.createVirtualRegister(RC);
  unsigned StoreVal = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);

  //  StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal      = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal      = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal     = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest        = RLL %OldVal, BitSize(%BitShift)
  //                    ^^ The low BitSize bits contain the field
  //                       of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                    ^^ Replace the upper 32-BitSize bits of the
  //                       comparison value with those that we loaded,
  //                       so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  //  SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                     ^^ Replace the upper 32-BitSize bits of the new
  //                        value with those that we loaded.
  //   %StoreVal     = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                     ^^ Rotate the new field to its proper position.
  //   %RetryOldVal  = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to ExitMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

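// For a 16-bit field (BitSize = 16), the first RISBG32 above is
// RISBG32 %RetryCmpVal, %CmpVal, %Dest, 32, 47, 0: bits 32-47 (the upper
// half of the 32-bit word) are taken from the loaded value in %Dest,
// while bits 48-63 keep the expected field from %CmpVal, so the plain CR
// compares whole words in which only the field can differ.
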
// Emit an extension from a GR32 or GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".  SubReg is subreg_l32 when extending a GR32
// and subreg_l64 when extending a GR64.
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven,
                                                     unsigned SubReg) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dest = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SubReg);

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
  uint64_t DestDisp = MI.getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
  uint64_t SrcDisp = MI.getOperand(3).getImm();
  uint64_t Length = MI.getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : nullptr);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI.getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    uint64_t StartCountReg = MI.getOperand(5).getReg();
    uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
    uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
                             forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
    uint64_t NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    //  StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    //  LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    //  NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(DestBase)
        .addImm(DestDisp)
        .addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(SrcBase)
        .addImm(SrcDisp)
        .addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
      .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
      .addOperand(SrcBase).addImm(SrcDisp);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI.eraseFromParent();
  return MBB;
}

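// For example, a 2000-byte MVCLoop whose trip-count operand is
// 2000/256 = 7 runs seven full 256-byte MVCs in the loop, then copies the
// remaining 2000 & 255 = 208 bytes with one straight-line MVC.
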
// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

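// For example, a CLSTLoop for strcmp repeats CLST with the updated
// addresses while CC == 3, i.e. while the instruction stopped after a
// CPU-determined number of bytes rather than finding an inequality or a
// terminator.
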
// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {
  MachineFunction &MF = *MBB->getParent();
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  // Update opcode.
  MI.setDesc(TII->get(Opcode));

  // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
  // Make sure to add the corresponding GRSM bits if they are missing.
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);

  // Add GPR clobbers.
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      unsigned Reg = SystemZMC::GR64Regs[I];
      MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }

  // Add FPR/VR clobbers.
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      for (int I = 0; I < 32; I++) {
        unsigned Reg = SystemZMC::VR128Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    } else {
      for (int I = 0; I < 16; I++) {
        unsigned Reg = SystemZMC::FP64Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    }
  }

  return MBB;
}

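// Each GRSM bit in GPRControlBit covers an even/odd register pair, so
// forcing GPRControlBit[15] = 0x0100 makes TBEGIN save and restore the
// r14/r15 pair, keeping the stack pointer valid after a transaction
// abort; GPRControlBit[11] = 0x0400 does the same for the r10/r11 pair
// when r11 is used as the frame pointer.
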
MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  DebugLoc DL = MI.getDebugLoc();

  unsigned SrcReg = MI.getOperand(0).getReg();

  // Create new virtual register of the same class as source.
  const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
  unsigned DstReg = MRI->createVirtualRegister(RC);

  // Replace pseudo with a normal load-and-test that models the def as
  // well.
  BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
    .addReg(SrcReg);
  MI.eraseFromParent();

  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::Select32Mux:
    return emitSelect(MI, MBB,
                      Subtarget.hasLoadStoreOnCond2() ? SystemZ::LOCRMux : 0);
  case SystemZ::Select32:
    return emitSelect(MI, MBB, SystemZ::LOCR);
  case SystemZ::Select64:
    return emitSelect(MI, MBB, SystemZ::LOCGR);
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB, 0);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::AEXT128_64:
    return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
  case SystemZ::ZEXT128_32:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
  case SystemZ::ZEXT128_64:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l64);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}