//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
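// For example, with CC in bits 29:28 of the IPM result (SystemZ::IPM_CC == 28),
// a 0/1 value for "CC == 0" can be produced with XORValue = 0,
// AddValue = -(1 << 28) and Bit = 31: the sum is negative (bit 31 set)
// exactly when CC is 0.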
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  unsigned XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect branch.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
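  // (Roughly: the custom lowering turns these into ATOMIC_LOADW_* nodes
  // that operate on the containing 4-byte aligned word with a
  // compare-and-swap loop; see lowerATOMIC_LOAD_OP and emitAtomicLoadBinary
  // elsewhere in this file.)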
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool,     PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress,    PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress,     PtrVT, Custom);
  setOperationAction(ISD::JumpTable,        PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD,  VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such.  In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner.  ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64,  MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY,  MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::ROTL);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
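  // (The MVC-based sequence stores the byte once with STC and then copies
  // bytes [0, len-1) over [1, len) with an overlapping MVC, replicating the
  // byte; the target-specific expansion lives in SystemZSelectionDAGInfo.)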
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AM, Type *Ty,
                                                  unsigned AS) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
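  // (This accepts the D(X,B) form that z/Architecture memory instructions
  // use: a base register, an optional unscaled index register and a signed
  // 20-bit displacement, e.g. -8(%r3,%r2); scaled indexing has no encoding.)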
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isFoldableMemAccessOffset(Instruction *I,
                                                      int64_t Offset) const {
  // This only applies to z13.
  if (!Subtarget.hasVector())
    return true;

  // * Use LDE instead of LE/LEY to avoid partial register
  //   dependencies (LDE only supports small offsets).
  // * Utilize the vector registers to hold floating point
  //   values (vector load / store instructions only support small
  //   offsets).

  assert (isa<LoadInst>(I) || isa<StoreInst>(I));
  Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                       I->getOperand(0)->getType());
  bool IsFPAccess = MemAccessTy->isFloatingPointTy();
  bool IsVectorAccess = MemAccessTy->isVectorTy();

  // A store of an extracted vector element will be combined into a VSTE type
  // instruction.
  if (!IsVectorAccess && isa<StoreInst>(I)) {
    Value *DataOp = I->getOperand(0);
    if (isa<ExtractElementInst>(DataOp))
      IsVectorAccess = true;
  }

  // A load which gets inserted into a vector element will be combined into a
  // VLE type instruction.
  if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
    User *LoadUser = *I->user_begin();
    if (isa<InsertElementInst>(LoadUser))
      IsVectorAccess = true;
  }

  if (!isUInt<12>(Offset) && (IsFPAccess || IsVectorAccess))
    return false;

  return true;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
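// (For example, a short vector such as <2 x i32> that was passed on the
// stack arrives here as an i64; the BCvt case below rebuilds a v2i64 with
// an undefined high half and bitcasts it to the argument's vector type.)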
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // sibling calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}

// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
    Opcode = SystemZISD::PACKS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    Opcode = SystemZISD::PACKLS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
    Opcode = SystemZISD::VICMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
    Opcode = SystemZISD::VICMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
    Opcode = SystemZISD::VICMPHLS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vtm:
    Opcode = SystemZISD::VTM;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
    Opcode = SystemZISD::VFAE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
    Opcode = SystemZISD::VFAEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
    Opcode = SystemZISD::VFEE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
    Opcode = SystemZISD::VFEEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
    Opcode = SystemZISD::VFENE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
    Opcode = SystemZISD::VFENEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
    Opcode = SystemZISD::VISTR_CC;
    CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
    return true;

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
    Opcode = SystemZISD::VSTRC_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
    Opcode = SystemZISD::VSTRCZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfcedbs:
    Opcode = SystemZISD::VFCMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchdbs:
    Opcode = SystemZISD::VFCMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchedbs:
    Opcode = SystemZISD::VFCMPHES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vftcidb:
    Opcode = SystemZISD::VFTCI;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_tdc:
    Opcode = SystemZISD::TDC;
    CCValid = SystemZ::CCMASK_TDC;
    return true;

  default:
    return false;
  }
}

// Emit an intrinsic with chain with a glued value instead of its CC result.
static SDValue emitIntrinsicWithChainAndGlue(SelectionDAG &DAG, SDValue Op,
                                             unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  SDVTList RawVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
  SDValue OldChain = SDValue(Op.getNode(), 1);
  SDValue NewChain = SDValue(Intr.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr;
}

// Emit an intrinsic with a glued value instead of its CC result.
static SDValue emitIntrinsicWithGlue(SelectionDAG &DAG, SDValue Op,
                                     unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  if (Op->getNumValues() == 1)
    return DAG.getNode(Opcode, SDLoc(Op), MVT::Glue, Ops);
  assert(Op->getNumValues() == 2 && "Expected exactly one non-CC result");
  SDVTList RawVTs = DAG.getVTList(Op->getValueType(0), MVT::Glue);
  return DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
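// For example, the CONV(GT) instantiation below expands to:
//   case ISD::SETGT:  return SystemZ::CCMASK_CMP_GT;
//   case ISD::SETOGT: return SystemZ::CCMASK_CMP_GT;
//   case ISD::SETUGT: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_GT;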
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value.  Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
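
  // (As a worked example of the above: for CCMask == CCMASK_0 the sequence
  // is (X - (1 << IPM_CC)) >> 31, where X has CC in bits 29:28 and zeros in
  // bits 31:30; the subtraction makes the value negative, i.e. sets bit 31,
  // exactly when CC is 0.)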

  // Next try inverting the value and testing a bit.  0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All of these can
  // be done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
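// For example, a signed "greater than -1" becomes "greater than or equal
// to 0": XORing CCMASK_CMP_EQ into a GT mask yields GE.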
1628 static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
1629 if (C.ICmpType == SystemZICMP::UnsignedOnly)
1632 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
1636 int64_t Value = ConstOp1->getSExtValue();
1637 if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
1638 (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
1639 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
1640 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
1641 C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
1642 C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
1643 }
1644 }
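// For instance, a signed "X > -1" is rewritten here as "X >= 0" and
// "X < 1" as "X <= 0", allowing later code to select compare-with-zero
// or LOAD AND TEST forms.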
1646 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1647 // adjust the operands as necessary.
1648 static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
1649 Comparison &C) {
1650 // For us to make any changes, it must be a comparison between a single-use
1651 // load and a constant.
1652 if (!C.Op0.hasOneUse() ||
1653 C.Op0.getOpcode() != ISD::LOAD ||
1654 C.Op1.getOpcode() != ISD::Constant)
1655 return;
1657 // We must have an 8- or 16-bit load.
1658 auto *Load = cast<LoadSDNode>(C.Op0);
1659 unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
1660 if (NumBits != 8 && NumBits != 16)
1661 return;
1663 // The load must be an extending one and the constant must be within the
1664 // range of the unextended value.
1665 auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1666 uint64_t Value = ConstOp1->getZExtValue();
1667 uint64_t Mask = (1 << NumBits) - 1;
1668 if (Load->getExtensionType() == ISD::SEXTLOAD) {
1669 // Make sure that ConstOp1 is in range of C.Op0.
1670 int64_t SignedValue = ConstOp1->getSExtValue();
1671 if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
1672 return;
1673 if (C.ICmpType != SystemZICMP::SignedOnly) {
1674 // Unsigned comparison between two sign-extended values is equivalent
1675 // to unsigned comparison between two zero-extended values.
1676 Value &= Mask;
1677 } else if (NumBits == 8) {
1678 // Try to treat the comparison as unsigned, so that we can use CLI.
1679 // Adjust CCMask and Value as necessary.
1680 if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
1681 // Test whether the high bit of the byte is set.
1682 Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
1683 else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
1684 // Test whether the high bit of the byte is clear.
1685 Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
1686 else
1687 // No instruction exists for this combination.
1688 return;
1689 C.ICmpType = SystemZICMP::UnsignedOnly;
1690 }
1691 } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
1692 if (Value > Mask)
1693 return;
1694 // If the constant is in range, we can use any comparison.
1695 C.ICmpType = SystemZICMP::Any;
1696 } else
1697 return;
1699 // Make sure that the first operand is an i32 of the right extension type.
1700 ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
1701 ISD::SEXTLOAD :
1702 ISD::ZEXTLOAD);
1703 if (C.Op0.getValueType() != MVT::i32 ||
1704 Load->getExtensionType() != ExtType)
1705 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
1706 Load->getBasePtr(), Load->getPointerInfo(),
1707 Load->getMemoryVT(), Load->getAlignment(),
1708 Load->getMemOperand()->getFlags());
1710 // Make sure that the second operand is an i32 with the right value.
1711 if (C.Op1.getValueType() != MVT::i32 ||
1712 Value != ConstOp1->getZExtValue())
1713 C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
1714 }
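// For example, a signed test "(i8 sextload X) < 0" becomes the unsigned
// "(i8 zextload X) > 127" above, which matches CLI's unsigned byte
// immediate form.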
1716 // Return true if Op is either an unextended load, or a load suitable
1717 // for integer register-memory comparisons of type ICmpType.
1718 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
1719 auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
1720 if (Load) {
1721 // There are no instructions to compare a register with a memory byte.
1722 if (Load->getMemoryVT() == MVT::i8)
1723 return false;
1724 // Otherwise decide on extension type.
1725 switch (Load->getExtensionType()) {
1726 case ISD::NON_EXTLOAD:
1727 return true;
1728 case ISD::SEXTLOAD:
1729 return ICmpType != SystemZICMP::UnsignedOnly;
1730 case ISD::ZEXTLOAD:
1731 return ICmpType != SystemZICMP::SignedOnly;
1732 default:
1733 break;
1734 }
1735 }
1736 return false;
1737 }
1739 // Return true if it is better to swap the operands of C.
1740 static bool shouldSwapCmpOperands(const Comparison &C) {
1741 // Leave f128 comparisons alone, since they have no memory forms.
1742 if (C.Op0.getValueType() == MVT::f128)
1743 return false;
1745 // Always keep a floating-point constant second, since comparisons with
1746 // zero can use LOAD TEST and comparisons with other constants make a
1747 // natural memory operand.
1748 if (isa<ConstantFPSDNode>(C.Op1))
1749 return false;
1751 // Never swap comparisons with zero since there are many ways to optimize
1752 // those later.
1753 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
1754 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
1755 return false;
1757 // Also keep natural memory operands second if the loaded value is
1758 // only used here. Several comparisons have memory forms.
1759 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
1760 return false;
1762 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
1763 // In that case we generally prefer the memory to be second.
1764 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
1765 // The only exceptions are when the second operand is a constant and
1766 // we can use things like CHHSI.
1767 if (!ConstOp1)
1768 return true;
1769 // The unsigned memory-immediate instructions can handle 16-bit
1770 // unsigned integers.
1771 if (C.ICmpType != SystemZICMP::SignedOnly &&
1772 isUInt<16>(ConstOp1->getZExtValue()))
1773 return false;
1774 // The signed memory-immediate instructions can handle 16-bit
1775 // signed integers.
1776 if (C.ICmpType != SystemZICMP::UnsignedOnly &&
1777 isInt<16>(ConstOp1->getSExtValue()))
1778 return false;
1779 return true;
1780 }
1782 // Try to promote the use of CGFR and CLGFR.
1783 unsigned Opcode0 = C.Op0.getOpcode();
1784 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
1785 return true;
1786 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
1787 return true;
1788 if (C.ICmpType != SystemZICMP::SignedOnly &&
1789 Opcode0 == ISD::AND &&
1790 C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
1791 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
1792 return true;
1794 return false;
1795 }
1797 // Return a version of comparison CC mask CCMask in which the LT and GT
1798 // actions are swapped.
1799 static unsigned reverseCCMask(unsigned CCMask) {
1800 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
1801 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
1802 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
1803 (CCMask & SystemZ::CCMASK_CMP_UO));
1804 }
1806 // Check whether C tests for equality between X and Y and whether X - Y
1807 // or Y - X is also computed. In that case it's better to compare the
1808 // result of the subtraction against zero.
1809 static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
1810 Comparison &C) {
1811 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
1812 C.CCMask == SystemZ::CCMASK_CMP_NE) {
1813 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
1814 SDNode *N = *I;
1815 if (N->getOpcode() == ISD::SUB &&
1816 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
1817 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
1818 C.Op0 = SDValue(N, 0);
1819 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
1820 return;
1821 }
1822 }
1823 }
1824 }
1826 // Check whether C compares a floating-point value with zero and if that
1827 // floating-point value is also negated. In this case we can use the
1828 // negation to set CC, so avoiding separate LOAD AND TEST and
1829 // LOAD (NEGATIVE/COMPLEMENT) instructions.
1830 static void adjustForFNeg(Comparison &C) {
1831 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
1832 if (C1 && C1->isZero()) {
1833 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
1834 SDNode *N = *I;
1835 if (N->getOpcode() == ISD::FNEG) {
1836 C.Op0 = SDValue(N, 0);
1837 C.CCMask = reverseCCMask(C.CCMask);
1838 return;
1839 }
1840 }
1841 }
1842 }
1844 // Check whether C compares (shl X, 32) with 0 and whether X is
1845 // also sign-extended. In that case it is better to test the result
1846 // of the sign extension using LTGFR.
1848 // This case is important because InstCombine transforms a comparison
1849 // with (sext (trunc X)) into a comparison with (shl X, 32).
1850 static void adjustForLTGFR(Comparison &C) {
1851 // Check for a comparison between (shl X, 32) and 0.
1852 if (C.Op0.getOpcode() == ISD::SHL &&
1853 C.Op0.getValueType() == MVT::i64 &&
1854 C.Op1.getOpcode() == ISD::Constant &&
1855 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1856 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
1857 if (C1 && C1->getZExtValue() == 32) {
1858 SDValue ShlOp0 = C.Op0.getOperand(0);
1859 // See whether X has any SIGN_EXTEND_INREG uses.
1860 for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
1861 SDNode *N = *I;
1862 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
1863 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
1864 C.Op0 = SDValue(N, 0);
1865 return;
1866 }
1867 }
1868 }
1869 }
1870 }
1872 // If C compares the truncation of an extending load, try to compare
1873 // the untruncated value instead. This exposes more opportunities to
1874 // use LT(G).
1875 static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
1876 Comparison &C) {
1877 if (C.Op0.getOpcode() == ISD::TRUNCATE &&
1878 C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
1879 C.Op1.getOpcode() == ISD::Constant &&
1880 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1881 auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
1882 if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
1883 unsigned Type = L->getExtensionType();
1884 if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
1885 (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
1886 C.Op0 = C.Op0.getOperand(0);
1887 C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
1888 }
1889 }
1890 }
1891 }
1893 // Return true if shift operation N has an in-range constant shift value.
1894 // Store it in ShiftVal if so.
1895 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
1896 auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
1897 if (!Shift)
1898 return false;
1900 uint64_t Amount = Shift->getZExtValue();
1901 if (Amount >= N.getValueSizeInBits())
1902 return false;
1904 ShiftVal = Amount;
1905 return true;
1906 }
1908 // Check whether an AND with Mask is suitable for a TEST UNDER MASK
1909 // instruction and whether the CC value is descriptive enough to handle
1910 // a comparison of type Opcode between the AND result and CmpVal.
1911 // CCMask says which comparison result is being tested and BitSize is
1912 // the number of bits in the operands. If TEST UNDER MASK can be used,
1913 // return the corresponding CC mask, otherwise return 0.
1914 static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
1915 uint64_t Mask, uint64_t CmpVal,
1916 unsigned ICmpType) {
1917 assert(Mask != 0 && "ANDs with zero should have been removed by now");
1919 // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
1920 if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
1921 !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
1922 return 0;
1924 // Work out the masks for the lowest and highest bits.
1925 unsigned HighShift = 63 - countLeadingZeros(Mask);
1926 uint64_t High = uint64_t(1) << HighShift;
1927 uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
1929 // Signed ordered comparisons are effectively unsigned if the sign
1930 // bit is clear.
1931 bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
1933 // Check for equality comparisons with 0, or the equivalent.
1934 if (CmpVal == 0) {
1935 if (CCMask == SystemZ::CCMASK_CMP_EQ)
1936 return SystemZ::CCMASK_TM_ALL_0;
1937 if (CCMask == SystemZ::CCMASK_CMP_NE)
1938 return SystemZ::CCMASK_TM_SOME_1;
1939 }
1940 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
1941 if (CCMask == SystemZ::CCMASK_CMP_LT)
1942 return SystemZ::CCMASK_TM_ALL_0;
1943 if (CCMask == SystemZ::CCMASK_CMP_GE)
1944 return SystemZ::CCMASK_TM_SOME_1;
1945 }
1946 if (EffectivelyUnsigned && CmpVal < Low) {
1947 if (CCMask == SystemZ::CCMASK_CMP_LE)
1948 return SystemZ::CCMASK_TM_ALL_0;
1949 if (CCMask == SystemZ::CCMASK_CMP_GT)
1950 return SystemZ::CCMASK_TM_SOME_1;
1951 }
1953 // Check for equality comparisons with the mask, or the equivalent.
1954 if (CmpVal == Mask) {
1955 if (CCMask == SystemZ::CCMASK_CMP_EQ)
1956 return SystemZ::CCMASK_TM_ALL_1;
1957 if (CCMask == SystemZ::CCMASK_CMP_NE)
1958 return SystemZ::CCMASK_TM_SOME_0;
1959 }
1960 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
1961 if (CCMask == SystemZ::CCMASK_CMP_GT)
1962 return SystemZ::CCMASK_TM_ALL_1;
1963 if (CCMask == SystemZ::CCMASK_CMP_LE)
1964 return SystemZ::CCMASK_TM_SOME_0;
1965 }
1966 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
1967 if (CCMask == SystemZ::CCMASK_CMP_GE)
1968 return SystemZ::CCMASK_TM_ALL_1;
1969 if (CCMask == SystemZ::CCMASK_CMP_LT)
1970 return SystemZ::CCMASK_TM_SOME_0;
1971 }
1973 // Check for ordered comparisons with the top bit.
1974 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
1975 if (CCMask == SystemZ::CCMASK_CMP_LE)
1976 return SystemZ::CCMASK_TM_MSB_0;
1977 if (CCMask == SystemZ::CCMASK_CMP_GT)
1978 return SystemZ::CCMASK_TM_MSB_1;
1979 }
1980 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
1981 if (CCMask == SystemZ::CCMASK_CMP_LT)
1982 return SystemZ::CCMASK_TM_MSB_0;
1983 if (CCMask == SystemZ::CCMASK_CMP_GE)
1984 return SystemZ::CCMASK_TM_MSB_1;
1985 }
1987 // If there are just two bits, we can do equality checks for Low and High
1988 // as well.
1989 if (Mask == Low + High) {
1990 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
1991 return SystemZ::CCMASK_TM_MIXED_MSB_0;
1992 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
1993 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
1994 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
1995 return SystemZ::CCMASK_TM_MIXED_MSB_1;
1996 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
1997 return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
1998 }
2000 // Looks like we've exhausted our options.
2001 return 0;
2002 }
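// As an illustration, "(X & 0xc000) == 0x8000" hits the two-bit case
// above (Mask == Low + High, CmpVal == High), so it returns
// CCMASK_TM_MIXED_MSB_1 and the test can be emitted as TMLL X, 0xc000.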
2004 // See whether C can be implemented as a TEST UNDER MASK instruction.
2005 // Update the arguments with the TM version if so.
2006 static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
2007 Comparison &C) {
2008 // Check that we have a comparison with a constant.
2009 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2010 if (!ConstOp1)
2011 return;
2012 uint64_t CmpVal = ConstOp1->getZExtValue();
2014 // Check whether the nonconstant input is an AND with a constant mask.
2015 Comparison NewC(C);
2016 uint64_t MaskVal;
2017 ConstantSDNode *Mask = nullptr;
2018 if (C.Op0.getOpcode() == ISD::AND) {
2019 NewC.Op0 = C.Op0.getOperand(0);
2020 NewC.Op1 = C.Op0.getOperand(1);
2021 Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2022 if (!Mask)
2023 return;
2024 MaskVal = Mask->getZExtValue();
2025 } else {
2026 // There is no instruction to compare with a 64-bit immediate
2027 // so use TMHH instead if possible. We need an unsigned ordered
2028 // comparison with an i64 immediate.
2029 if (NewC.Op0.getValueType() != MVT::i64 ||
2030 NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2031 NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
2032 NewC.ICmpType == SystemZICMP::SignedOnly)
2033 return;
2034 // Convert LE and GT comparisons into LT and GE.
2035 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2036 NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
2037 if (CmpVal == uint64_t(-1))
2038 return;
2039 CmpVal += 1;
2040 NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2041 }
2042 // If the low N bits of Op1 are zero then the low N bits of Op0 can
2043 // be masked off without changing the result.
2044 MaskVal = -(CmpVal & -CmpVal);
2045 NewC.ICmpType = SystemZICMP::UnsignedOnly;
2046 }
2047 if (!MaskVal)
2048 return;
2050 // Check whether the combination of mask, comparison value and comparison
2051 // type are suitable.
2052 unsigned BitSize = NewC.Op0.getValueSizeInBits();
2053 unsigned NewCCMask, ShiftVal;
2054 if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2055 NewC.Op0.getOpcode() == ISD::SHL &&
2056 isSimpleShift(NewC.Op0, ShiftVal) &&
2057 (MaskVal >> ShiftVal != 0) &&
2058 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2059 MaskVal >> ShiftVal,
2060 CmpVal >> ShiftVal,
2061 SystemZICMP::Any))) {
2062 NewC.Op0 = NewC.Op0.getOperand(0);
2063 MaskVal >>= ShiftVal;
2064 } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2065 NewC.Op0.getOpcode() == ISD::SRL &&
2066 isSimpleShift(NewC.Op0, ShiftVal) &&
2067 (MaskVal << ShiftVal != 0) &&
2068 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2069 MaskVal << ShiftVal,
2070 CmpVal << ShiftVal,
2071 SystemZICMP::UnsignedOnly))) {
2072 NewC.Op0 = NewC.Op0.getOperand(0);
2073 MaskVal <<= ShiftVal;
2074 } else {
2075 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
2076 NewC.ICmpType);
2077 if (!NewCCMask)
2078 return;
2079 }
2081 // Go ahead and make the change.
2082 C.Opcode = SystemZISD::TM;
2083 C.Op0 = NewC.Op0;
2084 if (Mask && Mask->getZExtValue() == MaskVal)
2085 C.Op1 = SDValue(Mask, 0);
2086 else
2087 C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
2088 C.CCValid = SystemZ::CCMASK_TM;
2089 C.CCMask = NewCCMask;
2090 }
2092 // Return a Comparison that tests the condition-code result of intrinsic
2093 // node Call against constant integer CC using comparison code Cond.
2094 // Opcode is the opcode of the SystemZISD operation for the intrinsic
2095 // and CCValid is the set of possible condition-code results.
2096 static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
2097 SDValue Call, unsigned CCValid, uint64_t CC,
2098 ISD::CondCode Cond) {
2099 Comparison C(Call, SDValue());
2100 C.Opcode = Opcode;
2101 C.CCValid = CCValid;
2102 if (Cond == ISD::SETEQ)
2103 // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
2104 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2105 else if (Cond == ISD::SETNE)
2106 // ...and the inverse of that.
2107 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2108 else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2109 // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2110 // always true for CC>3.
2111 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2112 else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2113 // ...and the inverse of that.
2114 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2115 else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2116 // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2117 // always true for CC>3.
2118 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2119 else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2120 // ...and the inverse of that.
2121 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2122 else
2123 llvm_unreachable("Unexpected integer comparison type");
2124 C.CCMask &= CCValid;
2125 return C;
2126 }
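// For example, SETULT with CC == 2 yields CCMask = (~0U << 2) & CCValid,
// i.e. bits 3 and 2, which is true exactly when the intrinsic sets CC 0
// or CC 1 (bit 3 - N corresponds to CC value N).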
2128 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2129 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
2130 ISD::CondCode Cond, const SDLoc &DL) {
2131 if (CmpOp1.getOpcode() == ISD::Constant) {
2132 uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2133 unsigned Opcode, CCValid;
2134 if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
2135 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
2136 isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
2137 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2138 if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2139 CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
2140 isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
2141 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2142 }
2143 Comparison C(CmpOp0, CmpOp1);
2144 C.CCMask = CCMaskForCondCode(Cond);
2145 if (C.Op0.getValueType().isFloatingPoint()) {
2146 C.CCValid = SystemZ::CCMASK_FCMP;
2147 C.Opcode = SystemZISD::FCMP;
2148 adjustForFNeg(C);
2149 } else {
2150 C.CCValid = SystemZ::CCMASK_ICMP;
2151 C.Opcode = SystemZISD::ICMP;
2152 // Choose the type of comparison. Equality and inequality tests can
2153 // use either signed or unsigned comparisons. The choice also doesn't
2154 // matter if both sign bits are known to be clear. In those cases we
2155 // want to give the main isel code the freedom to choose whichever
2156 // form fits best.
2157 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2158 C.CCMask == SystemZ::CCMASK_CMP_NE ||
2159 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
2160 C.ICmpType = SystemZICMP::Any;
2161 else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
2162 C.ICmpType = SystemZICMP::UnsignedOnly;
2163 else
2164 C.ICmpType = SystemZICMP::SignedOnly;
2165 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
2166 adjustZeroCmp(DAG, DL, C);
2167 adjustSubwordCmp(DAG, DL, C);
2168 adjustForSubtraction(DAG, DL, C);
2169 adjustForLTGFR(C);
2170 adjustICmpTruncate(DAG, DL, C);
2171 }
2173 if (shouldSwapCmpOperands(C)) {
2174 std::swap(C.Op0, C.Op1);
2175 C.CCMask = reverseCCMask(C.CCMask);
2176 }
2178 adjustForTestUnderMask(DAG, DL, C);
2179 return C;
2180 }
2182 // Emit the comparison instruction described by C.
2183 static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
2184 if (!C.Op1.getNode()) {
2185 SDValue Op;
2186 switch (C.Op0.getOpcode()) {
2187 case ISD::INTRINSIC_W_CHAIN:
2188 Op = emitIntrinsicWithChainAndGlue(DAG, C.Op0, C.Opcode);
2189 break;
2190 case ISD::INTRINSIC_WO_CHAIN:
2191 Op = emitIntrinsicWithGlue(DAG, C.Op0, C.Opcode);
2192 break;
2193 default:
2194 llvm_unreachable("Invalid comparison operands");
2195 }
2196 return SDValue(Op.getNode(), Op->getNumValues() - 1);
2197 }
2198 if (C.Opcode == SystemZISD::ICMP)
2199 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1,
2200 DAG.getConstant(C.ICmpType, DL, MVT::i32));
2201 if (C.Opcode == SystemZISD::TM) {
2202 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
2203 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
2204 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1,
2205 DAG.getConstant(RegisterOnly, DL, MVT::i32));
2206 }
2207 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1);
2208 }
2210 // Implement a 32-bit *MUL_LOHI operation by extending both operands to
2211 // 64 bits. Extend is the extension type to use. Store the high part
2212 // in Hi and the low part in Lo.
2213 static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
2214 SDValue Op0, SDValue Op1, SDValue &Hi,
2215 SDValue &Lo) {
2216 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
2217 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
2218 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
2219 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
2220 DAG.getConstant(32, DL, MVT::i64));
2221 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
2222 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
2223 }
2225 // Lower a binary operation that produces two VT results, one in each
2226 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
2227 // and Opcode performs the GR128 operation. Store the even register result
2228 // in Even and the odd register result in Odd.
2229 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2230 unsigned Opcode, SDValue Op0, SDValue Op1,
2231 SDValue &Even, SDValue &Odd) {
2232 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
2233 bool Is32Bit = is32Bit(VT);
2234 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
2235 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
2236 }
2238 // Return an i32 value that is 1 if the CC value produced by Glue is
2239 // in the mask CCMask and 0 otherwise. CC is known to have a value
2240 // in CCValid, so other values can be ignored.
2241 static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue Glue,
2242 unsigned CCValid, unsigned CCMask) {
2243 IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
2244 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
2246 if (Conversion.XORValue)
2247 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
2248 DAG.getConstant(Conversion.XORValue, DL, MVT::i32));
2250 if (Conversion.AddValue)
2251 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
2252 DAG.getConstant(Conversion.AddValue, DL, MVT::i32));
2254 // The SHR/AND sequence should get optimized to an RISBG.
2255 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
2256 DAG.getConstant(Conversion.Bit, DL, MVT::i32));
2257 if (Conversion.Bit != 31)
2258 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
2259 DAG.getConstant(1, DL, MVT::i32));
2261 return Result;
2262 }
2263 // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2264 // be done directly. IsFP is true if CC is for a floating-point rather than
2265 // integer comparison.
2266 static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
2267 switch (CC) {
2268 case ISD::SETOEQ:
2269 case ISD::SETEQ:
2270 return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;
2272 case ISD::SETOGE:
2273 case ISD::SETGE:
2274 return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);
2276 case ISD::SETOGT:
2277 case ISD::SETGT:
2278 return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;
2280 case ISD::SETUGT:
2281 return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;
2283 default:
2284 return 0;
2285 }
2286 }
2288 // Return the SystemZISD vector comparison operation for CC or its inverse,
2289 // or 0 if neither can be done directly. Indicate in Invert whether the
2290 // result is for the inverse of CC. IsFP is true if CC is for a
2291 // floating-point rather than integer comparison.
2292 static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
2293 bool &Invert) {
2294 if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2295 Invert = false;
2296 return Opcode;
2297 }
2299 CC = ISD::getSetCCInverse(CC, !IsFP);
2300 if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2301 Invert = true;
2302 return Opcode;
2303 }
2305 return 0;
2306 }
2308 // Return a v2f64 that contains the extended form of elements Start and Start+1
2309 // of v4f32 value Op.
2310 static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
2311 SDValue Op) {
2312 int Mask[] = { Start, -1, Start + 1, -1 };
2313 Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
2314 return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
2315 }
2317 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2318 // producing a result of type VT.
2319 static SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &DL,
2320 EVT VT, SDValue CmpOp0, SDValue CmpOp1) {
2321 // There is no hardware support for v4f32, so extend the vector into
2322 // two v2f64s and compare those.
2323 if (CmpOp0.getValueType() == MVT::v4f32) {
2324 SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
2325 SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
2326 SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
2327 SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
2328 SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
2329 SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
2330 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2331 }
2332 return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
2333 }
2335 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2336 // an integer mask of type VT.
2337 static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2338 ISD::CondCode CC, SDValue CmpOp0,
2339 SDValue CmpOp1) {
2340 bool IsFP = CmpOp0.getValueType().isFloatingPoint();
2341 bool Invert = false;
2342 SDValue Cmp;
2343 switch (CC) {
2344 // Handle tests for order using (or (ogt y x) (oge x y)).
2345 case ISD::SETUO:
2346 Invert = true;
2347 LLVM_FALLTHROUGH;
2348 case ISD::SETO: {
2349 assert(IsFP && "Unexpected integer comparison");
2350 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
2351 SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1);
2352 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
2353 break;
2354 }
2356 // Handle <> tests using (or (ogt y x) (ogt x y)).
2357 case ISD::SETUEQ:
2358 Invert = true;
2359 LLVM_FALLTHROUGH;
2360 case ISD::SETONE: {
2361 assert(IsFP && "Unexpected integer comparison");
2362 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
2363 SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1);
2364 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
2365 break;
2366 }
2368 // Otherwise a single comparison is enough. It doesn't really
2369 // matter whether we try the inversion or the swap first, since
2370 // there are no cases where both work.
2371 default:
2372 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
2373 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
2374 else {
2375 CC = ISD::getSetCCSwappedOperands(CC);
2376 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
2377 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
2378 else
2379 llvm_unreachable("Unhandled comparison");
2380 }
2381 break;
2382 }
2383 if (Invert) {
2384 SDValue Mask = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
2385 DAG.getConstant(65535, DL, MVT::i32));
2386 Mask = DAG.getNode(ISD::BITCAST, DL, VT, Mask);
2387 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
2388 }
2390 return Cmp;
2391 }
2392 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
2393 SelectionDAG &DAG) const {
2394 SDValue CmpOp0 = Op.getOperand(0);
2395 SDValue CmpOp1 = Op.getOperand(1);
2396 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2397 SDLoc DL(Op);
2398 EVT VT = Op.getValueType();
2399 if (VT.isVector())
2400 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
2402 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2403 SDValue Glue = emitCmp(DAG, DL, C);
2404 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
2405 }
2407 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2408 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2409 SDValue CmpOp0 = Op.getOperand(2);
2410 SDValue CmpOp1 = Op.getOperand(3);
2411 SDValue Dest = Op.getOperand(4);
2412 SDLoc DL(Op);
2414 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2415 SDValue Glue = emitCmp(DAG, DL, C);
2416 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
2417 Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
2418 DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, Glue);
2419 }
2421 // Return true if Pos is CmpOp and Neg is the negative of CmpOp,
2422 // allowing Pos and Neg to be wider than CmpOp.
2423 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
2424 return (Neg.getOpcode() == ISD::SUB &&
2425 Neg.getOperand(0).getOpcode() == ISD::Constant &&
2426 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
2427 Neg.getOperand(1) == Pos &&
2428 (Pos == CmpOp ||
2429 (Pos.getOpcode() == ISD::SIGN_EXTEND &&
2430 Pos.getOperand(0) == CmpOp)));
2431 }
2433 // Return the absolute or negative absolute of Op; IsNegative decides which.
2434 static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
2435 bool IsNegative) {
2436 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
2437 if (IsNegative)
2438 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
2439 DAG.getConstant(0, DL, Op.getValueType()), Op);
2440 return Op;
2441 }
2443 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
2444 SelectionDAG &DAG) const {
2445 SDValue CmpOp0 = Op.getOperand(0);
2446 SDValue CmpOp1 = Op.getOperand(1);
2447 SDValue TrueOp = Op.getOperand(2);
2448 SDValue FalseOp = Op.getOperand(3);
2449 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2450 SDLoc DL(Op);
2452 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2454 // Check for absolute and negative-absolute selections, including those
2455 // where the comparison value is sign-extended (for LPGFR and LNGFR).
2456 // This check supplements the one in DAGCombiner.
2457 if (C.Opcode == SystemZISD::ICMP &&
2458 C.CCMask != SystemZ::CCMASK_CMP_EQ &&
2459 C.CCMask != SystemZ::CCMASK_CMP_NE &&
2460 C.Op1.getOpcode() == ISD::Constant &&
2461 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2462 if (isAbsolute(C.Op0, TrueOp, FalseOp))
2463 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
2464 if (isAbsolute(C.Op0, FalseOp, TrueOp))
2465 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
2466 }
2468 SDValue Glue = emitCmp(DAG, DL, C);
2470 // Special case for handling -1/0 results. The shifts we use here
2471 // should get optimized with the IPM conversion sequence.
2472 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp);
2473 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp);
2474 if (TrueC && FalseC) {
2475 int64_t TrueVal = TrueC->getSExtValue();
2476 int64_t FalseVal = FalseC->getSExtValue();
2477 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) {
2478 // Invert the condition if we want -1 on false.
2479 if (TrueVal == 0)
2480 C.CCMask ^= C.CCValid;
2481 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
2482 EVT VT = Op.getValueType();
2483 // Extend the result to VT. Upper bits are ignored.
2484 if (!is32Bit(VT))
2485 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result);
2486 // Sign-extend from the low bit.
2487 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32);
2488 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt);
2489 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt);
2490 }
2491 }
2493 SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32),
2494 DAG.getConstant(C.CCMask, DL, MVT::i32), Glue};
2496 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
2497 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
2498 }
2500 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
2501 SelectionDAG &DAG) const {
2502 SDLoc DL(Node);
2503 const GlobalValue *GV = Node->getGlobal();
2504 int64_t Offset = Node->getOffset();
2505 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2506 CodeModel::Model CM = DAG.getTarget().getCodeModel();
2508 SDValue Result;
2509 if (Subtarget.isPC32DBLSymbol(GV, CM)) {
2510 // Assign anchors at 1<<12 byte boundaries.
2511 uint64_t Anchor = Offset & ~uint64_t(0xfff);
2512 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
2513 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2515 // The offset can be folded into the address if it is aligned to a halfword.
2516 Offset -= Anchor;
2517 if (Offset != 0 && (Offset & 1) == 0) {
2518 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
2519 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
2520 Offset = 0;
2521 }
2522 } else {
2523 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
2524 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2525 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
2526 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2527 }
2529 // If there was a non-zero offset that we didn't fold, create an explicit
2530 // addition for it.
2531 if (Offset != 0)
2532 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
2533 DAG.getConstant(Offset, DL, PtrVT));
2535 return Result;
2536 }
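// For instance, a PC-relative GV + 0x1002 becomes an anchor at GV + 0x1000
// with the even remainder 2 folded into the PCREL_OFFSET node; an odd
// remainder would instead be added by the explicit ADD above.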
2538 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
2539 SelectionDAG &DAG,
2540 unsigned Opcode,
2541 SDValue GOTOffset) const {
2542 SDLoc DL(Node);
2543 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2544 SDValue Chain = DAG.getEntryNode();
2545 SDValue Glue;
2547 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
2548 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
2549 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
2550 Glue = Chain.getValue(1);
2551 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
2552 Glue = Chain.getValue(1);
2554 // The first call operand is the chain and the second is the TLS symbol.
2555 SmallVector<SDValue, 8> Ops;
2556 Ops.push_back(Chain);
2557 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
2558 Node->getValueType(0),
2559 0, 0));
2561 // Add argument registers to the end of the list so that they are
2562 // known live into the call.
2563 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
2564 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
2566 // Add a register mask operand representing the call-preserved registers.
2567 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2568 const uint32_t *Mask =
2569 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
2570 assert(Mask && "Missing call preserved mask for calling convention");
2571 Ops.push_back(DAG.getRegisterMask(Mask));
2573 // Glue the call to the argument copies.
2574 Ops.push_back(Glue);
2576 // Emit the call.
2577 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2578 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
2579 Glue = Chain.getValue(1);
2581 // Copy the return value from %r2.
2582 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
2583 }
2585 SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
2586 SelectionDAG &DAG) const {
2587 SDValue Chain = DAG.getEntryNode();
2588 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2590 // The high part of the thread pointer is in access register 0.
2591 SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
2592 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
2594 // The low part of the thread pointer is in access register 1.
2595 SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
2596 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
2598 // Merge them into a single 64-bit address.
2599 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
2600 DAG.getConstant(32, DL, PtrVT));
2601 return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
2602 }
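// This typically selects to the usual s390x EAR/SLLG/EAR sequence (copy
// access register 0, shift left 32, insert access register 1); stated as
// an observation about common codegen rather than a guarantee.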
2604 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
2605 SelectionDAG &DAG) const {
2606 if (DAG.getTarget().Options.EmulatedTLS)
2607 return LowerToTLSEmulatedModel(Node, DAG);
2609 const GlobalValue *GV = Node->getGlobal();
2610 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2611 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
2612 SDLoc DL(Node);
2613 SDValue TP = lowerThreadPointer(DL, DAG);
2615 // Get the offset of GA from the thread pointer, based on the TLS model.
2616 SDValue Offset;
2617 switch (model) {
2618 case TLSModel::GeneralDynamic: {
2619 // Load the GOT offset of the tls_index (module ID / per-symbol offset).
2620 SystemZConstantPoolValue *CPV =
2621 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
2623 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2624 Offset = DAG.getLoad(
2625 PtrVT, DL, DAG.getEntryNode(), Offset,
2626 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2628 // Call __tls_get_offset to retrieve the offset.
2629 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
2630 break;
2631 }
2633 case TLSModel::LocalDynamic: {
2634 // Load the GOT offset of the module ID.
2635 SystemZConstantPoolValue *CPV =
2636 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
2638 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2639 Offset = DAG.getLoad(
2640 PtrVT, DL, DAG.getEntryNode(), Offset,
2641 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2643 // Call __tls_get_offset to retrieve the module base offset.
2644 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
2646 // Note: The SystemZLDCleanupPass will remove redundant computations
2647 // of the module base offset. Count total number of local-dynamic
2648 // accesses to trigger execution of that pass.
2649 SystemZMachineFunctionInfo* MFI =
2650 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
2651 MFI->incNumLocalDynamicTLSAccesses();
2653 // Add the per-symbol offset.
2654 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
2656 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
2657 DTPOffset = DAG.getLoad(
2658 PtrVT, DL, DAG.getEntryNode(), DTPOffset,
2659 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2661 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
2662 break;
2663 }
2665 case TLSModel::InitialExec: {
2666 // Load the offset from the GOT.
2667 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2668 SystemZII::MO_INDNTPOFF);
2669 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
2670 Offset =
2671 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
2672 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2673 break;
2674 }
2676 case TLSModel::LocalExec: {
2677 // Force the offset into the constant pool and load it from there.
2678 SystemZConstantPoolValue *CPV =
2679 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
2681 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2682 Offset = DAG.getLoad(
2683 PtrVT, DL, DAG.getEntryNode(), Offset,
2684 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2685 break;
2686 }
2687 }
2689 // Add the base and offset together.
2690 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
2691 }
2693 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
2694 SelectionDAG &DAG) const {
2695 SDLoc DL(Node);
2696 const BlockAddress *BA = Node->getBlockAddress();
2697 int64_t Offset = Node->getOffset();
2698 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2700 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
2701 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2702 return Result;
2703 }
2705 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
2706 SelectionDAG &DAG) const {
2707 SDLoc DL(JT);
2708 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2709 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2711 // Use LARL to load the address of the table.
2712 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2713 }
2715 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
2716 SelectionDAG &DAG) const {
2717 SDLoc DL(CP);
2718 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2720 SDValue Result;
2721 if (CP->isMachineConstantPoolEntry())
2722 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
2723 CP->getAlignment());
2724 else
2725 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
2726 CP->getAlignment(), CP->getOffset());
2728 // Use LARL to load the address of the constant pool entry.
2729 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2730 }
2732 SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
2733 SelectionDAG &DAG) const {
2734 MachineFunction &MF = DAG.getMachineFunction();
2735 MachineFrameInfo &MFI = MF.getFrameInfo();
2736 MFI.setFrameAddressIsTaken(true);
2738 SDLoc DL(Op);
2739 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2740 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2742 // If the back chain frame index has not been allocated yet, do so.
2743 SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
2744 int BackChainIdx = FI->getFramePointerSaveIndex();
2745 if (!BackChainIdx) {
2746 // By definition, the frame address is the address of the back chain.
2747 BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
2748 FI->setFramePointerSaveIndex(BackChainIdx);
2749 }
2750 SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);
2752 // FIXME The frontend should detect this case.
2753 if (Depth > 0) {
2754 report_fatal_error("Unsupported stack frame traversal count");
2755 }
2757 return BackChain;
2758 }
2760 SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
2761 SelectionDAG &DAG) const {
2762 MachineFunction &MF = DAG.getMachineFunction();
2763 MachineFrameInfo &MFI = MF.getFrameInfo();
2764 MFI.setReturnAddressIsTaken(true);
2766 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2767 return SDValue();
2769 SDLoc DL(Op);
2770 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2771 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2773 // FIXME The frontend should detect this case.
2774 if (Depth > 0) {
2775 report_fatal_error("Unsupported stack frame traversal count");
2776 }
2778 // Return R14D, which has the return address. Mark it an implicit live-in.
2779 unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
2780 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
2781 }
2783 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
2784 SelectionDAG &DAG) const {
2785 SDLoc DL(Op);
2786 SDValue In = Op.getOperand(0);
2787 EVT InVT = In.getValueType();
2788 EVT ResVT = Op.getValueType();
2790 // Convert loads directly. This is normally done by DAGCombiner,
2791 // but we need this case for bitcasts that are created during lowering
2792 // and which are then lowered themselves.
2793 if (auto *LoadN = dyn_cast<LoadSDNode>(In))
2794 if (ISD::isNormalLoad(LoadN))
2795 return DAG.getLoad(ResVT, DL, LoadN->getChain(), LoadN->getBasePtr(),
2796 LoadN->getMemOperand());
2798 if (InVT == MVT::i32 && ResVT == MVT::f32) {
2799 SDValue In64;
2800 if (Subtarget.hasHighWord()) {
2801 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
2802 MVT::i64);
2803 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
2804 MVT::i64, SDValue(U64, 0), In);
2805 } else {
2806 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
2807 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
2808 DAG.getConstant(32, DL, MVT::i64));
2809 }
2810 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
2811 return DAG.getTargetExtractSubreg(SystemZ::subreg_r32,
2812 DL, MVT::f32, Out64);
2813 }
2814 if (InVT == MVT::f32 && ResVT == MVT::i32) {
2815 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
2816 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_r32, DL,
2817 MVT::f64, SDValue(U64, 0), In);
2818 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
2819 if (Subtarget.hasHighWord())
2820 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
2821 MVT::i32, Out64);
2822 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
2823 DAG.getConstant(32, DL, MVT::i64));
2824 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
2825 }
2826 llvm_unreachable("Unexpected bitcast combination");
2827 }
2829 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
2830 SelectionDAG &DAG) const {
2831 MachineFunction &MF = DAG.getMachineFunction();
2832 SystemZMachineFunctionInfo *FuncInfo =
2833 MF.getInfo<SystemZMachineFunctionInfo>();
2834 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2836 SDValue Chain = Op.getOperand(0);
2837 SDValue Addr = Op.getOperand(1);
2838 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2839 SDLoc DL(Op);
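// The SystemZ va_list is a four-field structure, conventionally
// { i64 __gpr, i64 __fpr, i8 *__overflow_arg_area, i8 *__reg_save_area },
// and the loop below stores the fields at 8-byte intervals.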
2841 // The initial values of each field.
2842 const unsigned NumFields = 4;
2843 SDValue Fields[NumFields] = {
2844 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
2845 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
2846 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
2847 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
2848 };
2850 // Store each field into its respective slot.
2851 SDValue MemOps[NumFields];
2852 unsigned Offset = 0;
2853 for (unsigned I = 0; I < NumFields; ++I) {
2854 SDValue FieldAddr = Addr;
2855 if (Offset != 0)
2856 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
2857 DAG.getIntPtrConstant(Offset, DL));
2858 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
2859 MachinePointerInfo(SV, Offset));
2860 Offset += 8;
2861 }
2862 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
2863 }
2865 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
2866 SelectionDAG &DAG) const {
2867 SDValue Chain = Op.getOperand(0);
2868 SDValue DstPtr = Op.getOperand(1);
2869 SDValue SrcPtr = Op.getOperand(2);
2870 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
2871 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2872 SDLoc DL(Op);
2874 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
2875 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
2876 /*isTailCall*/false,
2877 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
2878 }
2880 SDValue SystemZTargetLowering::
2881 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
2882 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
2883 MachineFunction &MF = DAG.getMachineFunction();
2884 bool RealignOpt = !MF.getFunction()->hasFnAttribute("no-realign-stack");
2885 bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
2887 SDValue Chain = Op.getOperand(0);
2888 SDValue Size = Op.getOperand(1);
2889 SDValue Align = Op.getOperand(2);
2890 SDLoc DL(Op);
2892 // If user has set the no alignment function attribute, ignore
2893 // alloca alignments.
2894 uint64_t AlignVal = (RealignOpt ?
2895 cast<ConstantSDNode>(Align)->getZExtValue() : 0);
2897 uint64_t StackAlign = TFI->getStackAlignment();
2898 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
2899 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
2901 unsigned SPReg = getStackPointerRegisterToSaveRestore();
2902 SDValue NeededSpace = Size;
2904 // Get a reference to the stack pointer.
2905 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
2907 // If we need a backchain, save it now.
2908 SDValue Backchain;
2909 if (StoreBackchain)
2910 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
2912 // Add extra space for alignment if needed.
2913 if (ExtraAlignSpace)
2914 NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
2915 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
2917 // Get the new stack pointer value.
2918 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
2920 // Copy the new stack pointer back.
2921 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
2923 // The allocated data lives above the 160 bytes allocated for the standard
2924 // frame, plus any outgoing stack arguments. We don't know how much that
2925 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
2926 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
2927 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
2929 // Dynamically realign if needed.
2930 if (RequiredAlign > StackAlign) {
2931 Result =
2932 DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
2933 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
2934 Result =
2935 DAG.getNode(ISD::AND, DL, MVT::i64, Result,
2936 DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
2937 }
2939 if (StoreBackchain)
2940 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
2942 SDValue Ops[2] = { Result, Chain };
2943 return DAG.getMergeValues(Ops, DL);
2944 }
2946 SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
2947 SDValue Op, SelectionDAG &DAG) const {
2948 SDLoc DL(Op);
2950 return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
2951 }
2953 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
2954 SelectionDAG &DAG) const {
2955 EVT VT = Op.getValueType();
2956 SDLoc DL(Op);
2957 SDValue Ops[2];
2958 if (is32Bit(VT))
2959 // Just do a normal 64-bit multiplication and extract the results.
2960 // We define this so that it can be used for constant division.
2961 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
2962 Op.getOperand(1), Ops[1], Ops[0]);
2963 else {
2964 // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
2965 //
2966 //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
2967 //
2968 // but using the fact that the upper halves are either all zeros
2969 // or all ones:
2970 //
2971 //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
2972 //
2973 // and grouping the right terms together since they are quicker than the
2974 // multiplication:
2975 //
2976 //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
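// This holds because lh is ll sign-extended into the high half (0 or
// all ones), so the 64-bit product lh * rl is either 0 or -rl, which
// equals -(lh & rl); the same applies to rh.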
2977 SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
2978 SDValue LL = Op.getOperand(0);
2979 SDValue RL = Op.getOperand(1);
2980 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
2981 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
2982 // SystemZISD::UMUL_LOHI returns the low result in the odd register and
2983 // the high result in the even register. ISD::SMUL_LOHI is defined to
2984 // return the low half first, so the results are in reverse order.
2985 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
2986 LL, RL, Ops[1], Ops[0]);
2987 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
2988 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
2989 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
2990 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
2991 }
2992 return DAG.getMergeValues(Ops, DL);
2993 }
2995 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
2996 SelectionDAG &DAG) const {
2997 EVT VT = Op.getValueType();
2998 SDLoc DL(Op);
2999 SDValue Ops[2];
3000 if (is32Bit(VT))
3001 // Just do a normal 64-bit multiplication and extract the results.
3002 // We define this so that it can be used for constant division.
3003 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
3004 Op.getOperand(1), Ops[1], Ops[0]);
3005 else
3006 // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3007 // the high result in the even register. ISD::UMUL_LOHI is defined to
3008 // return the low half first, so the results are in reverse order.
3009 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3010 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3011 return DAG.getMergeValues(Ops, DL);
3012 }
3014 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
3015 SelectionDAG &DAG) const {
3016 SDValue Op0 = Op.getOperand(0);
3017 SDValue Op1 = Op.getOperand(1);
3018 EVT VT = Op.getValueType();
3019 SDLoc DL(Op);
3021 // We use DSGF for 32-bit division. This means the first operand must
3022 // always be 64-bit, and the second operand should be 32-bit whenever
3023 // that is possible, to improve performance.
3024 if (is32Bit(VT))
3025 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
3026 else if (DAG.ComputeNumSignBits(Op1) > 32)
3027 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
3029 // DSG(F) returns the remainder in the even register and the
3030 // quotient in the odd register.
3031 SDValue Ops[2];
3032 lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
3033 return DAG.getMergeValues(Ops, DL);
3034 }
3036 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
3037 SelectionDAG &DAG) const {
3038 EVT VT = Op.getValueType();
3039 SDLoc DL(Op);
3041 // DL(G) returns the remainder in the even register and the
3042 // quotient in the odd register.
3043 SDValue Ops[2];
3044 lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
3045 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3046 return DAG.getMergeValues(Ops, DL);
3047 }
3049 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
3050 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
3051 SDLoc DL(Op);
3052 // Get the known-zero masks for each operand.
3053 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
3054 KnownBits Known[2];
3055 DAG.computeKnownBits(Ops[0], Known[0]);
3056 DAG.computeKnownBits(Ops[1], Known[1]);
3058 // See if the upper 32 bits of one operand and the lower 32 bits of the
3059 // other are known zero. They are the low and high operands respectively.
3060 uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
3061 Known[1].Zero.getZExtValue() };
3062 unsigned High, Low;
3063 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
3064 High = 1, Low = 0;
3065 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
3066 High = 0, Low = 1;
3067 else
3068 return Op;
3070 SDValue LowOp = Ops[Low];
3071 SDValue HighOp = Ops[High];
3073 // If the high part is a constant, we're better off using IILH.
3074 if (HighOp.getOpcode() == ISD::Constant)
3075 return Op;
3077 // If the low part is a constant that is outside the range of LHI,
3078 // then we're better off using IILF.
3079 if (LowOp.getOpcode() == ISD::Constant) {
3080 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
3081 if (!isInt<16>(Value))
3082 return Op;
3083 }
3085 // Check whether the high part is an AND that doesn't change the
3086 // high 32 bits and just masks out low bits. We can skip it if so.
3087 if (HighOp.getOpcode() == ISD::AND &&
3088 HighOp.getOperand(1).getOpcode() == ISD::Constant) {
3089 SDValue HighOp0 = HighOp.getOperand(0);
3090 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
3091 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
3092 HighOp = HighOp0;
3093 }
3095 // Take advantage of the fact that all GR32 operations only change the
3096 // low 32 bits by truncating Low to an i32 and inserting it directly
3097 // using a subreg. The interesting cases are those where the truncation
3098 // can be folded.
3100 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
3101 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
3102 MVT::i64, HighOp, Low32);
3103 }
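// For example, "(X & 0xffffffff00000000) | 42" reduces to inserting the
// constant 42 into the low 32-bit subregister of X: the AND only clears
// low bits and is skipped, and 42 fits the LHI immediate range.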
3105 SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
3106 SelectionDAG &DAG) const {
3107 EVT VT = Op.getValueType();
3108 SDLoc DL(Op);
3109 Op = Op.getOperand(0);
3111 // Handle vector types via VPOPCT.
3112 if (VT.isVector()) {
3113 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
3114 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
3115 switch (VT.getScalarSizeInBits()) {
3116 case 8:
3117 break;
3118 case 16: {
3119 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
3120 SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
3121 SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
3122 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3123 Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
3124 break;
3125 }
3126 case 32: {
3127 SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
3128 DAG.getConstant(0, DL, MVT::i32));
3129 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3130 break;
3131 }
3132 case 64: {
3133 SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
3134 DAG.getConstant(0, DL, MVT::i32));
3135 Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
3136 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3137 break;
3138 }
3139 default:
3140 llvm_unreachable("Unexpected type");
3141 }
3142 return Op;
3143 }
3145 // Get the known-zero mask for the operand.
3146 KnownBits Known;
3147 DAG.computeKnownBits(Op, Known);
3148 unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
3149 if (NumSignificantBits == 0)
3150 return DAG.getConstant(0, DL, VT);
3152 // Skip known-zero high parts of the operand.
3153 int64_t OrigBitSize = VT.getSizeInBits();
3154 int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
3155 BitSize = std::min(BitSize, OrigBitSize);
3157 // The POPCNT instruction counts the number of bits in each byte.
3158 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
3159 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
3160 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
3162 // Add up per-byte counts in a binary tree. All bits of Op at
3163 // position larger than BitSize remain zero throughout.
3164 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
3165 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
3166 if (BitSize != OrigBitSize)
3167 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
3168 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
3169 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3170 }
3172 // Extract overall result from high byte.
3173 if (BitSize > 8)
3174 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
3175 DAG.getConstant(BitSize - 8, DL, VT));
3177 return Op;
3178 }
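// Scalar i32 walk-through: POPCNT leaves one count per byte; adding
// Op << 16 and then Op << 8 accumulates all four byte counts into the
// top byte (the sums never exceed 32, so no byte carries out), and the
// final shift by BitSize - 8 = 24 extracts the answer.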
3180 SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
3181 SelectionDAG &DAG) const {
3182 SDLoc DL(Op);
3183 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
3184 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
3185 SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
3186 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
3188 // The only fence that needs an instruction is a sequentially-consistent
3189 // cross-thread fence.
3190 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
3191 FenceSSID == SyncScope::System) {
3192 return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
3193 Op.getOperand(0)),
3194 0);
3195 }
3197 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
3198 return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
3199 }
3201 // Op is an atomic load. Lower it into a serialization followed
3202 // by a normal volatile load.
3203 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
3204 SelectionDAG &DAG) const {
3205 auto *Node = cast<AtomicSDNode>(Op.getNode());
3206 SDValue Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
3207 MVT::Other, Node->getChain()), 0);
3208 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
3209 Chain, Node->getBasePtr(),
3210 Node->getMemoryVT(), Node->getMemOperand());
3211 }
3213 // Op is an atomic store. Lower it into a normal volatile store followed
3214 // by a serialization.
3215 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
3216 SelectionDAG &DAG) const {
3217 auto *Node = cast<AtomicSDNode>(Op.getNode());
3218 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
3219 Node->getBasePtr(), Node->getMemoryVT(),
3220 Node->getMemOperand());
3221 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other,
3222 Chain), 0);
3223 }
3225 // Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation. Lower the first
3226 // two into the fullword ATOMIC_LOADW_* operation given by Opcode.
3227 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
3228 SelectionDAG &DAG,
3229 unsigned Opcode) const {
3230 auto *Node = cast<AtomicSDNode>(Op.getNode());
3232 // 32-bit operations need no code outside the main loop.
3233 EVT NarrowVT = Node->getMemoryVT();
3234 EVT WideVT = MVT::i32;
3235 if (NarrowVT == WideVT)
3236 return Op;
3238 int64_t BitSize = NarrowVT.getSizeInBits();
3239 SDValue ChainIn = Node->getChain();
3240 SDValue Addr = Node->getBasePtr();
3241 SDValue Src2 = Node->getVal();
3242 MachineMemOperand *MMO = Node->getMemOperand();
3243 SDLoc DL(Node);
3244 EVT PtrVT = Addr.getValueType();
3246 // Convert atomic subtracts of constants into additions.
3247 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
3248 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
3249 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
3250 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
3251 }
3253 // Get the address of the containing word.
3254 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3255 DAG.getConstant(-4, DL, PtrVT));
3257 // Get the number of bits that the word must be rotated left in order
3258 // to bring the field to the top bits of a GR32.
3259 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3260 DAG.getConstant(3, DL, PtrVT));
3261 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3263 // Get the complementing shift amount, for rotating a field in the top
3264 // bits back to its proper position.
3265 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3266 DAG.getConstant(0, DL, WideVT), BitShift);
3268 // Extend the source operand to 32 bits and prepare it for the inner loop.
3269 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
3270 // operations require the source to be shifted in advance. (This shift
3271 // can be folded if the source is constant.) For AND and NAND, the lower
3272 // bits must be set, while for other opcodes they should be left clear.
3273 if (Opcode != SystemZISD::ATOMIC_SWAPW)
3274 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
3275 DAG.getConstant(32 - BitSize, DL, WideVT));
3276 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
3277 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
3278 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
3279 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
3281 // Construct the ATOMIC_LOADW_* node.
3282 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3283 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
3284 DAG.getConstant(BitSize, DL, WideVT) };
3285 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
3286 NarrowVT, MMO);
3288 // Rotate the result of the final CS so that the field is in the lower
3289 // bits of a GR32, then truncate it.
3290 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
3291 DAG.getConstant(BitSize, DL, WideVT));
3292 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
3294 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
3295 return DAG.getMergeValues(RetOps, DL);
3296 }
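// A sketch of the scheme above: an i16 operation on the halfword at byte
// offset 2 of its containing word gives AlignedAddr == Addr & -4 and
// BitShift == 16. Rotating the loaded word left by 16 brings that
// halfword to the top of the GR32, where the operand (pre-shifted left
// by 32 - BitSize == 16) is applied before rotating back by NegBitShift.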
3298 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
3299 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
3300 // operations into additions.
3301 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
3302 SelectionDAG &DAG) const {
3303 auto *Node = cast<AtomicSDNode>(Op.getNode());
3304 EVT MemVT = Node->getMemoryVT();
3305 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
3306 // A full-width operation.
3307 assert(Op.getValueType() == MemVT && "Mismatched VTs");
3308 SDValue Src2 = Node->getVal();
3309 SDValue NegSrc2;
3310 SDLoc DL(Src2);
3312 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
3313 // Use an addition if the operand is constant and either LAA(G) is
3314 // available or the negative value is in the range of A(G)FHI.
3315 int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
3316 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
3317 NegSrc2 = DAG.getConstant(Value, DL, MemVT);
3318 } else if (Subtarget.hasInterlockedAccess1())
3319 // Use LAA(G) if available.
3320 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
3321 Src2);
3323 if (NegSrc2.getNode())
3324 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
3325 Node->getChain(), Node->getBasePtr(), NegSrc2,
3326 Node->getMemOperand());
3328 // Use the node as-is.
3329 return Op;
3330 }
3332 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
3333 }
3335 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two
3336 // into a fullword ATOMIC_CMP_SWAPW operation.
3337 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
3338 SelectionDAG &DAG) const {
3339 auto *Node = cast<AtomicSDNode>(Op.getNode());
3341 // We have native support for 32-bit compare and swap.
3342 EVT NarrowVT = Node->getMemoryVT();
3343 EVT WideVT = MVT::i32;
3344 if (NarrowVT == WideVT)
3345 return Op;
3347 int64_t BitSize = NarrowVT.getSizeInBits();
3348 SDValue ChainIn = Node->getOperand(0);
3349 SDValue Addr = Node->getOperand(1);
3350 SDValue CmpVal = Node->getOperand(2);
3351 SDValue SwapVal = Node->getOperand(3);
3352 MachineMemOperand *MMO = Node->getMemOperand();
3353 SDLoc DL(Node);
3354 EVT PtrVT = Addr.getValueType();
3356 // Get the address of the containing word.
3357 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3358 DAG.getConstant(-4, DL, PtrVT));
3360 // Get the number of bits that the word must be rotated left in order
3361 // to bring the field to the top bits of a GR32.
3362 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3363 DAG.getConstant(3, DL, PtrVT));
3364 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3366 // Get the complementing shift amount, for rotating a field in the top
3367 // bits back to its proper position.
3368 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3369 DAG.getConstant(0, DL, WideVT), BitShift);
3371 // Construct the ATOMIC_CMP_SWAPW node.
3372 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3373 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
3374 NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
3375 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
3376 VTList, Ops, NarrowVT, MMO);
3377 return AtomicOp;
3378 }
3380 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
3381 SelectionDAG &DAG) const {
3382 MachineFunction &MF = DAG.getMachineFunction();
3383 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3384 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
3385 SystemZ::R15D, Op.getValueType());
3386 }
3388 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
3389 SelectionDAG &DAG) const {
3390 MachineFunction &MF = DAG.getMachineFunction();
3391 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3392 bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
3394 SDValue Chain = Op.getOperand(0);
3395 SDValue NewSP = Op.getOperand(1);
3396 SDValue Backchain;
3397 SDLoc DL(Op);
3399 if (StoreBackchain) {
3400 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
3401 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3402 }
3404 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
3406 if (StoreBackchain)
3407 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3409 return Chain;
3410 }
3412 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
3413 SelectionDAG &DAG) const {
3414 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3415 if (!IsData)
3416 // Just preserve the chain.
3417 return Op.getOperand(0);
3419 SDLoc DL(Op);
3420 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3421 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
3422 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
3423 SDValue Ops[] = {
3424 Op.getOperand(0),
3425 DAG.getConstant(Code, DL, MVT::i32),
3426 Op.getOperand(1)
3427 };
3428 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
3429 Node->getVTList(), Ops,
3430 Node->getMemoryVT(), Node->getMemOperand());
3431 }
3433 // Return an i32 that contains the value of CC immediately after After,
3434 // whose final operand must be MVT::Glue.
3435 static SDValue getCCResult(SelectionDAG &DAG, SDNode *After) {
3436 SDLoc DL(After);
3437 SDValue Glue = SDValue(After, After->getNumValues() - 1);
3438 SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
3439 return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
3440 DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
3441 }
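// IPM places the condition code in bits 29:28 of its result, so shifting
// right by SystemZ::IPM_CC (28) leaves CC as a plain integer in 0..3.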
3443 SDValue
3444 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
3445 SelectionDAG &DAG) const {
3446 unsigned Opcode, CCValid;
3447 if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
3448 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
3449 SDValue Glued = emitIntrinsicWithChainAndGlue(DAG, Op, Opcode);
3450 SDValue CC = getCCResult(DAG, Glued.getNode());
3451 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
3452 return SDValue();
3453 }
3455 return SDValue();
3456 }
3458 SDValue
3459 SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
3460 SelectionDAG &DAG) const {
3461 unsigned Opcode, CCValid;
3462 if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
3463 SDValue Glued = emitIntrinsicWithGlue(DAG, Op, Opcode);
3464 SDValue CC = getCCResult(DAG, Glued.getNode());
3465 if (Op->getNumValues() == 1)
3466 return CC;
3467 assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
3468 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), Glued,
3469 CC);
3470 }
3472 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3473 switch (Id) {
3474 case Intrinsic::thread_pointer:
3475 return lowerThreadPointer(SDLoc(Op), DAG);
3477 case Intrinsic::s390_vpdi:
3478 return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
3479 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3481 case Intrinsic::s390_vperm:
3482 return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
3483 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3485 case Intrinsic::s390_vuphb:
3486 case Intrinsic::s390_vuphh:
3487 case Intrinsic::s390_vuphf:
3488 return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
3489 Op.getOperand(1));
3491 case Intrinsic::s390_vuplhb:
3492 case Intrinsic::s390_vuplhh:
3493 case Intrinsic::s390_vuplhf:
3494 return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
3495 Op.getOperand(1));
3497 case Intrinsic::s390_vuplb:
3498 case Intrinsic::s390_vuplhw:
3499 case Intrinsic::s390_vuplf:
3500 return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
3501 Op.getOperand(1));
3503 case Intrinsic::s390_vupllb:
3504 case Intrinsic::s390_vupllh:
3505 case Intrinsic::s390_vupllf:
3506 return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
3507 Op.getOperand(1));
3509 case Intrinsic::s390_vsumb:
3510 case Intrinsic::s390_vsumh:
3511 case Intrinsic::s390_vsumgh:
3512 case Intrinsic::s390_vsumgf:
3513 case Intrinsic::s390_vsumqf:
3514 case Intrinsic::s390_vsumqg:
3515 return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
3516 Op.getOperand(1), Op.getOperand(2));
3517 }
3519 return SDValue();
3520 }
3522 namespace {
3523 // Says that SystemZISD operation Opcode can be used to perform the equivalent
3524 // of a VPERM with permute vector Bytes. If Opcode takes three operands,
3525 // Operand is the constant third operand, otherwise it is the number of
3526 // bytes in each element of the result.
3527 struct Permute {
3528 unsigned Opcode;
3529 unsigned Operand;
3530 unsigned char Bytes[SystemZ::VectorBytes];
3531 };
3532 } // end anonymous namespace
3534 static const Permute PermuteForms[] = {
3535 // VMRHG
3536 { SystemZISD::MERGE_HIGH, 8,
3537 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
3538 // VMRHF
3539 { SystemZISD::MERGE_HIGH, 4,
3540 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
3541 // VMRHH
3542 { SystemZISD::MERGE_HIGH, 2,
3543 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
3544 // VMRHB
3545 { SystemZISD::MERGE_HIGH, 1,
3546 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
3547 // VMRLG
3548 { SystemZISD::MERGE_LOW, 8,
3549 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
3550 // VMRLF
3551 { SystemZISD::MERGE_LOW, 4,
3552 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
3553 // VMRLH
3554 { SystemZISD::MERGE_LOW, 2,
3555 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
3556 // VMRLB
3557 { SystemZISD::MERGE_LOW, 1,
3558 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
3559 // VPKG
3560 { SystemZISD::PACK, 4,
3561 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
3562 // VPKF
3563 { SystemZISD::PACK, 2,
3564 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
3565 // VPKH
3566 { SystemZISD::PACK, 1,
3567 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
3568 // VPDI V1, V2, 4 (low half of V1, high half of V2)
3569 { SystemZISD::PERMUTE_DWORDS, 4,
3570 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
3571 // VPDI V1, V2, 1 (high half of V1, low half of V2)
3572 { SystemZISD::PERMUTE_DWORDS, 1,
3573 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
3574 };
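// Byte selectors 0-15 in these vectors pick from the first operand and
// 16-31 from the second. E.g. the MERGE_HIGH, 1 form (VMRHB) interleaves
// byte I of V1 with byte I of V2 for I < 8.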
3576 // Called after matching a vector shuffle against a particular pattern.
3577 // Both the original shuffle and the pattern have two vector operands.
3578 // OpNos[0] is the operand of the original shuffle that should be used for
3579 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
3580 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
3581 // set OpNo0 and OpNo1 to the shuffle operands that should actually be used
3582 // for operands 0 and 1 of the pattern.
3583 static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
3584 if (OpNos[0] < 0) {
3585 if (OpNos[1] < 0)
3586 return false;
3587 OpNo0 = OpNo1 = OpNos[1];
3588 } else if (OpNos[1] < 0) {
3589 OpNo0 = OpNo1 = OpNos[0];
3590 } else {
3591 OpNo0 = OpNos[0];
3592 OpNo1 = OpNos[1];
3593 }
3594 return true;
3595 }
3597 // Bytes is a VPERM-like permute vector, except that -1 is used for
3598 // undefined bytes. Return true if the VPERM can be implemented using P.
3599 // When returning true set OpNo0 to the VPERM operand that should be
3600 // used for operand 0 of P and likewise OpNo1 for operand 1 of P.
3602 // For example, if swapping the VPERM operands allows P to match, OpNo0
3603 // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
3604 // operand, but rewriting it to use two duplicated operands allows it to
3605 // match P, then OpNo0 and OpNo1 will be the same.
3606 static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
3607 unsigned &OpNo0, unsigned &OpNo1) {
3608 int OpNos[] = { -1, -1 };
3609 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
3610 int Elt = Bytes[I];
3611 if (Elt >= 0) {
3612 // Make sure that the two permute vectors use the same suboperand
3613 // byte number. Only the operand numbers (the high bits) are
3614 // allowed to differ.
3615 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
3616 return false;
3617 int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
3618 int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
3619 // Make sure that the operand mappings are consistent with previous
3620 // elements.
3621 if (OpNos[ModelOpNo] == 1 - RealOpNo)
3622 return false;
3623 OpNos[ModelOpNo] = RealOpNo;
3624 }
3625 }
3626 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
3627 }
3629 // As above, but search for a matching permute.
3630 static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
3631 unsigned &OpNo0, unsigned &OpNo1) {
3632 for (auto &P : PermuteForms)
3633 if (matchPermute(Bytes, P, OpNo0, OpNo1))
3634 return &P;
3635 return nullptr;
3636 }
3638 // Bytes is a VPERM-like permute vector, except that -1 is used for
3639 // undefined bytes. This permute is an operand of an outer permute.
3640 // See whether redistributing the -1 bytes gives a shuffle that can be
3641 // implemented using P. If so, set Transform to a VPERM-like permute vector
3642 // that, when applied to the result of P, gives the original permute in Bytes.
3643 static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
3644 const Permute &P,
3645 SmallVectorImpl<int> &Transform) {
3646 unsigned To = 0;
3647 for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
3648 int Elt = Bytes[From];
3649 if (Elt < 0)
3650 // Byte number From of the result is undefined.
3651 Transform[From] = -1;
3652 else {
3653 while (P.Bytes[To] != Elt) {
3654 To += 1;
3655 if (To == SystemZ::VectorBytes)
3656 return false;
3657 }
3658 Transform[From] = To;
3659 }
3660 }
3661 return true;
3662 }
3664 // As above, but search for a matching permute.
3665 static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
3666 SmallVectorImpl<int> &Transform) {
3667 for (auto &P : PermuteForms)
3668 if (matchDoublePermute(Bytes, P, Transform))
3669 return &P;
3670 return nullptr;
3671 }
3673 // Convert the mask of the given VECTOR_SHUFFLE into a byte-level mask,
3674 // as if it had type vNi8.
3675 static void getVPermMask(ShuffleVectorSDNode *VSN,
3676 SmallVectorImpl<int> &Bytes) {
3677 EVT VT = VSN->getValueType(0);
3678 unsigned NumElements = VT.getVectorNumElements();
3679 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3680 Bytes.resize(NumElements * BytesPerElement, -1);
3681 for (unsigned I = 0; I < NumElements; ++I) {
3682 int Index = VSN->getMaskElt(I);
3683 if (Index >= 0)
3684 for (unsigned J = 0; J < BytesPerElement; ++J)
3685 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
3686 }
3687 }
3689 // Bytes is a VPERM-like permute vector, except that -1 is used for
3690 // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
3691 // the result come from a contiguous sequence of bytes from one input.
3692 // Set Base to the selector for the first byte if so.
3693 static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
3694 unsigned BytesPerElement, int &Base) {
3695 Base = -1;
3696 for (unsigned I = 0; I < BytesPerElement; ++I) {
3697 if (Bytes[Start + I] >= 0) {
3698 unsigned Elem = Bytes[Start + I];
3699 if (Base < 0) {
3700 Base = Elem - I;
3701 // Make sure the bytes would come from one input operand.
3702 if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
3703 return false;
3704 } else if (unsigned(Base) != Elem - I)
3705 return false;
3706 }
3707 }
3708 return true;
3709 }
3711 // Bytes is a VPERM-like permute vector, except that -1 is used for
3712 // undefined bytes. Return true if it can be performed using VSLDI.
3713 // When returning true, set StartIndex to the shift amount and OpNo0
3714 // and OpNo1 to the VPERM operands that should be used as the first
3715 // and second shift operand respectively.
3716 static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
3717 unsigned &StartIndex, unsigned &OpNo0,
3718 unsigned &OpNo1) {
3719 int OpNos[] = { -1, -1 };
3720 int Shift = -1;
3721 for (unsigned I = 0; I < 16; ++I) {
3722 int Index = Bytes[I];
3723 if (Index >= 0) {
3724 int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
3725 int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
3726 int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
3727 if (Shift < 0)
3728 Shift = ExpectedShift;
3729 else if (Shift != ExpectedShift)
3730 return false;
3731 // Make sure that the operand mappings are consistent with previous
3732 // elements.
3733 if (OpNos[ModelOpNo] == 1 - RealOpNo)
3734 return false;
3735 OpNos[ModelOpNo] = RealOpNo;
3736 }
3737 }
3738 StartIndex = Shift;
3739 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
3740 }
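// E.g. Bytes == { 1, 2, ..., 15, 16 } gives ExpectedShift == 1 for every
// defined byte: a double shift taking the last 15 bytes of operand 0
// followed by the first byte of operand 1 (a single VSLDB).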
3742 // Create a node that performs P on operands Op0 and Op1, casting the
3743 // operands to the appropriate type. The type of the result is determined by P.
3744 static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
3745 const Permute &P, SDValue Op0, SDValue Op1) {
3746 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
3747 // elements of a PACK are twice as wide as the outputs.
3748 unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
3749 P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
3750 P.Operand);
3751 // Cast both operands to the appropriate type.
3752 MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
3753 SystemZ::VectorBytes / InBytes);
3754 Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
3755 Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
3756 SDValue Op;
3757 if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
3758 SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32);
3759 Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
3760 } else if (P.Opcode == SystemZISD::PACK) {
3761 MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
3762 SystemZ::VectorBytes / P.Operand);
3763 Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
3764 } else {
3765 Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
3766 }
3767 return Op;
3768 }
3770 // Bytes is a VPERM-like permute vector, except that -1 is used for
3771 // undefined bytes. Implement it on operands Ops[0] and Ops[1] using
3772 // VSLDI or VPERM.
3773 static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
3774 SDValue *Ops,
3775 const SmallVectorImpl<int> &Bytes) {
3776 for (unsigned I = 0; I < 2; ++I)
3777 Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);
3779 // First see whether VSLDI can be used.
3780 unsigned StartIndex, OpNo0, OpNo1;
3781 if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
3782 return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
3783 Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32));
3785 // Fall back on VPERM. Construct an SDNode for the permute vector.
3786 SDValue IndexNodes[SystemZ::VectorBytes];
3787 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
3788 if (Bytes[I] >= 0)
3789 IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
3790 else
3791 IndexNodes[I] = DAG.getUNDEF(MVT::i32);
3792 SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
3793 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
3794 }
3796 namespace {
3797 // Describes a general N-operand vector shuffle.
3798 struct GeneralShuffle {
3799 GeneralShuffle(EVT vt) : VT(vt) {}
3800 void addUndef();
3801 bool add(SDValue, unsigned);
3802 SDValue getNode(SelectionDAG &, const SDLoc &);
3804 // The operands of the shuffle.
3805 SmallVector<SDValue, SystemZ::VectorBytes> Ops;
3807 // Index I is -1 if byte I of the result is undefined. Otherwise the
3808 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
3809 // Bytes[I] / SystemZ::VectorBytes.
3810 SmallVector<int, SystemZ::VectorBytes> Bytes;
3812 // The type of the shuffle result.
3813 EVT VT;
3814 };
3815 } // end anonymous namespace
3817 // Add an extra undefined element to the shuffle.
3818 void GeneralShuffle::addUndef() {
3819 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3820 for (unsigned I = 0; I < BytesPerElement; ++I)
3821 Bytes.push_back(-1);
3822 }
3824 // Add an extra element to the shuffle, taking it from element Elem of Op.
3825 // A null Op indicates a vector input whose value will be calculated later;
3826 // there is at most one such input per shuffle and it always has the same
3827 // type as the result. Aborts and returns false if the source vector elements
3828 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
3829 // LLVM they become implicitly extended, but this is rare and not optimized.
3830 bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
3831 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3833 // The source vector can have wider elements than the result,
3834 // either through an explicit TRUNCATE or because of type legalization.
3835 // We want the least significant part.
3836 EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
3837 unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();
3839 // Return false if the source elements are smaller than their destination
3840 // elements.
3841 if (FromBytesPerElement < BytesPerElement)
3842 return false;
3844 unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
3845 (FromBytesPerElement - BytesPerElement));
3847 // Look through things like shuffles and bitcasts.
3848 while (Op.getNode()) {
3849 if (Op.getOpcode() == ISD::BITCAST)
3850 Op = Op.getOperand(0);
3851 else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
3852 // See whether the bytes we need come from a contiguous part of one
3853 // operand.
3854 SmallVector<int, SystemZ::VectorBytes> OpBytes;
3855 getVPermMask(cast<ShuffleVectorSDNode>(Op), OpBytes);
3856 int NewByte;
3857 if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
3858 break;
3859 if (NewByte < 0) {
3860 addUndef();
3861 return true;
3862 }
3863 Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
3864 Byte = unsigned(NewByte) % SystemZ::VectorBytes;
3865 } else if (Op.isUndef()) {
3866 addUndef();
3867 return true;
3868 } else
3869 break;
3870 }
3872 // Make sure that the source of the extraction is in Ops.
3873 unsigned OpNo = 0;
3874 for (; OpNo < Ops.size(); ++OpNo)
3875 if (Ops[OpNo] == Op)
3876 break;
3877 if (OpNo == Ops.size())
3878 Ops.push_back(Op);
3880 // Add the element to Bytes.
3881 unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
3882 for (unsigned I = 0; I < BytesPerElement; ++I)
3883 Bytes.push_back(Base + I);
3885 return true;
3886 }
3888 // Return SDNodes for the completed shuffle.
3889 SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
3890 assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");
3892 if (Ops.size() == 0)
3893 return DAG.getUNDEF(VT);
3895 // Make sure that there are at least two shuffle operands.
3896 if (Ops.size() == 1)
3897 Ops.push_back(DAG.getUNDEF(MVT::v16i8));
3899 // Create a tree of shuffles, deferring root node until after the loop.
3900 // Try to redistribute the undefined elements of non-root nodes so that
3901 // the non-root shuffles match something like a pack or merge, then adjust
3902 // the parent node's permute vector to compensate for the new order.
3903 // Among other things, this copes with vectors like <2 x i16> that were
3904 // padded with undefined elements during type legalization.
3906 // In the best case this redistribution will lead to the whole tree
3907 // using packs and merges. It should rarely be a loss in other cases.
3908 unsigned Stride = 1;
3909 for (; Stride * 2 < Ops.size(); Stride *= 2) {
3910 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
3911 SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
3913 // Create a mask for just these two operands.
3914 SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
3915 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
3916 unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
3917 unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
3918 if (OpNo == I)
3919 NewBytes[J] = Byte;
3920 else if (OpNo == I + Stride)
3921 NewBytes[J] = SystemZ::VectorBytes + Byte;
3922 else
3923 NewBytes[J] = -1;
3924 }
3925 // See if it would be better to reorganize NewBytes to avoid using VPERM.
3926 SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
3927 if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
3928 Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
3929 // Applying NewBytesMap to Ops[I] gets back to NewBytes.
3930 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
3931 if (NewBytes[J] >= 0) {
3932 assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
3933 "Invalid double permute");
3934 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
3935 } else
3936 assert(NewBytesMap[J] < 0 && "Invalid double permute");
3937 }
3938 } else {
3939 // Just use NewBytes on the operands.
3940 Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
3941 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
3942 if (NewBytes[J] >= 0)
3943 Bytes[J] = I * SystemZ::VectorBytes + J;
3944 }
3945 }
3946 }
3948 // Now we just have 2 inputs. Put the second operand in Ops[1].
3949 if (Stride > 1) {
3950 Ops[1] = Ops[Stride];
3951 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
3952 if (Bytes[I] >= int(SystemZ::VectorBytes))
3953 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
3954 }
3956 // Look for an instruction that can do the permute without resorting
3957 // to VPERM.
3958 unsigned OpNo0, OpNo1;
3959 SDValue Op;
3960 if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
3961 Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
3962 else
3963 Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
3964 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
3965 }
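// E.g. with four shuffle operands the loop above combines Ops[0]/Ops[1]
// into Ops[0] and Ops[2]/Ops[3] into Ops[2], rewriting Bytes each time,
// so the final permute only ever has to choose between two registers.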
3967 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
3968 static bool isScalarToVector(SDValue Op) {
3969 for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
3970 if (!Op.getOperand(I).isUndef())
3971 return false;
3972 return true;
3973 }
3975 // Return a vector of type VT that contains Value in the first element.
3976 // The other elements don't matter.
3977 static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
3978 SDValue Value) {
3979 // If we have a constant, replicate it to all elements and let the
3980 // BUILD_VECTOR lowering take care of it.
3981 if (Value.getOpcode() == ISD::Constant ||
3982 Value.getOpcode() == ISD::ConstantFP) {
3983 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
3984 return DAG.getBuildVector(VT, DL, Ops);
3985 }
3986 if (Value.isUndef())
3987 return DAG.getUNDEF(VT);
3988 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
3989 }
3991 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in
3992 // element 1. Used for cases in which replication is cheap.
3993 static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
3994 SDValue Op0, SDValue Op1) {
3995 if (Op0.isUndef()) {
3996 if (Op1.isUndef())
3997 return DAG.getUNDEF(VT);
3998 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
3999 }
4000 if (Op1.isUndef())
4001 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
4002 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
4003 buildScalarToVector(DAG, DL, VT, Op0),
4004 buildScalarToVector(DAG, DL, VT, Op1));
4005 }
4007 // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
4008 // vector for them.
4009 static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
4010 SDValue Op1) {
4011 if (Op0.isUndef() && Op1.isUndef())
4012 return DAG.getUNDEF(MVT::v2i64);
4013 // If one of the two inputs is undefined then replicate the other one,
4014 // in order to avoid using another register unnecessarily.
4015 if (Op0.isUndef())
4016 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4017 else if (Op1.isUndef())
4018 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4019 else {
4020 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4021 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4022 }
4023 return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
4024 }
4026 // Try to represent constant BUILD_VECTOR node BVN using a
4027 // SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask
4028 // on success.
4029 static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) {
4030 EVT ElemVT = BVN->getValueType(0).getVectorElementType();
4031 unsigned BytesPerElement = ElemVT.getStoreSize();
4032 for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) {
4033 SDValue Op = BVN->getOperand(I);
4034 if (!Op.isUndef()) {
4035 uint64_t Value;
4036 if (Op.getOpcode() == ISD::Constant)
4037 Value = dyn_cast<ConstantSDNode>(Op)->getZExtValue();
4038 else if (Op.getOpcode() == ISD::ConstantFP)
4039 Value = (dyn_cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
4040 .getZExtValue());
4041 else
4042 return false;
4043 for (unsigned J = 0; J < BytesPerElement; ++J) {
4044 uint64_t Byte = (Value >> (J * 8)) & 0xff;
4045 if (Byte == 0xff)
4046 Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J);
4047 else if (Byte != 0)
4048 return false;
4049 }
4050 }
4051 }
4052 return true;
4053 }
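// E.g. the v4i32 constant <0, -1, 0, -1> yields Mask == 0x0f0f, which a
// single VECTOR GENERATE BYTE MASK (VGBM) can then materialize.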
4055 // Try to load a vector constant in which BitsPerElement-bit value Value
4056 // is replicated to fill the vector. VT is the type of the resulting
4057 // constant, which may have elements of a different size from BitsPerElement.
4058 // Return the SDValue of the constant on success, otherwise return
4059 // an empty value.
4060 static SDValue tryBuildVectorReplicate(SelectionDAG &DAG,
4061 const SystemZInstrInfo *TII,
4062 const SDLoc &DL, EVT VT, uint64_t Value,
4063 unsigned BitsPerElement) {
4064 // Signed 16-bit values can be replicated using VREPI.
4065 int64_t SignedValue = SignExtend64(Value, BitsPerElement);
4066 if (isInt<16>(SignedValue)) {
4067 MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
4068 SystemZ::VectorBits / BitsPerElement);
4069 SDValue Op = DAG.getNode(SystemZISD::REPLICATE, DL, VecVT,
4070 DAG.getConstant(SignedValue, DL, MVT::i32));
4071 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4072 }
4073 // See whether rotating the constant left some N places gives a value that
4074 // is one less than a power of 2 (i.e. all zeros followed by all ones).
4075 // If so we can use VGM.
4076 unsigned Start, End;
4077 if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) {
4078 // isRxSBGMask returns the bit numbers for a full 64-bit value,
4079 // with 0 denoting 1 << 63 and 63 denoting 1. Convert them to
4080 // bit numbers for a BitsPerElement value, so that 0 denotes
4081 // 1 << (BitsPerElement-1).
4082 Start -= 64 - BitsPerElement;
4083 End -= 64 - BitsPerElement;
4084 MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
4085 SystemZ::VectorBits / BitsPerElement);
4086 SDValue Op = DAG.getNode(SystemZISD::ROTATE_MASK, DL, VecVT,
4087 DAG.getConstant(Start, DL, MVT::i32),
4088 DAG.getConstant(End, DL, MVT::i32));
4089 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4090 }
4091 return SDValue();
4092 }
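// E.g. replicating 0xffff0000 into 32-bit elements fails the VREPI test
// but satisfies isRxSBGMask with Start == 32 and End == 47, which after
// rebasing becomes ROTATE_MASK 0, 15, i.e. VGMF 0, 15.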
4094 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
4095 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
4096 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
4097 // would benefit from this representation and return it if so.
4098 static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
4099 BuildVectorSDNode *BVN) {
4100 EVT VT = BVN->getValueType(0);
4101 unsigned NumElements = VT.getVectorNumElements();
4103 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
4104 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
4105 // need a BUILD_VECTOR, add an additional placeholder operand for that
4106 // BUILD_VECTOR and store its operands in ResidueOps.
4107 GeneralShuffle GS(VT);
4108 SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
4109 bool FoundOne = false;
4110 for (unsigned I = 0; I < NumElements; ++I) {
4111 SDValue Op = BVN->getOperand(I);
4112 if (Op.getOpcode() == ISD::TRUNCATE)
4113 Op = Op.getOperand(0);
4114 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4115 Op.getOperand(1).getOpcode() == ISD::Constant) {
4116 unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4117 if (!GS.add(Op.getOperand(0), Elem))
4118 return SDValue();
4119 FoundOne = true;
4120 } else if (Op.isUndef()) {
4121 GS.addUndef();
4122 } else {
4123 if (!GS.add(SDValue(), ResidueOps.size()))
4124 return SDValue();
4125 ResidueOps.push_back(BVN->getOperand(I));
4126 }
4127 }
4129 // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
4130 if (!FoundOne)
4131 return SDValue();
4133 // Create the BUILD_VECTOR for the remaining elements, if any.
4134 if (!ResidueOps.empty()) {
4135 while (ResidueOps.size() < NumElements)
4136 ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
4137 for (auto &Op : GS.Ops) {
4138 if (!Op.getNode()) {
4139 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
4140 break;
4141 }
4142 }
4143 }
4144 return GS.getNode(DAG, SDLoc(BVN));
4145 }
4147 // Combine GPR scalar values Elems into a vector of type VT.
4148 static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4149 SmallVectorImpl<SDValue> &Elems) {
4150 // See whether there is a single replicated value.
4151 SDValue Single;
4152 unsigned int NumElements = Elems.size();
4153 unsigned int Count = 0;
4154 for (auto Elem : Elems) {
4155 if (!Elem.isUndef()) {
4156 if (!Single.getNode())
4157 Single = Elem;
4158 else if (Elem != Single) {
4159 Single = SDValue();
4160 break;
4161 }
4162 Count += 1;
4163 }
4164 }
4165 // There are three cases here:
4167 // - if the only defined element is a loaded one, the best sequence
4168 // is a replicating load.
4170 // - otherwise, if the only defined element is an i64 value, we will
4171 // end up with the same VLVGP sequence regardless of whether we short-cut
4172 // for replication or fall through to the later code.
4174 // - otherwise, if the only defined element is an i32 or smaller value,
4175 // we would need 2 instructions to replicate it: VLVGP followed by VREPx.
4176 // This is only a win if the single defined element is used more than once.
4177 // In other cases we're better off using a single VLVGx.
4178 if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD))
4179 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);
4181 // If all elements are loads, use VLREP/VLEs (below).
4182 bool AllLoads = true;
4183 for (auto Elem : Elems)
4184 if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) {
4185 AllLoads = false;
4186 break;
4187 }
4189 // The best way of building a v2i64 from two i64s is to use VLVGP.
4190 if (VT == MVT::v2i64 && !AllLoads)
4191 return joinDwords(DAG, DL, Elems[0], Elems[1]);
4193 // Use a 64-bit merge high to combine two doubles.
4194 if (VT == MVT::v2f64 && !AllLoads)
4195 return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4197 // Build v4f32 values directly from the FPRs:
4199 // <Axxx> <Bxxx> <Cxxx> <Dxxx>
4200 // V V VMRHF
4201 // <ABxx> <CDxx>
4202 // V VMRHG
4203 // <ABCD>
4204 if (VT == MVT::v4f32 && !AllLoads) {
4205 SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4206 SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
4207 // Avoid unnecessary undefs by reusing the other operand.
4208 if (Op01.isUndef())
4209 Op01 = Op23;
4210 else if (Op23.isUndef())
4211 Op23 = Op01;
4212 // Merging identical replications is a no-op.
4213 if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
4214 return Op01;
4215 Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
4216 Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
4217 SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
4218 DL, MVT::v2i64, Op01, Op23);
4219 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4220 }
4222 // Collect the constant terms.
4223 SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
4224 SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);
4226 unsigned NumConstants = 0;
4227 for (unsigned I = 0; I < NumElements; ++I) {
4228 SDValue Elem = Elems[I];
4229 if (Elem.getOpcode() == ISD::Constant ||
4230 Elem.getOpcode() == ISD::ConstantFP) {
4231 NumConstants += 1;
4232 Constants[I] = Elem;
4233 Done[I] = true;
4234 }
4235 }
4236 // If there was at least one constant, fill in the other elements of
4237 // Constants with undefs to get a full vector constant and use that
4238 // as the starting point.
4239 SDValue Result;
4240 if (NumConstants > 0) {
4241 for (unsigned I = 0; I < NumElements; ++I)
4242 if (!Constants[I].getNode())
4243 Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
4244 Result = DAG.getBuildVector(VT, DL, Constants);
4245 } else {
4246 // Otherwise try to use VLREP or VLVGP to start the sequence in order to
4247 // avoid a false dependency on any previous contents of the vector
4248 // register.
4250 // Use a VLREP if at least one element is a load.
4251 unsigned LoadElIdx = UINT_MAX;
4252 for (unsigned I = 0; I < NumElements; ++I)
4253 if (Elems[I].getOpcode() == ISD::LOAD &&
4254 cast<LoadSDNode>(Elems[I])->isUnindexed()) {
4255 LoadElIdx = I;
4256 break;
4257 }
4258 if (LoadElIdx != UINT_MAX) {
4259 Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, Elems[LoadElIdx]);
4260 Done[LoadElIdx] = true;
4261 } else {
4262 // Try to use VLVGP.
4263 unsigned I1 = NumElements / 2 - 1;
4264 unsigned I2 = NumElements - 1;
4265 bool Def1 = !Elems[I1].isUndef();
4266 bool Def2 = !Elems[I2].isUndef();
4267 if (Def1 || Def2) {
4268 SDValue Elem1 = Elems[Def1 ? I1 : I2];
4269 SDValue Elem2 = Elems[Def2 ? I2 : I1];
4270 Result = DAG.getNode(ISD::BITCAST, DL, VT,
4271 joinDwords(DAG, DL, Elem1, Elem2));
4272 Done[I1] = true;
4273 Done[I2] = true;
4274 } else
4275 Result = DAG.getUNDEF(VT);
4276 }
4277 }
4279 // Use VLVGx to insert the other elements.
4280 for (unsigned I = 0; I < NumElements; ++I)
4281 if (!Done[I] && !Elems[I].isUndef())
4282 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
4283 DAG.getConstant(I, DL, MVT::i32));
4285 return Result;
4286 }
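// Note on the VLVGP indices: a v2i64 built from two anyextended GPRs
// keeps each value in the low half of its doubleword, so after the
// bitcast the defined elements are NumElements / 2 - 1 and
// NumElements - 1 (e.g. elements 1 and 3 of a v4i32).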
4287 SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
4288 SelectionDAG &DAG) const {
4289 const SystemZInstrInfo *TII =
4290 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
4291 auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
4292 SDLoc DL(Op);
4293 EVT VT = Op.getValueType();
4295 if (BVN->isConstant()) {
4296 // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
4297 // preferred way of creating all-zero and all-one vectors so give it
4298 // priority over other methods below.
4299 uint64_t Mask = 0;
4300 if (tryBuildVectorByteMask(BVN, Mask)) {
4301 SDValue Op = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
4302 DAG.getConstant(Mask, DL, MVT::i32));
4303 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4304 }
4306 // Try using some form of replication.
4307 APInt SplatBits, SplatUndef;
4308 unsigned SplatBitSize;
4309 bool HasAnyUndefs;
4310 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4311 8, true) &&
4312 SplatBitSize <= 64) {
4313 // First try assuming that any undefined bits above the highest set bit
4314 // and below the lowest set bit are 1s. This increases the likelihood of
4315 // being able to use a sign-extended element value in VECTOR REPLICATE
4316 // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
4317 uint64_t SplatBitsZ = SplatBits.getZExtValue();
4318 uint64_t SplatUndefZ = SplatUndef.getZExtValue();
4319 uint64_t Lower = (SplatUndefZ
4320 & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
4321 uint64_t Upper = (SplatUndefZ
4322 & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
4323 uint64_t Value = SplatBitsZ | Upper | Lower;
4324 SDValue Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value,
4325 SplatBitSize);
4326 if (Op.getNode())
4327 return Op;
4329 // Now try assuming that any undefined bits between the first and
4330 // last defined set bits are set. This increases the chances of
4331 // using a non-wraparound mask.
4332 uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
4333 Value = SplatBitsZ | Middle;
4334 Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, SplatBitSize);
4335 if (Op.getNode())
4336 return Op;
4337 }
4339 // Fall back to loading it from memory.
4340 return SDValue();
4341 }
4343 // See if we should use shuffles to construct the vector from other vectors.
4344 if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
4345 return Res;
4347 // Detect SCALAR_TO_VECTOR conversions.
4348 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
4349 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));
4351 // Otherwise use buildVector to build the vector up from GPRs.
4352 unsigned NumElements = Op.getNumOperands();
4353 SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
4354 for (unsigned I = 0; I < NumElements; ++I)
4355 Ops[I] = Op.getOperand(I);
4356 return buildVector(DAG, DL, VT, Ops);
4357 }
4359 SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4360 SelectionDAG &DAG) const {
4361 auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
4362 SDLoc DL(Op);
4363 EVT VT = Op.getValueType();
4364 unsigned NumElements = VT.getVectorNumElements();
4366 if (VSN->isSplat()) {
4367 SDValue Op0 = Op.getOperand(0);
4368 unsigned Index = VSN->getSplatIndex();
4369 assert(Index < VT.getVectorNumElements() &&
4370 "Splat index should be defined and in first operand");
4371 // See whether the value we're splatting is directly available as a scalar.
4372 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4373 Op0.getOpcode() == ISD::BUILD_VECTOR)
4374 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
4375 // Otherwise keep it as a vector-to-vector operation.
4376 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
4377 DAG.getConstant(Index, DL, MVT::i32));
4378 }
4380 GeneralShuffle GS(VT);
4381 for (unsigned I = 0; I < NumElements; ++I) {
4382 int Elt = VSN->getMaskElt(I);
4383 if (Elt < 0)
4384 GS.addUndef();
4385 else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
4386 unsigned(Elt) % NumElements))
4387 return DAG.getUNDEF(VT);
4388 }
4389 return GS.getNode(DAG, SDLoc(VSN));
4390 }
4392 SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
4393 SelectionDAG &DAG) const {
4394 SDLoc DL(Op);
4395 // Just insert the scalar into element 0 of an undefined vector.
4396 return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
4397 Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
4398 Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
4399 }
4401 SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4402 SelectionDAG &DAG) const {
4403 // Handle insertions of floating-point values.
4404 SDLoc DL(Op);
4405 SDValue Op0 = Op.getOperand(0);
4406 SDValue Op1 = Op.getOperand(1);
4407 SDValue Op2 = Op.getOperand(2);
4408 EVT VT = Op.getValueType();
4410 // Insertions into constant indices of a v2f64 can be done using VPDI.
4411 // However, if the inserted value is a bitcast or a constant then it's
4412 // better to use GPRs, as below.
4413 if (VT == MVT::v2f64 &&
4414 Op1.getOpcode() != ISD::BITCAST &&
4415 Op1.getOpcode() != ISD::ConstantFP &&
4416 Op2.getOpcode() == ISD::Constant) {
4417 uint64_t Index = dyn_cast<ConstantSDNode>(Op2)->getZExtValue();
4418 unsigned Mask = VT.getVectorNumElements() - 1;
4419 if (Index <= Mask)
4420 return Op;
4421 }
4423 // Otherwise bitcast to the equivalent integer form and insert via a GPR.
4424 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
4425 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
4426 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
4427 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
4428 DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
4429 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4430 }
4432 SDValue
4433 SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4434 SelectionDAG &DAG) const {
4435 // Handle extractions of floating-point values.
4436 SDLoc DL(Op);
4437 SDValue Op0 = Op.getOperand(0);
4438 SDValue Op1 = Op.getOperand(1);
4439 EVT VT = Op.getValueType();
4440 EVT VecVT = Op0.getValueType();
4442 // Extractions of constant indices can be done directly.
4443 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
4444 uint64_t Index = CIndexN->getZExtValue();
4445 unsigned Mask = VecVT.getVectorNumElements() - 1;
4446 if (Index <= Mask)
4447 return Op;
4448 }
4450 // Otherwise bitcast to the equivalent integer form and extract via a GPR.
4451 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
4452 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
4453 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
4454 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
4455 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4456 }
4458 SDValue
4459 SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
4460 unsigned UnpackHigh) const {
4461 SDValue PackedOp = Op.getOperand(0);
4462 EVT OutVT = Op.getValueType();
4463 EVT InVT = PackedOp.getValueType();
4464 unsigned ToBits = OutVT.getScalarSizeInBits();
4465 unsigned FromBits = InVT.getScalarSizeInBits();
4466 do {
4467 FromBits *= 2;
4468 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
4469 SystemZ::VectorBits / FromBits);
4470 PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp);
4471 } while (FromBits != ToBits);
4472 return PackedOp;
4473 }
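// E.g. a v16i8-to-v4i32 extension unpacks twice, doubling FromBits each
// pass: first to v8i16 and then to v4i32 (VUPHB then VUPHH for the
// signed form; VUPLHB then VUPLHH for the logical form of UnpackHigh).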
4475 SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
4476 unsigned ByScalar) const {
4477 // Look for cases where a vector shift can use the *_BY_SCALAR form.
4478 SDValue Op0 = Op.getOperand(0);
4479 SDValue Op1 = Op.getOperand(1);
4480 SDLoc DL(Op);
4481 EVT VT = Op.getValueType();
4482 unsigned ElemBitSize = VT.getScalarSizeInBits();
4484 // See whether the shift vector is a splat represented as BUILD_VECTOR.
4485 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
4486 APInt SplatBits, SplatUndef;
4487 unsigned SplatBitSize;
4488 bool HasAnyUndefs;
4489 // Check for constant splats. Use ElemBitSize as the minimum element
4490 // width and reject splats that need wider elements.
4491 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4492 ElemBitSize, true) &&
4493 SplatBitSize == ElemBitSize) {
4494 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
4495 DL, MVT::i32);
4496 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4497 }
4498 // Check for variable splats.
4499 BitVector UndefElements;
4500 SDValue Splat = BVN->getSplatValue(&UndefElements);
4501 if (Splat) {
4502 // Since i32 is the smallest legal type, we either need a no-op
4503 // or a truncation.
4504 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
4505 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4506 }
4507 }
4509 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
4510 // and the shift amount is directly available in a GPR.
4511 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
4512 if (VSN->isSplat()) {
4513 SDValue VSNOp0 = VSN->getOperand(0);
4514 unsigned Index = VSN->getSplatIndex();
4515 assert(Index < VT.getVectorNumElements() &&
4516 "Splat index should be defined and in first operand");
4517 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4518 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
4519 // Since i32 is the smallest legal type, we either need a no-op
4520 // or a truncation.
4521 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
4522 VSNOp0.getOperand(Index));
4523 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4524 }
4525 }
4526 }
4528 // Otherwise just treat the current form as legal.
4529 return Op;
4530 }
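// E.g. (shl <4 x i32> X, splat 5) becomes VSHL_BY_SCALAR with an i32
// amount of 5, i.e. a single VESLF, instead of a VESLV that would need
// the shift amounts in a vector register.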
4532 SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
4533 SelectionDAG &DAG) const {
4534 switch (Op.getOpcode()) {
4535 case ISD::FRAMEADDR:
4536 return lowerFRAMEADDR(Op, DAG);
4537 case ISD::RETURNADDR:
4538 return lowerRETURNADDR(Op, DAG);
4539 case ISD::BR_CC:
4540 return lowerBR_CC(Op, DAG);
4541 case ISD::SELECT_CC:
4542 return lowerSELECT_CC(Op, DAG);
4543 case ISD::SETCC:
4544 return lowerSETCC(Op, DAG);
4545 case ISD::GlobalAddress:
4546 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
4547 case ISD::GlobalTLSAddress:
4548 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
4549 case ISD::BlockAddress:
4550 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
4551 case ISD::JumpTable:
4552 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
4553 case ISD::ConstantPool:
4554 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
4555 case ISD::BITCAST:
4556 return lowerBITCAST(Op, DAG);
4557 case ISD::VASTART:
4558 return lowerVASTART(Op, DAG);
4559 case ISD::VACOPY:
4560 return lowerVACOPY(Op, DAG);
4561 case ISD::DYNAMIC_STACKALLOC:
4562 return lowerDYNAMIC_STACKALLOC(Op, DAG);
4563 case ISD::GET_DYNAMIC_AREA_OFFSET:
4564 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
4565 case ISD::SMUL_LOHI:
4566 return lowerSMUL_LOHI(Op, DAG);
4567 case ISD::UMUL_LOHI:
4568 return lowerUMUL_LOHI(Op, DAG);
4569 case ISD::SDIVREM:
4570 return lowerSDIVREM(Op, DAG);
4571 case ISD::UDIVREM:
4572 return lowerUDIVREM(Op, DAG);
4573 case ISD::OR:
4574 return lowerOR(Op, DAG);
4575 case ISD::CTPOP:
4576 return lowerCTPOP(Op, DAG);
4577 case ISD::ATOMIC_FENCE:
4578 return lowerATOMIC_FENCE(Op, DAG);
4579 case ISD::ATOMIC_SWAP:
4580 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
4581 case ISD::ATOMIC_STORE:
4582 return lowerATOMIC_STORE(Op, DAG);
4583 case ISD::ATOMIC_LOAD:
4584 return lowerATOMIC_LOAD(Op, DAG);
4585 case ISD::ATOMIC_LOAD_ADD:
4586 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
4587 case ISD::ATOMIC_LOAD_SUB:
4588 return lowerATOMIC_LOAD_SUB(Op, DAG);
4589 case ISD::ATOMIC_LOAD_AND:
4590 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
4591 case ISD::ATOMIC_LOAD_OR:
4592 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
4593 case ISD::ATOMIC_LOAD_XOR:
4594 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
4595 case ISD::ATOMIC_LOAD_NAND:
4596 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
4597 case ISD::ATOMIC_LOAD_MIN:
4598 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
4599 case ISD::ATOMIC_LOAD_MAX:
4600 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
4601 case ISD::ATOMIC_LOAD_UMIN:
4602 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
4603 case ISD::ATOMIC_LOAD_UMAX:
4604 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
4605 case ISD::ATOMIC_CMP_SWAP:
4606 return lowerATOMIC_CMP_SWAP(Op, DAG);
4607 case ISD::STACKSAVE:
4608 return lowerSTACKSAVE(Op, DAG);
4609 case ISD::STACKRESTORE:
4610 return lowerSTACKRESTORE(Op, DAG);
4611 case ISD::PREFETCH:
4612 return lowerPREFETCH(Op, DAG);
4613 case ISD::INTRINSIC_W_CHAIN:
4614 return lowerINTRINSIC_W_CHAIN(Op, DAG);
4615 case ISD::INTRINSIC_WO_CHAIN:
4616 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
4617 case ISD::BUILD_VECTOR:
4618 return lowerBUILD_VECTOR(Op, DAG);
4619 case ISD::VECTOR_SHUFFLE:
4620 return lowerVECTOR_SHUFFLE(Op, DAG);
4621 case ISD::SCALAR_TO_VECTOR:
4622 return lowerSCALAR_TO_VECTOR(Op, DAG);
4623 case ISD::INSERT_VECTOR_ELT:
4624 return lowerINSERT_VECTOR_ELT(Op, DAG);
4625 case ISD::EXTRACT_VECTOR_ELT:
4626 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4627 case ISD::SIGN_EXTEND_VECTOR_INREG:
4628 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
4629 case ISD::ZERO_EXTEND_VECTOR_INREG:
4630 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
4631 case ISD::SHL:
4632 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
4633 case ISD::SRL:
4634 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
4635 case ISD::SRA:
4636 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
4637 default:
4638 llvm_unreachable("Unexpected node to lower");
4639 }
4640 }
4642 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
4643 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
4644 switch ((SystemZISD::NodeType)Opcode) {
4645 case SystemZISD::FIRST_NUMBER: break;
4651 OPCODE(PCREL_WRAPPER);
4652 OPCODE(PCREL_OFFSET);
4658 OPCODE(SELECT_CCMASK);
4659 OPCODE(ADJDYNALLOC);
4676 OPCODE(SEARCH_STRING);
4680 OPCODE(TBEGIN_NOFLOAT);
4683 OPCODE(ROTATE_MASK);
4685 OPCODE(JOIN_DWORDS);
4690 OPCODE(PERMUTE_DWORDS);
4695 OPCODE(UNPACK_HIGH);
4696 OPCODE(UNPACKL_HIGH);
4698 OPCODE(UNPACKL_LOW);
4699 OPCODE(VSHL_BY_SCALAR);
4700 OPCODE(VSRL_BY_SCALAR);
4701 OPCODE(VSRA_BY_SCALAR);
4729 OPCODE(ATOMIC_SWAPW);
4730 OPCODE(ATOMIC_LOADW_ADD);
4731 OPCODE(ATOMIC_LOADW_SUB);
4732 OPCODE(ATOMIC_LOADW_AND);
4733 OPCODE(ATOMIC_LOADW_OR);
4734 OPCODE(ATOMIC_LOADW_XOR);
4735 OPCODE(ATOMIC_LOADW_NAND);
4736 OPCODE(ATOMIC_LOADW_MIN);
4737 OPCODE(ATOMIC_LOADW_MAX);
4738 OPCODE(ATOMIC_LOADW_UMIN);
4739 OPCODE(ATOMIC_LOADW_UMAX);
4740 OPCODE(ATOMIC_CMP_SWAPW);
4741 OPCODE(LRV);
4742 OPCODE(STRV);
4743 OPCODE(PREFETCH);
4744 }
4745 return nullptr;
4746 #undef OPCODE
4747 }
4749 // Return true if VT is a vector whose elements are a whole number of bytes
4750 // in width. Also check for presence of vector support.
4751 bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
4752 if (!Subtarget.hasVector())
4753 return false;
4755 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
4756 }
4758 // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
4759 // producing a result of type ResVT. Op is a possibly bitcast version
4760 // of the input vector and Index is the index (based on type VecVT) that
4761 // should be extracted. Return the new extraction if a simplification
4762 // was possible or if Force is true.
4763 SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
4764 EVT VecVT, SDValue Op,
4765 unsigned Index,
4766 DAGCombinerInfo &DCI,
4767 bool Force) const {
4768 SelectionDAG &DAG = DCI.DAG;
4770 // The number of bytes being extracted.
4771 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
4773 for (;;) {
4774 unsigned Opcode = Op.getOpcode();
4775 if (Opcode == ISD::BITCAST)
4776 // Look through bitcasts.
4777 Op = Op.getOperand(0);
4778 else if (Opcode == ISD::VECTOR_SHUFFLE &&
4779 canTreatAsByteVector(Op.getValueType())) {
4780 // Get a VPERM-like permute mask and see whether the bytes covered
4781 // by the extracted element are a contiguous sequence from one
4782 // operand.
4783 SmallVector<int, SystemZ::VectorBytes> Bytes;
4784 getVPermMask(cast<ShuffleVectorSDNode>(Op), Bytes);
4785 int First;
4786 if (!getShuffleInput(Bytes, Index * BytesPerElement,
4787 BytesPerElement, First))
4788 return SDValue();
4789 if (First < 0)
4790 return DAG.getUNDEF(ResVT);
4791 // Make sure the contiguous sequence starts at a multiple of the
4792 // original element size.
4793 unsigned Byte = unsigned(First) % Bytes.size();
4794 if (Byte % BytesPerElement != 0)
4795 break;
4796 // We can get the extracted value directly from an input.
4797 Index = Byte / BytesPerElement;
4798 Op = Op.getOperand(unsigned(First) / Bytes.size());
4799 Force = true;
4800 } else if (Opcode == ISD::BUILD_VECTOR &&
4801 canTreatAsByteVector(Op.getValueType())) {
4802 // We can only optimize this case if the BUILD_VECTOR elements are
4803 // at least as wide as the extracted value.
4804 EVT OpVT = Op.getValueType();
4805 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
4806 if (OpBytesPerElement < BytesPerElement)
4807 break;
4808 // Make sure that the least-significant bit of the extracted value
4809 // is the least significant bit of an input.
4810 unsigned End = (Index + 1) * BytesPerElement;
4811 if (End % OpBytesPerElement != 0)
4812 break;
4813 // We're extracting the low part of one operand of the BUILD_VECTOR.
4814 Op = Op.getOperand(End / OpBytesPerElement - 1);
4815 if (!Op.getValueType().isInteger()) {
4816 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
4817 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
4818 DCI.AddToWorklist(Op.getNode());
4819 }
4820 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
4821 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
4822 if (VT != ResVT) {
4823 DCI.AddToWorklist(Op.getNode());
4824 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
4825 }
4826 return Op;
4827 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
4828 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
4829 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
4830 canTreatAsByteVector(Op.getValueType()) &&
4831 canTreatAsByteVector(Op.getOperand(0).getValueType())) {
4832 // Make sure that only the unextended bits are significant.
4833 EVT ExtVT = Op.getValueType();
4834 EVT OpVT = Op.getOperand(0).getValueType();
4835 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
4836 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
4837 unsigned Byte = Index * BytesPerElement;
4838 unsigned SubByte = Byte % ExtBytesPerElement;
4839 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
4840 if (SubByte < MinSubByte ||
4841 SubByte + BytesPerElement > ExtBytesPerElement)
4842 break;
4843 // Get the byte offset of the unextended element
4844 Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
4845 // ...then add the byte offset relative to that element.
4846 Byte += SubByte - MinSubByte;
4847 if (Byte % BytesPerElement != 0)
4848 break;
4849 Op = Op.getOperand(0);
4850 Index = Byte / BytesPerElement;
4851 Force = true;
4852 } else
4853 break;
4854 }
4855 if (Force) {
4856 if (Op.getValueType() != VecVT) {
4857 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
4858 DCI.AddToWorklist(Op.getNode());
4860 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
4861 DAG.getConstant(Index, DL, MVT::i32));
4862 }
4863 return SDValue();
4864 }
4866 // Optimize vector operations in scalar value Op on the basis that Op
4867 // is truncated to TruncVT.
4868 SDValue SystemZTargetLowering::combineTruncateExtract(
4869 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
4870 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
4871 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
4872 // of type TruncVT.
4873 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4874 TruncVT.getSizeInBits() % 8 == 0) {
4875 SDValue Vec = Op.getOperand(0);
4876 EVT VecVT = Vec.getValueType();
4877 if (canTreatAsByteVector(VecVT)) {
4878 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
4879 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
4880 unsigned TruncBytes = TruncVT.getStoreSize();
4881 if (BytesPerElement % TruncBytes == 0) {
4882 // Calculate the value of Y' in the above description. We are
4883 // splitting the original elements into Scale equal-sized pieces
4884 // and for truncation purposes want the last (least-significant)
4885 // of these pieces for IndexN. This is easiest to do by calculating
4886 // the start index of the following element and then subtracting 1.
4887 unsigned Scale = BytesPerElement / TruncBytes;
4888 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
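// For example, truncating an i64 element to i16 gives Scale = 4; for
// element 1 the last i16 piece lives at index (1 + 1) * 4 - 1 == 7 in
// the v8i16 view of the same vector.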
4890 // Defer the creation of the bitcast from X to combineExtract,
4891 // which might be able to optimize the extraction.
4892 VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
4893 VecVT.getStoreSize() / TruncBytes);
4894 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
4895 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
4896 }
4897 }
4898 }
4899 }
4900 return SDValue();
4901 }
4903 SDValue SystemZTargetLowering::combineSIGN_EXTEND(
4904 SDNode *N, DAGCombinerInfo &DCI) const {
4905 // Convert (sext (ashr (shl X, C1), C2)) to
4906 // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
4907 // cheap as narrower ones.
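// For example, for an i32 sign-extended to i64, Extra is 32, so
// (sext (ashr (shl X, 24), 26)) becomes (ashr (shl (anyext X), 56), 58).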
4908 SelectionDAG &DAG = DCI.DAG;
4909 SDValue N0 = N->getOperand(0);
4910 EVT VT = N->getValueType(0);
4911 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
4912 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4913 SDValue Inner = N0.getOperand(0);
4914 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
4915 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
4916 unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
4917 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
4918 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
4919 EVT ShiftVT = N0.getOperand(1).getValueType();
4920 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
4921 Inner.getOperand(0));
4922 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
4923 DAG.getConstant(NewShlAmt, SDLoc(Inner),
4924 ShiftVT));
4925 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
4926 DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
4927 }
4928 }
4929 }
4930 return SDValue();
4931 }
4933 SDValue SystemZTargetLowering::combineMERGE(
4934 SDNode *N, DAGCombinerInfo &DCI) const {
4935 SelectionDAG &DAG = DCI.DAG;
4936 unsigned Opcode = N->getOpcode();
4937 SDValue Op0 = N->getOperand(0);
4938 SDValue Op1 = N->getOperand(1);
4939 if (Op0.getOpcode() == ISD::BITCAST)
4940 Op0 = Op0.getOperand(0);
4941 if (Op0.getOpcode() == SystemZISD::BYTE_MASK &&
4942 cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
4943 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
4944 // for v4f32.
4945 if (Op1 == N->getOperand(0))
4946 return Op1;
4947 // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
4948 EVT VT = Op1.getValueType();
4949 unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
4950 if (ElemBytes <= 4) {
4951 Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
4952 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
4953 EVT InVT = VT.changeVectorElementTypeToInteger();
4954 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
4955 SystemZ::VectorBytes / ElemBytes / 2);
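// For example, a v16i8 MERGE_HIGH whose first operand is a zero
// BYTE_MASK becomes an UNPACKL_HIGH producing v8i16: each i8 element
// is widened to an i16 with a zero high byte.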
4957 Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
4958 DCI.AddToWorklist(Op1.getNode());
4960 SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
4961 DCI.AddToWorklist(Op.getNode());
4962 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
4963 }
4964 }
4965 return SDValue();
4966 }
4968 SDValue SystemZTargetLowering::combineSTORE(
4969 SDNode *N, DAGCombinerInfo &DCI) const {
4970 SelectionDAG &DAG = DCI.DAG;
4971 auto *SN = cast<StoreSDNode>(N);
4972 auto &Op1 = N->getOperand(1);
4973 EVT MemVT = SN->getMemoryVT();
4974 // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
4975 // for the extraction to be done on a vMiN value, so that we can use VSTE.
4976 // If X has wider elements then convert it to:
4977 // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
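// For example, a 32-bit truncating store of element 0 of a v2i64 is
// rewritten to store element 1 of the v4i32 bitcast, which a single
// VSTEF can implement.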
4978 if (MemVT.isInteger()) {
4979 if (SDValue Value =
4980 combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
4981 DCI.AddToWorklist(Value.getNode());
4983 // Rewrite the store with the new form of stored value.
4984 return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
4985 SN->getBasePtr(), SN->getMemoryVT(),
4986 SN->getMemOperand());
4987 }
4988 }
4989 // Combine STORE (BSWAP) into STRVH/STRV/STRVG
4990 // See comment in combineBSWAP about volatile accesses.
4991 if (!SN->isVolatile() &&
4992 Op1.getOpcode() == ISD::BSWAP &&
4993 Op1.getNode()->hasOneUse() &&
4994 (Op1.getValueType() == MVT::i16 ||
4995 Op1.getValueType() == MVT::i32 ||
4996 Op1.getValueType() == MVT::i64)) {
4998 SDValue BSwapOp = Op1.getOperand(0);
5000 if (BSwapOp.getValueType() == MVT::i16)
5001 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);
5003 SDValue Ops[] = {
5004 N->getOperand(0), BSwapOp, N->getOperand(2),
5005 DAG.getValueType(Op1.getValueType())
5006 };
5008 return
5009 DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other),
5010 Ops, MemVT, SN->getMemOperand());
5011 }
5012 return SDValue();
5013 }
5015 SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
5016 SDNode *N, DAGCombinerInfo &DCI) const {
5018 if (!Subtarget.hasVector())
5019 return SDValue();
5021 // Try to simplify a vector extraction.
5022 if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
5023 SDValue Op0 = N->getOperand(0);
5024 EVT VecVT = Op0.getValueType();
5025 return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
5026 IndexN->getZExtValue(), DCI, false);
5027 }
5028 return SDValue();
5029 }
5031 SDValue SystemZTargetLowering::combineJOIN_DWORDS(
5032 SDNode *N, DAGCombinerInfo &DCI) const {
5033 SelectionDAG &DAG = DCI.DAG;
5034 // (join_dwords X, X) == (replicate X)
5035 if (N->getOperand(0) == N->getOperand(1))
5036 return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
5037 N->getOperand(0));
5038 return SDValue();
5039 }
5041 SDValue SystemZTargetLowering::combineFP_ROUND(
5042 SDNode *N, DAGCombinerInfo &DCI) const {
5043 // (fpround (extract_vector_elt X 0))
5044 // (fpround (extract_vector_elt X 1)) ->
5045 // (extract_vector_elt (VROUND X) 0)
5046 // (extract_vector_elt (VROUND X) 1)
5048 // This is a special case since the target doesn't really support v2f32s.
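// The two rounded f32 results land in elements 0 and 2 of the v4f32
// VROUND output, which is why the code below extracts indices 0 and 2.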
5049 SelectionDAG &DAG = DCI.DAG;
5050 SDValue Op0 = N->getOperand(0);
5051 if (N->getValueType(0) == MVT::f32 &&
5052 Op0.hasOneUse() &&
5053 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5054 Op0.getOperand(0).getValueType() == MVT::v2f64 &&
5055 Op0.getOperand(1).getOpcode() == ISD::Constant &&
5056 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
5057 SDValue Vec = Op0.getOperand(0);
5058 for (auto *U : Vec->uses()) {
5059 if (U != Op0.getNode() &&
5060 U->hasOneUse() &&
5061 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5062 U->getOperand(0) == Vec &&
5063 U->getOperand(1).getOpcode() == ISD::Constant &&
5064 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
5065 SDValue OtherRound = SDValue(*U->use_begin(), 0);
5066 if (OtherRound.getOpcode() == ISD::FP_ROUND &&
5067 OtherRound.getOperand(0) == SDValue(U, 0) &&
5068 OtherRound.getValueType() == MVT::f32) {
5069 SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
5070 MVT::v4f32, Vec);
5071 DCI.AddToWorklist(VRound.getNode());
5072 SDValue Extract1 =
5073 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
5074 VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
5075 DCI.AddToWorklist(Extract1.getNode());
5076 DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
5077 SDValue Extract0 =
5078 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
5079 VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
5080 DCI.AddToWorklist(Extract0.getNode());
5081 return Extract0;
5082 }
5083 }
5084 }
5085 }
5086 return SDValue();
5087 }
5088 SDValue SystemZTargetLowering::combineBSWAP(
5089 SDNode *N, DAGCombinerInfo &DCI) const {
5090 SelectionDAG &DAG = DCI.DAG;
5091 // Combine BSWAP (LOAD) into LRVH/LRV/LRVG
5092 // These loads are allowed to access memory multiple times, and so we must check
5093 // that the loads are not volatile before performing the combine.
5094 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
5095 N->getOperand(0).hasOneUse() &&
5096 (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 ||
5097 N->getValueType(0) == MVT::i64) &&
5098 !cast<LoadSDNode>(N->getOperand(0))->isVolatile()) {
5099 SDValue Load = N->getOperand(0);
5100 LoadSDNode *LD = cast<LoadSDNode>(Load);
5102 // Create the byte-swapping load.
5103 SDValue Ops[] = {
5104 LD->getChain(), // Chain
5105 LD->getBasePtr(), // Ptr
5106 DAG.getValueType(N->getValueType(0)) // VT
5107 };
5108 SDValue BSLoad =
5109 DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
5110 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
5111 MVT::i64 : MVT::i32, MVT::Other),
5112 Ops, LD->getMemoryVT(), LD->getMemOperand());
5114 // If this is an i16 load, insert the truncate.
5115 SDValue ResVal = BSLoad;
5116 if (N->getValueType(0) == MVT::i16)
5117 ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);
5119 // First, combine the bswap away. This makes the value produced by the
5120 // load dead.
5121 DCI.CombineTo(N, ResVal);
5123 // Next, combine the load away, we give it a bogus result value but a real
5124 // chain result. The result value is dead because the bswap is dead.
5125 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
5127 // Return N so it doesn't get rechecked!
5128 return SDValue(N, 0);
5129 }
5131 return SDValue();
5132 }
5133 SDValue SystemZTargetLowering::combineSHIFTROT(
5134 SDNode *N, DAGCombinerInfo &DCI) const {
5136 SelectionDAG &DAG = DCI.DAG;
5138 // Shift/rotate instructions only use the last 6 bits of the second operand
5139 // register. If the second operand is the result of an AND with an immediate
5140 // value that has its last 6 bits set, we can safely remove the AND operation.
5142 // If the AND operation doesn't have the last 6 bits set, we can't remove it
5143 // entirely, but we can still truncate it to a 16-bit value. This prevents
5144 // us from ending up with a NILL with a signed operand, which will cause the
5145 // instruction printer to abort.
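// For example, (shl X, (and Y, 63)) can drop the AND entirely, while
// (srl X, (and Y, 0xff00001f)) keeps it but shrinks the mask to 0x001f,
// which fits in NILL's 16-bit immediate.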
5146 SDValue N1 = N->getOperand(1);
5147 if (N1.getOpcode() == ISD::AND) {
5148 SDValue AndMaskOp = N1->getOperand(1);
5149 auto *AndMask = dyn_cast<ConstantSDNode>(AndMaskOp);
5151 // The AND mask is constant
5152 if (AndMask) {
5153 auto AmtVal = AndMask->getZExtValue();
5155 // Bottom 6 bits are set
5156 if ((AmtVal & 0x3f) == 0x3f) {
5157 SDValue AndOp = N1->getOperand(0);
5159 // This is the only use, so remove the node
5160 if (N1.hasOneUse()) {
5161 // Combine the AND away
5162 DCI.CombineTo(N1.getNode(), AndOp);
5164 // Return N so it isn't rechecked
5165 return SDValue(N, 0);
5167 // The node will be reused, so create a new node for this one use
5168 } else {
5169 SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
5170 N->getValueType(0), N->getOperand(0),
5171 AndOp);
5172 DCI.AddToWorklist(Replace.getNode());
5174 return Replace;
5175 }
5177 // We can't remove the AND, but we can use NILL here (normally we would
5178 // use NILF). Only keep the last 16 bits of the mask. The actual
5179 // transformation will be handled by .td definitions.
5180 } else if (AmtVal >> 16 != 0) {
5181 SDValue AndOp = N1->getOperand(0);
5183 auto NewMask = DAG.getConstant(AndMask->getZExtValue() & 0x0000ffff,
5184 SDLoc(AndMaskOp),
5185 AndMaskOp.getValueType());
5187 auto NewAnd = DAG.getNode(N1.getOpcode(), SDLoc(N1), N1.getValueType(),
5188 AndOp, NewMask);
5190 SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
5191 N->getValueType(0), N->getOperand(0),
5192 NewAnd);
5193 DCI.AddToWorklist(Replace.getNode());
5195 return Replace;
5196 }
5197 }
5198 }
5200 return SDValue();
5201 }
5203 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
5204 DAGCombinerInfo &DCI) const {
5205 switch(N->getOpcode()) {
5206 default: break;
5207 case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI);
5208 case SystemZISD::MERGE_HIGH:
5209 case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI);
5210 case ISD::STORE: return combineSTORE(N, DCI);
5211 case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
5212 case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
5213 case ISD::FP_ROUND: return combineFP_ROUND(N, DCI);
5214 case ISD::BSWAP: return combineBSWAP(N, DCI);
5215 case ISD::SHL:
5216 case ISD::SRA:
5217 case ISD::SRL:
5218 case ISD::ROTL: return combineSHIFTROT(N, DCI);
5219 }
5221 return SDValue();
5222 }
5224 //===----------------------------------------------------------------------===//
5225 // Custom insertion
5226 //===----------------------------------------------------------------------===//
5228 // Create a new basic block after MBB.
5229 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
5230 MachineFunction &MF = *MBB->getParent();
5231 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
5232 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
5233 return NewMBB;
5234 }
5236 // Split MBB after MI and return the new block (the one that contains
5237 // instructions after MI).
5238 static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
5239 MachineBasicBlock *MBB) {
5240 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
5241 NewMBB->splice(NewMBB->begin(), MBB,
5242 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
5243 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
5244 return NewMBB;
5245 }
5247 // Split MBB before MI and return the new block (the one that contains MI).
5248 static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
5249 MachineBasicBlock *MBB) {
5250 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
5251 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
5252 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
5253 return NewMBB;
5254 }
5256 // Force base value Base into a register before MI. Return the register.
5257 static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
5258 const SystemZInstrInfo *TII) {
5259 if (Base.isReg())
5260 return Base.getReg();
5262 MachineBasicBlock *MBB = MI.getParent();
5263 MachineFunction &MF = *MBB->getParent();
5264 MachineRegisterInfo &MRI = MF.getRegInfo();
5266 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
5267 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
5268 .add(Base)
5269 .addImm(0)
5270 .addReg(0);
5271 return Reg;
5272 }
5274 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
5275 MachineBasicBlock *
5276 SystemZTargetLowering::emitSelect(MachineInstr &MI,
5277 MachineBasicBlock *MBB,
5278 unsigned LOCROpcode) const {
5279 const SystemZInstrInfo *TII =
5280 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5282 unsigned DestReg = MI.getOperand(0).getReg();
5283 unsigned TrueReg = MI.getOperand(1).getReg();
5284 unsigned FalseReg = MI.getOperand(2).getReg();
5285 unsigned CCValid = MI.getOperand(3).getImm();
5286 unsigned CCMask = MI.getOperand(4).getImm();
5287 DebugLoc DL = MI.getDebugLoc();
5289 // Use LOCROpcode if possible.
5290 if (LOCROpcode && Subtarget.hasLoadStoreOnCond()) {
5291 BuildMI(*MBB, MI, DL, TII->get(LOCROpcode), DestReg)
5292 .addReg(FalseReg).addReg(TrueReg)
5293 .addImm(CCValid).addImm(CCMask);
5294 MI.eraseFromParent();
5295 return MBB;
5296 }
5298 MachineBasicBlock *StartMBB = MBB;
5299 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
5300 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
5302 // StartMBB:
5303 // BRC CCMask, JoinMBB
5304 // # fallthrough to FalseMBB
5305 MBB = StartMBB;
5306 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5307 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
5308 MBB->addSuccessor(JoinMBB);
5309 MBB->addSuccessor(FalseMBB);
5311 // FalseMBB:
5312 // # fallthrough to JoinMBB
5313 MBB = FalseMBB;
5314 MBB->addSuccessor(JoinMBB);
5316 // JoinMBB:
5317 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
5319 MBB = JoinMBB;
5320 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
5321 .addReg(TrueReg).addMBB(StartMBB)
5322 .addReg(FalseReg).addMBB(FalseMBB);
5324 MI.eraseFromParent();
5325 return JoinMBB;
5326 }
5328 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
5329 // StoreOpcode is the store to use and Invert says whether the store should
5330 // happen when the condition is false rather than true. If a STORE ON
5331 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
5332 MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
5333 MachineBasicBlock *MBB,
5334 unsigned StoreOpcode,
5335 unsigned STOCOpcode,
5336 bool Invert) const {
5337 const SystemZInstrInfo *TII =
5338 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5340 unsigned SrcReg = MI.getOperand(0).getReg();
5341 MachineOperand Base = MI.getOperand(1);
5342 int64_t Disp = MI.getOperand(2).getImm();
5343 unsigned IndexReg = MI.getOperand(3).getReg();
5344 unsigned CCValid = MI.getOperand(4).getImm();
5345 unsigned CCMask = MI.getOperand(5).getImm();
5346 DebugLoc DL = MI.getDebugLoc();
5348 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
5350 // Use STOCOpcode if possible. We could use different store patterns in
5351 // order to avoid matching the index register, but the performance trade-offs
5352 // might be more complicated in that case.
5353 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
5354 if (Invert)
5355 CCMask ^= CCValid;
5357 // ISel pattern matching also adds a load memory operand of the same
5358 // address, so take special care to find the storing memory operand.
5359 MachineMemOperand *MMO = nullptr;
5360 for (auto *I : MI.memoperands())
5361 if (I->isStore()) {
5362 MMO = I;
5363 break;
5364 }
5366 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
5367 .addReg(SrcReg)
5368 .add(Base)
5369 .addImm(Disp)
5370 .addImm(CCValid)
5371 .addImm(CCMask)
5372 .addMemOperand(MMO);
5374 MI.eraseFromParent();
5375 return MBB;
5376 }
5378 // Get the condition needed to branch around the store.
5379 if (!Invert)
5380 CCMask ^= CCValid;
5382 MachineBasicBlock *StartMBB = MBB;
5383 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
5384 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
5386 // StartMBB:
5387 // BRC CCMask, JoinMBB
5388 // # fallthrough to FalseMBB
5389 MBB = StartMBB;
5390 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5391 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
5392 MBB->addSuccessor(JoinMBB);
5393 MBB->addSuccessor(FalseMBB);
5395 // FalseMBB:
5396 // store %SrcReg, %Disp(%Index,%Base)
5397 // # fallthrough to JoinMBB
5398 MBB = FalseMBB;
5399 BuildMI(MBB, DL, TII->get(StoreOpcode))
5400 .addReg(SrcReg)
5401 .add(Base)
5402 .addImm(Disp)
5403 .addReg(IndexReg);
5404 MBB->addSuccessor(JoinMBB);
5406 MI.eraseFromParent();
5407 return JoinMBB;
5408 }
5410 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
5411 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
5412 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
5413 // BitSize is the width of the field in bits, or 0 if this is a partword
5414 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
5415 // is one of the operands. Invert says whether the field should be
5416 // inverted after performing BinOpcode (e.g. for NAND).
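// For example, a partword ATOMIC_LOADW_NRi (NAND) loads the containing
// aligned word, rotates the field into position, performs NR, inverts
// the field bits, rotates back, and loops on CS until the compare-and-
// swap succeeds.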
5417 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
5418 MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
5419 unsigned BitSize, bool Invert) const {
5420 MachineFunction &MF = *MBB->getParent();
5421 const SystemZInstrInfo *TII =
5422 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5423 MachineRegisterInfo &MRI = MF.getRegInfo();
5424 bool IsSubWord = (BitSize < 32);
5426 // Extract the operands. Base can be a register or a frame index.
5427 // Src2 can be a register or immediate.
5428 unsigned Dest = MI.getOperand(0).getReg();
5429 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
5430 int64_t Disp = MI.getOperand(2).getImm();
5431 MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
5432 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
5433 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
5434 DebugLoc DL = MI.getDebugLoc();
5435 if (IsSubWord)
5436 BitSize = MI.getOperand(6).getImm();
5438 // Subword operations use 32-bit registers.
5439 const TargetRegisterClass *RC = (BitSize <= 32 ?
5440 &SystemZ::GR32BitRegClass :
5441 &SystemZ::GR64BitRegClass);
5442 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
5443 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
5445 // Get the right opcodes for the displacement.
5446 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
5447 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
5448 assert(LOpcode && CSOpcode && "Displacement out of range");
5450 // Create virtual registers for temporary results.
5451 unsigned OrigVal = MRI.createVirtualRegister(RC);
5452 unsigned OldVal = MRI.createVirtualRegister(RC);
5453 unsigned NewVal = (BinOpcode || IsSubWord ?
5454 MRI.createVirtualRegister(RC) : Src2.getReg());
5455 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
5456 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
5458 // Insert a basic block for the main loop.
5459 MachineBasicBlock *StartMBB = MBB;
5460 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5461 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
5463 // StartMBB:
5465 // %OrigVal = L Disp(%Base)
5466 // # fall through to LoopMBB
5467 MBB = StartMBB;
5468 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
5469 MBB->addSuccessor(LoopMBB);
5471 // LoopMBB:
5472 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
5473 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
5474 // %RotatedNewVal = OP %RotatedOldVal, %Src2
5475 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
5476 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
5477 // JNE LoopMBB
5478 // # fall through to DoneMBB
5479 MBB = LoopMBB;
5480 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5481 .addReg(OrigVal).addMBB(StartMBB)
5482 .addReg(Dest).addMBB(LoopMBB);
5483 if (IsSubWord)
5484 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
5485 .addReg(OldVal).addReg(BitShift).addImm(0);
5486 if (Invert) {
5487 // Perform the operation normally and then invert every bit of the field.
5488 unsigned Tmp = MRI.createVirtualRegister(RC);
5489 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
5490 if (BitSize <= 32)
5491 // XILF with the upper BitSize bits set.
5492 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
5493 .addReg(Tmp).addImm(-1U << (32 - BitSize));
5494 else {
5495 // Use LCGR and add -1 to the result, which is more compact than
5496 // an XILF, XILH pair.
5497 unsigned Tmp2 = MRI.createVirtualRegister(RC);
5498 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
5499 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
5500 .addReg(Tmp2).addImm(-1);
5501 }
5502 } else if (BinOpcode)
5503 // A simple binary operation.
5504 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
5505 .addReg(RotatedOldVal)
5506 .add(Src2);
5507 else if (IsSubWord)
5508 // Use RISBG to rotate Src2 into position and use it to replace the
5509 // field in RotatedOldVal.
5510 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
5511 .addReg(RotatedOldVal).addReg(Src2.getReg())
5512 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
5513 if (IsSubWord)
5514 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
5515 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
5516 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
5517 .addReg(OldVal)
5518 .addReg(NewVal)
5519 .add(Base)
5520 .addImm(Disp);
5521 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5522 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
5523 MBB->addSuccessor(LoopMBB);
5524 MBB->addSuccessor(DoneMBB);
5526 MI.eraseFromParent();
5527 return DoneMBB;
5528 }
5530 // Implement EmitInstrWithCustomInserter for pseudo
5531 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
5532 // instruction that should be used to compare the current field with the
5533 // minimum or maximum value. KeepOldMask is the BRC condition-code mask
5534 // for when the current field should be kept. BitSize is the width of
5535 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
5536 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
5537 MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
5538 unsigned KeepOldMask, unsigned BitSize) const {
5539 MachineFunction &MF = *MBB->getParent();
5540 const SystemZInstrInfo *TII =
5541 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5542 MachineRegisterInfo &MRI = MF.getRegInfo();
5543 bool IsSubWord = (BitSize < 32);
5545 // Extract the operands. Base can be a register or a frame index.
5546 unsigned Dest = MI.getOperand(0).getReg();
5547 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
5548 int64_t Disp = MI.getOperand(2).getImm();
5549 unsigned Src2 = MI.getOperand(3).getReg();
5550 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
5551 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
5552 DebugLoc DL = MI.getDebugLoc();
5553 if (IsSubWord)
5554 BitSize = MI.getOperand(6).getImm();
5556 // Subword operations use 32-bit registers.
5557 const TargetRegisterClass *RC = (BitSize <= 32 ?
5558 &SystemZ::GR32BitRegClass :
5559 &SystemZ::GR64BitRegClass);
5560 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
5561 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
5563 // Get the right opcodes for the displacement.
5564 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
5565 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
5566 assert(LOpcode && CSOpcode && "Displacement out of range");
5568 // Create virtual registers for temporary results.
5569 unsigned OrigVal = MRI.createVirtualRegister(RC);
5570 unsigned OldVal = MRI.createVirtualRegister(RC);
5571 unsigned NewVal = MRI.createVirtualRegister(RC);
5572 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
5573 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
5574 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
5576 // Insert 3 basic blocks for the loop.
5577 MachineBasicBlock *StartMBB = MBB;
5578 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5579 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
5580 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
5581 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
5583 // StartMBB:
5585 // %OrigVal = L Disp(%Base)
5586 // # fall through to LoopMBB
5587 MBB = StartMBB;
5588 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
5589 MBB->addSuccessor(LoopMBB);
5591 // LoopMBB:
5592 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
5593 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
5594 // CompareOpcode %RotatedOldVal, %Src2
5595 // BRC KeepOldMask, UpdateMBB
5596 MBB = LoopMBB;
5597 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5598 .addReg(OrigVal).addMBB(StartMBB)
5599 .addReg(Dest).addMBB(UpdateMBB);
5600 if (IsSubWord)
5601 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
5602 .addReg(OldVal).addReg(BitShift).addImm(0);
5603 BuildMI(MBB, DL, TII->get(CompareOpcode))
5604 .addReg(RotatedOldVal).addReg(Src2);
5605 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5606 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
5607 MBB->addSuccessor(UpdateMBB);
5608 MBB->addSuccessor(UseAltMBB);
5610 // UseAltMBB:
5611 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
5612 // # fall through to UpdateMBB
5613 MBB = UseAltMBB;
5614 if (IsSubWord)
5615 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
5616 .addReg(RotatedOldVal).addReg(Src2)
5617 .addImm(32).addImm(31 + BitSize).addImm(0);
5618 MBB->addSuccessor(UpdateMBB);
5620 // UpdateMBB:
5621 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
5622 // [ %RotatedAltVal, UseAltMBB ]
5623 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
5624 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
5626 // # fall through to DoneMBB
5627 MBB = UpdateMBB;
5628 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
5629 .addReg(RotatedOldVal).addMBB(LoopMBB)
5630 .addReg(RotatedAltVal).addMBB(UseAltMBB);
5631 if (IsSubWord)
5632 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
5633 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
5634 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
5635 .addReg(OldVal)
5636 .addReg(NewVal)
5637 .add(Base)
5638 .addImm(Disp);
5639 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5640 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
5641 MBB->addSuccessor(LoopMBB);
5642 MBB->addSuccessor(DoneMBB);
5644 MI.eraseFromParent();
5645 return DoneMBB;
5646 }
5648 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
5649 // instruction MI.
5650 MachineBasicBlock *
5651 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
5652 MachineBasicBlock *MBB) const {
5654 MachineFunction &MF = *MBB->getParent();
5655 const SystemZInstrInfo *TII =
5656 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5657 MachineRegisterInfo &MRI = MF.getRegInfo();
5659 // Extract the operands. Base can be a register or a frame index.
5660 unsigned Dest = MI.getOperand(0).getReg();
5661 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
5662 int64_t Disp = MI.getOperand(2).getImm();
5663 unsigned OrigCmpVal = MI.getOperand(3).getReg();
5664 unsigned OrigSwapVal = MI.getOperand(4).getReg();
5665 unsigned BitShift = MI.getOperand(5).getReg();
5666 unsigned NegBitShift = MI.getOperand(6).getReg();
5667 int64_t BitSize = MI.getOperand(7).getImm();
5668 DebugLoc DL = MI.getDebugLoc();
5670 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
5672 // Get the right opcodes for the displacement.
5673 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
5674 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
5675 assert(LOpcode && CSOpcode && "Displacement out of range");
5677 // Create virtual registers for temporary results.
5678 unsigned OrigOldVal = MRI.createVirtualRegister(RC);
5679 unsigned OldVal = MRI.createVirtualRegister(RC);
5680 unsigned CmpVal = MRI.createVirtualRegister(RC);
5681 unsigned SwapVal = MRI.createVirtualRegister(RC);
5682 unsigned StoreVal = MRI.createVirtualRegister(RC);
5683 unsigned RetryOldVal = MRI.createVirtualRegister(RC);
5684 unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
5685 unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
5687 // Insert 2 basic blocks for the loop.
5688 MachineBasicBlock *StartMBB = MBB;
5689 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5690 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
5691 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
5693 // StartMBB:
5695 // %OrigOldVal = L Disp(%Base)
5696 // # fall through to LoopMBB
5697 MBB = StartMBB;
5698 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
5699 .add(Base)
5700 .addImm(Disp)
5701 .addReg(0);
5702 MBB->addSuccessor(LoopMBB);
5704 // LoopMBB:
5705 // %OldVal = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
5706 // %CmpVal = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
5707 // %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
5708 // %Dest = RLL %OldVal, BitSize(%BitShift)
5709 // ^^ The low BitSize bits contain the field
5710 // of interest.
5711 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
5712 // ^^ Replace the upper 32-BitSize bits of the
5713 // comparison value with those that we loaded,
5714 // so that we can use a full word comparison.
5715 // CR %Dest, %RetryCmpVal
5716 // JNE DoneMBB
5717 // # Fall through to SetMBB
5718 MBB = LoopMBB;
5719 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5720 .addReg(OrigOldVal).addMBB(StartMBB)
5721 .addReg(RetryOldVal).addMBB(SetMBB);
5722 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
5723 .addReg(OrigCmpVal).addMBB(StartMBB)
5724 .addReg(RetryCmpVal).addMBB(SetMBB);
5725 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
5726 .addReg(OrigSwapVal).addMBB(StartMBB)
5727 .addReg(RetrySwapVal).addMBB(SetMBB);
5728 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
5729 .addReg(OldVal).addReg(BitShift).addImm(BitSize);
5730 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
5731 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
5732 BuildMI(MBB, DL, TII->get(SystemZ::CR))
5733 .addReg(Dest).addReg(RetryCmpVal);
5734 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5735 .addImm(SystemZ::CCMASK_ICMP)
5736 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
5737 MBB->addSuccessor(DoneMBB);
5738 MBB->addSuccessor(SetMBB);
5740 // SetMBB:
5741 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
5742 // ^^ Replace the upper 32-BitSize bits of the new
5743 // value with those that we loaded.
5744 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
5745 // ^^ Rotate the new field to its proper position.
5746 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
5747 // JNE LoopMBB
5748 // # fall through to DoneMBB
5749 MBB = SetMBB;
5750 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
5751 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
5752 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
5753 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
5754 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
5755 .addReg(Dest)
5756 .addReg(StoreVal)
5757 .add(Base)
5758 .addImm(Disp);
5759 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5760 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
5761 MBB->addSuccessor(LoopMBB);
5762 MBB->addSuccessor(DoneMBB);
5764 MI.eraseFromParent();
5765 return DoneMBB;
5766 }
5768 // Emit an extension from a GR64 to a GR128. ClearEven is true
5769 // if the high register of the GR128 value must be cleared or false if
5770 // it's "don't care".
5771 MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
5772 MachineBasicBlock *MBB,
5773 bool ClearEven) const {
5774 MachineFunction &MF = *MBB->getParent();
5775 const SystemZInstrInfo *TII =
5776 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5777 MachineRegisterInfo &MRI = MF.getRegInfo();
5778 DebugLoc DL = MI.getDebugLoc();
5780 unsigned Dest = MI.getOperand(0).getReg();
5781 unsigned Src = MI.getOperand(1).getReg();
5782 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
5784 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
5785 if (ClearEven) {
5786 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
5787 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
5789 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
5790 .addImm(0);
5791 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
5792 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
5793 In128 = NewIn128;
5794 }
5795 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
5796 .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);
5798 MI.eraseFromParent();
5799 return MBB;
5800 }
5802 MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
5803 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
5804 MachineFunction &MF = *MBB->getParent();
5805 const SystemZInstrInfo *TII =
5806 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5807 MachineRegisterInfo &MRI = MF.getRegInfo();
5808 DebugLoc DL = MI.getDebugLoc();
5810 MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
5811 uint64_t DestDisp = MI.getOperand(1).getImm();
5812 MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
5813 uint64_t SrcDisp = MI.getOperand(3).getImm();
5814 uint64_t Length = MI.getOperand(4).getImm();
5816 // When generating more than one CLC, all but the last will need to
5817 // branch to the end when a difference is found.
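// For example, a 700-byte CLC expands to CLC(256), JLH End, CLC(256),
// JLH End, CLC(188); only the last CLC falls through unconditionally.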
5818 MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
5819 splitBlockAfter(MI, MBB) : nullptr);
5821 // Check for the loop form, in which operand 5 is the trip count.
5822 if (MI.getNumExplicitOperands() > 5) {
5823 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
5825 uint64_t StartCountReg = MI.getOperand(5).getReg();
5826 uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
5827 uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
5828 forceReg(MI, DestBase, TII));
5830 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
5831 uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
5832 uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
5833 MRI.createVirtualRegister(RC));
5834 uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
5835 uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
5836 MRI.createVirtualRegister(RC));
5838 RC = &SystemZ::GR64BitRegClass;
5839 uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
5840 uint64_t NextCountReg = MRI.createVirtualRegister(RC);
5842 MachineBasicBlock *StartMBB = MBB;
5843 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5844 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
5845 MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
5847 // StartMBB:
5848 // # fall through to LoopMBB
5849 MBB->addSuccessor(LoopMBB);
5851 // LoopMBB:
5852 // %ThisDestReg = phi [ %StartDestReg, StartMBB ],
5853 // [ %NextDestReg, NextMBB ]
5854 // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
5855 // [ %NextSrcReg, NextMBB ]
5856 // %ThisCountReg = phi [ %StartCountReg, StartMBB ],
5857 // [ %NextCountReg, NextMBB ]
5858 // ( PFD 2, 768+DestDisp(%ThisDestReg) )
5859 // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
5860 // ( JLH EndMBB )
5862 // The prefetch is used only for MVC. The JLH is used only for CLC.
5863 MBB = LoopMBB;
5865 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
5866 .addReg(StartDestReg).addMBB(StartMBB)
5867 .addReg(NextDestReg).addMBB(NextMBB);
5868 if (!HaveSingleBase)
5869 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
5870 .addReg(StartSrcReg).addMBB(StartMBB)
5871 .addReg(NextSrcReg).addMBB(NextMBB);
5872 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
5873 .addReg(StartCountReg).addMBB(StartMBB)
5874 .addReg(NextCountReg).addMBB(NextMBB);
5875 if (Opcode == SystemZ::MVC)
5876 BuildMI(MBB, DL, TII->get(SystemZ::PFD))
5877 .addImm(SystemZ::PFD_WRITE)
5878 .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
5879 BuildMI(MBB, DL, TII->get(Opcode))
5880 .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
5881 .addReg(ThisSrcReg).addImm(SrcDisp);
5882 if (EndMBB) {
5883 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5884 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
5885 .addMBB(EndMBB);
5886 MBB->addSuccessor(EndMBB);
5887 MBB->addSuccessor(NextMBB);
5888 }
5890 // NextMBB:
5891 // %NextDestReg = LA 256(%ThisDestReg)
5892 // %NextSrcReg = LA 256(%ThisSrcReg)
5893 // %NextCountReg = AGHI %ThisCountReg, -1
5894 // CGHI %NextCountReg, 0
5896 // # fall through to DoneMBB
5898 // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
5899 MBB = NextMBB;
5901 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
5902 .addReg(ThisDestReg).addImm(256).addReg(0);
5903 if (!HaveSingleBase)
5904 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
5905 .addReg(ThisSrcReg).addImm(256).addReg(0);
5906 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
5907 .addReg(ThisCountReg).addImm(-1);
5908 BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
5909 .addReg(NextCountReg).addImm(0);
5910 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5911 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
5912 .addMBB(LoopMBB);
5913 MBB->addSuccessor(LoopMBB);
5914 MBB->addSuccessor(DoneMBB);
5916 DestBase = MachineOperand::CreateReg(NextDestReg, false);
5917 SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
5918 DestDisp = 0;
5919 SrcDisp = 0;
5920 MBB = DoneMBB;
}
5921 // Handle any remaining bytes with straight-line code.
5922 while (Length > 0) {
5923 uint64_t ThisLength = std::min(Length, uint64_t(256));
5924 // The previous iteration might have created out-of-range displacements.
5925 // Apply them using LAY if so.
5926 if (!isUInt<12>(DestDisp)) {
5927 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
5928 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
5929 .add(DestBase)
5930 .addImm(DestDisp)
5931 .addReg(0);
5932 DestBase = MachineOperand::CreateReg(Reg, false);
5933 DestDisp = 0;
5934 }
5935 if (!isUInt<12>(SrcDisp)) {
5936 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
5937 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
5938 .add(SrcBase)
5939 .addImm(SrcDisp)
5940 .addReg(0);
5941 SrcBase = MachineOperand::CreateReg(Reg, false);
5942 SrcDisp = 0;
5943 }
5944 BuildMI(*MBB, MI, DL, TII->get(Opcode))
5945 .add(DestBase)
5946 .addImm(DestDisp)
5947 .addImm(ThisLength)
5948 .add(SrcBase)
5949 .addImm(SrcDisp)
5950 ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
5951 DestDisp += ThisLength;
5952 SrcDisp += ThisLength;
5953 Length -= ThisLength;
5954 // If there's another CLC to go, branch to the end if a difference
5955 // was found.
5956 if (EndMBB && Length > 0) {
5957 MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
5958 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5959 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
5960 .addMBB(EndMBB);
5961 MBB->addSuccessor(EndMBB);
5962 MBB->addSuccessor(NextMBB);
5963 MBB = NextMBB;
5964 }
5965 }
5966 if (EndMBB) {
5967 MBB->addSuccessor(EndMBB);
5968 MBB = EndMBB;
5969 MBB->addLiveIn(SystemZ::CC);
5970 }
5972 MI.eraseFromParent();
5973 return MBB;
5974 }
5976 // Decompose string pseudo-instruction MI into a loop that continually performs
5977 // Opcode until CC != 3.
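// CC 3 means the instruction stopped after processing a CPU-determined
// number of bytes without reaching a result, so the loop simply
// reissues it from where it left off.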
5978 MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
5979 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
5980 MachineFunction &MF = *MBB->getParent();
5981 const SystemZInstrInfo *TII =
5982 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5983 MachineRegisterInfo &MRI = MF.getRegInfo();
5984 DebugLoc DL = MI.getDebugLoc();
5986 uint64_t End1Reg = MI.getOperand(0).getReg();
5987 uint64_t Start1Reg = MI.getOperand(1).getReg();
5988 uint64_t Start2Reg = MI.getOperand(2).getReg();
5989 uint64_t CharReg = MI.getOperand(3).getReg();
5991 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
5992 uint64_t This1Reg = MRI.createVirtualRegister(RC);
5993 uint64_t This2Reg = MRI.createVirtualRegister(RC);
5994 uint64_t End2Reg = MRI.createVirtualRegister(RC);
5996 MachineBasicBlock *StartMBB = MBB;
5997 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5998 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6000 // StartMBB:
6001 // # fall through to LoopMBB
6002 MBB->addSuccessor(LoopMBB);
6004 // LoopMBB:
6005 // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
6006 // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
6007 // R0L = %CharReg
6008 // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
6009 // JO LoopMBB
6010 // # fall through to DoneMBB
6012 // The load of R0L can be hoisted by post-RA LICM.
6013 MBB = LoopMBB;
6015 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
6016 .addReg(Start1Reg).addMBB(StartMBB)
6017 .addReg(End1Reg).addMBB(LoopMBB);
6018 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
6019 .addReg(Start2Reg).addMBB(StartMBB)
6020 .addReg(End2Reg).addMBB(LoopMBB);
6021 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
6022 BuildMI(MBB, DL, TII->get(Opcode))
6023 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
6024 .addReg(This1Reg).addReg(This2Reg);
6025 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6026 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
6027 MBB->addSuccessor(LoopMBB);
6028 MBB->addSuccessor(DoneMBB);
6030 DoneMBB->addLiveIn(SystemZ::CC);
6032 MI.eraseFromParent();
6033 return DoneMBB;
6034 }
6036 // Update TBEGIN instruction with final opcode and register clobbers.
6037 MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
6038 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
6039 bool NoFloat) const {
6040 MachineFunction &MF = *MBB->getParent();
6041 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
6042 const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
6045 MI.setDesc(TII->get(Opcode));
6047 // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
6048 // Make sure to add the corresponding GRSM bits if they are missing.
6049 uint64_t Control = MI.getOperand(2).getImm();
6050 static const unsigned GPRControlBit[16] = {
6051 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
6052 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
6053 };
6054 Control |= GPRControlBit[15];
6055 if (TFI->hasFP(MF))
6056 Control |= GPRControlBit[11];
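// Each GRSM bit covers an even/odd register pair, which is why the
// table repeats each mask: 0x0100, for example, covers both %r14 and
// %r15.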
6057 MI.getOperand(2).setImm(Control);
6059 // Add GPR clobbers.
6060 for (int I = 0; I < 16; I++) {
6061 if ((Control & GPRControlBit[I]) == 0) {
6062 unsigned Reg = SystemZMC::GR64Regs[I];
6063 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
6064 }
6065 }
6067 // Add FPR/VR clobbers.
6068 if (!NoFloat && (Control & 4) != 0) {
6069 if (Subtarget.hasVector()) {
6070 for (int I = 0; I < 32; I++) {
6071 unsigned Reg = SystemZMC::VR128Regs[I];
6072 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
6073 }
6074 } else {
6075 for (int I = 0; I < 16; I++) {
6076 unsigned Reg = SystemZMC::FP64Regs[I];
6077 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
6078 }
6079 }
6080 }
6082 return MBB;
6083 }
6085 MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
6086 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
6087 MachineFunction &MF = *MBB->getParent();
6088 MachineRegisterInfo *MRI = &MF.getRegInfo();
6089 const SystemZInstrInfo *TII =
6090 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6091 DebugLoc DL = MI.getDebugLoc();
6093 unsigned SrcReg = MI.getOperand(0).getReg();
6095 // Create new virtual register of the same class as source.
6096 const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
6097 unsigned DstReg = MRI->createVirtualRegister(RC);
6099 // Replace pseudo with a normal load-and-test that models the def as
6100 // well.
6101 BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
6102 .addReg(SrcReg);
6103 MI.eraseFromParent();
6105 return MBB;
6106 }
6108 MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
6109 MachineInstr &MI, MachineBasicBlock *MBB) const {
6110 switch (MI.getOpcode()) {
6111 case SystemZ::Select32Mux:
6112 return emitSelect(MI, MBB,
6113 Subtarget.hasLoadStoreOnCond2()? SystemZ::LOCRMux : 0);
6114 case SystemZ::Select32:
6115 return emitSelect(MI, MBB, SystemZ::LOCR);
6116 case SystemZ::Select64:
6117 return emitSelect(MI, MBB, SystemZ::LOCGR);
6118 case SystemZ::SelectF32:
6119 case SystemZ::SelectF64:
6120 case SystemZ::SelectF128:
6121 return emitSelect(MI, MBB, 0);
6123 case SystemZ::CondStore8Mux:
6124 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
6125 case SystemZ::CondStore8MuxInv:
6126 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
6127 case SystemZ::CondStore16Mux:
6128 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
6129 case SystemZ::CondStore16MuxInv:
6130 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
6131 case SystemZ::CondStore32Mux:
6132 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
6133 case SystemZ::CondStore32MuxInv:
6134 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
6135 case SystemZ::CondStore8:
6136 return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
6137 case SystemZ::CondStore8Inv:
6138 return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
6139 case SystemZ::CondStore16:
6140 return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
6141 case SystemZ::CondStore16Inv:
6142 return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
6143 case SystemZ::CondStore32:
6144 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
6145 case SystemZ::CondStore32Inv:
6146 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
6147 case SystemZ::CondStore64:
6148 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
6149 case SystemZ::CondStore64Inv:
6150 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
6151 case SystemZ::CondStoreF32:
6152 return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
6153 case SystemZ::CondStoreF32Inv:
6154 return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
6155 case SystemZ::CondStoreF64:
6156 return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
6157 case SystemZ::CondStoreF64Inv:
6158 return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
6160 case SystemZ::AEXT128:
6161 return emitExt128(MI, MBB, false);
6162 case SystemZ::ZEXT128:
6163 return emitExt128(MI, MBB, true);
6165 case SystemZ::ATOMIC_SWAPW:
6166 return emitAtomicLoadBinary(MI, MBB, 0, 0);
6167 case SystemZ::ATOMIC_SWAP_32:
6168 return emitAtomicLoadBinary(MI, MBB, 0, 32);
6169 case SystemZ::ATOMIC_SWAP_64:
6170 return emitAtomicLoadBinary(MI, MBB, 0, 64);
6172 case SystemZ::ATOMIC_LOADW_AR:
6173 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
6174 case SystemZ::ATOMIC_LOADW_AFI:
6175 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
6176 case SystemZ::ATOMIC_LOAD_AR:
6177 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
6178 case SystemZ::ATOMIC_LOAD_AHI:
6179 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
6180 case SystemZ::ATOMIC_LOAD_AFI:
6181 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
6182 case SystemZ::ATOMIC_LOAD_AGR:
6183 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
6184 case SystemZ::ATOMIC_LOAD_AGHI:
6185 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
6186 case SystemZ::ATOMIC_LOAD_AGFI:
6187 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
6189 case SystemZ::ATOMIC_LOADW_SR:
6190 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
6191 case SystemZ::ATOMIC_LOAD_SR:
6192 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
6193 case SystemZ::ATOMIC_LOAD_SGR:
6194 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
6196 case SystemZ::ATOMIC_LOADW_NR:
6197 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
6198 case SystemZ::ATOMIC_LOADW_NILH:
6199 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
6200 case SystemZ::ATOMIC_LOAD_NR:
6201 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
6202 case SystemZ::ATOMIC_LOAD_NILL:
6203 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
6204 case SystemZ::ATOMIC_LOAD_NILH:
6205 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
6206 case SystemZ::ATOMIC_LOAD_NILF:
6207 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
6208 case SystemZ::ATOMIC_LOAD_NGR:
6209 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
6210 case SystemZ::ATOMIC_LOAD_NILL64:
6211 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
6212 case SystemZ::ATOMIC_LOAD_NILH64:
6213 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
6214 case SystemZ::ATOMIC_LOAD_NIHL64:
6215 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
6216 case SystemZ::ATOMIC_LOAD_NIHH64:
6217 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
6218 case SystemZ::ATOMIC_LOAD_NILF64:
6219 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
6220 case SystemZ::ATOMIC_LOAD_NIHF64:
6221 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
6223 case SystemZ::ATOMIC_LOADW_OR:
6224 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
6225 case SystemZ::ATOMIC_LOADW_OILH:
6226 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
6227 case SystemZ::ATOMIC_LOAD_OR:
6228 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
6229 case SystemZ::ATOMIC_LOAD_OILL:
6230 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
6231 case SystemZ::ATOMIC_LOAD_OILH:
6232 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
6233 case SystemZ::ATOMIC_LOAD_OILF:
6234 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
6235 case SystemZ::ATOMIC_LOAD_OGR:
6236 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
6237 case SystemZ::ATOMIC_LOAD_OILL64:
6238 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
6239 case SystemZ::ATOMIC_LOAD_OILH64:
6240 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
6241 case SystemZ::ATOMIC_LOAD_OIHL64:
6242 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
6243 case SystemZ::ATOMIC_LOAD_OIHH64:
6244 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
6245 case SystemZ::ATOMIC_LOAD_OILF64:
6246 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
6247 case SystemZ::ATOMIC_LOAD_OIHF64:
6248 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
6250 case SystemZ::ATOMIC_LOADW_XR:
6251 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
6252 case SystemZ::ATOMIC_LOADW_XILF:
6253 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
6254 case SystemZ::ATOMIC_LOAD_XR:
6255 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
6256 case SystemZ::ATOMIC_LOAD_XILF:
6257 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
6258 case SystemZ::ATOMIC_LOAD_XGR:
6259 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
6260 case SystemZ::ATOMIC_LOAD_XILF64:
6261 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
6262 case SystemZ::ATOMIC_LOAD_XIHF64:
6263 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
6265 case SystemZ::ATOMIC_LOADW_NRi:
6266 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
6267 case SystemZ::ATOMIC_LOADW_NILHi:
6268 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
6269 case SystemZ::ATOMIC_LOAD_NRi:
6270 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
6271 case SystemZ::ATOMIC_LOAD_NILLi:
6272 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
6273 case SystemZ::ATOMIC_LOAD_NILHi:
6274 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
6275 case SystemZ::ATOMIC_LOAD_NILFi:
6276 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
6277 case SystemZ::ATOMIC_LOAD_NGRi:
6278 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
6279 case SystemZ::ATOMIC_LOAD_NILL64i:
6280 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
6281 case SystemZ::ATOMIC_LOAD_NILH64i:
6282 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
6283 case SystemZ::ATOMIC_LOAD_NIHL64i:
6284 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
6285 case SystemZ::ATOMIC_LOAD_NIHH64i:
6286 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
6287 case SystemZ::ATOMIC_LOAD_NILF64i:
6288 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
6289 case SystemZ::ATOMIC_LOAD_NIHF64i:
6290 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
6292 case SystemZ::ATOMIC_LOADW_MIN:
6293 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6294 SystemZ::CCMASK_CMP_LE, 0);
6295 case SystemZ::ATOMIC_LOAD_MIN_32:
6296 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6297 SystemZ::CCMASK_CMP_LE, 32);
6298 case SystemZ::ATOMIC_LOAD_MIN_64:
6299 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
6300 SystemZ::CCMASK_CMP_LE, 64);
6302 case SystemZ::ATOMIC_LOADW_MAX:
6303 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6304 SystemZ::CCMASK_CMP_GE, 0);
6305 case SystemZ::ATOMIC_LOAD_MAX_32:
6306 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6307 SystemZ::CCMASK_CMP_GE, 32);
6308 case SystemZ::ATOMIC_LOAD_MAX_64:
6309 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
6310 SystemZ::CCMASK_CMP_GE, 64);
6312 case SystemZ::ATOMIC_LOADW_UMIN:
6313 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6314 SystemZ::CCMASK_CMP_LE, 0);
6315 case SystemZ::ATOMIC_LOAD_UMIN_32:
6316 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6317 SystemZ::CCMASK_CMP_LE, 32);
6318 case SystemZ::ATOMIC_LOAD_UMIN_64:
6319 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
6320 SystemZ::CCMASK_CMP_LE, 64);
6322 case SystemZ::ATOMIC_LOADW_UMAX:
6323 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6324 SystemZ::CCMASK_CMP_GE, 0);
6325 case SystemZ::ATOMIC_LOAD_UMAX_32:
6326 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6327 SystemZ::CCMASK_CMP_GE, 32);
6328 case SystemZ::ATOMIC_LOAD_UMAX_64:
6329 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
6330 SystemZ::CCMASK_CMP_GE, 64);
6332 case SystemZ::ATOMIC_CMP_SWAPW:
6333 return emitAtomicCmpSwapW(MI, MBB);
6334 case SystemZ::MVCSequence:
6335 case SystemZ::MVCLoop:
6336 return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
6337 case SystemZ::NCSequence:
6338 case SystemZ::NCLoop:
6339 return emitMemMemWrapper(MI, MBB, SystemZ::NC);
6340 case SystemZ::OCSequence:
6341 case SystemZ::OCLoop:
6342 return emitMemMemWrapper(MI, MBB, SystemZ::OC);
6343 case SystemZ::XCSequence:
6344 case SystemZ::XCLoop:
6345 return emitMemMemWrapper(MI, MBB, SystemZ::XC);
6346 case SystemZ::CLCSequence:
6347 case SystemZ::CLCLoop:
6348 return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
6349 case SystemZ::CLSTLoop:
6350 return emitStringWrapper(MI, MBB, SystemZ::CLST);
6351 case SystemZ::MVSTLoop:
6352 return emitStringWrapper(MI, MBB, SystemZ::MVST);
6353 case SystemZ::SRSTLoop:
6354 return emitStringWrapper(MI, MBB, SystemZ::SRST);
6355 case SystemZ::TBEGIN:
6356 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
6357 case SystemZ::TBEGIN_nofloat:
6358 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
6359 case SystemZ::TBEGINC:
6360 return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
6361 case SystemZ::LTEBRCompare_VecPseudo:
6362 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
6363 case SystemZ::LTDBRCompare_VecPseudo:
6364 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
6365 case SystemZ::LTXBRCompare_VecPseudo:
6366 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
6368 default:
6369 llvm_unreachable("Unexpected instr type to insert");
6370 }
6371 }
6373 // This is only used by the isel schedulers, and is needed only to prevent
6374 // the compiler from crashing when list-ilp is used.
6375 const TargetRegisterClass *
6376 SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
6377 if (VT == MVT::Untyped)
6378 return &SystemZ::ADDR128BitRegClass;
6379 return TargetLowering::getRepRegClassFor(VT);
6380 }