//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
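// (IPM inserts the condition code into bits 29-28 of its result, i.e. at
// SystemZ::IPM_CC, and the two bits above it are zero; the conversions
// computed by getIPMConversion below rely on that layout.)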
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  unsigned XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
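  // (SystemZ pointers are 8 bytes, so PtrVT here is expected to be MVT::i64.)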

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  if (Subtarget.hasVectorEnhancements1())
    addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
  else
    addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect branch.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
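  // (For example, an i16 atomic add is legalized into an i32 ATOMIC_LOAD_ADD
  // whose memory operand still records the 16-bit memory type.)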
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such.  In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner.  ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f128, Legal);
  }

  // We have fused multiply-addition for f32 and f64, and (with the vector
  // enhancements facility 1) also for f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FMA, MVT::f128, Legal);
  else
    setOperationAction(ISD::FMA, MVT::f128, Expand);

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instructions on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::ROTL);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AM, Type *Ty,
                                                  unsigned AS,
                                                  Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isFoldableMemAccessOffset(Instruction *I,
                                                      int64_t Offset) const {
  // This only applies to z13.
  if (!Subtarget.hasVector())
    return true;

  // * Use LDE instead of LE/LEY to avoid partial register
  //   dependencies (LDE only supports small offsets).
  // * Utilize the vector registers to hold floating point
  //   values (vector load / store instructions only support small
  //   offsets).

  assert (isa<LoadInst>(I) || isa<StoreInst>(I));
  Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                       I->getOperand(0)->getType());
  bool IsFPAccess = MemAccessTy->isFloatingPointTy();
  bool IsVectorAccess = MemAccessTy->isVectorTy();

  // A store of an extracted vector element will be combined into a VSTE type
  // instruction.
  if (!IsVectorAccess && isa<StoreInst>(I)) {
    Value *DataOp = I->getOperand(0);
    if (isa<ExtractElementInst>(DataOp))
      IsVectorAccess = true;
  }

  // A load which gets inserted into a vector element will be combined into a
  // VLE type instruction.
  if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
    User *LoadUser = *I->user_begin();
    if (isa<InsertElementInst>(LoadUser))
      IsVectorAccess = true;
  }

  if (!isUInt<12>(Offset) && (IsFPAccess || IsVectorAccess))
    return false;

  return true;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
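// (For example, with the usual SystemZMC tables, "{r5}" parsed against
// GR64BitRegClass/GR64Regs would be expected to yield SystemZ::R5D.)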
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
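// (For example, an i32 argument promoted to an i64 GPR arrives here with
// LocVT i64 and ValVT i32, and is truncated back after the AssertSext /
// AssertZext below.)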
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns.
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}

// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
    Opcode = SystemZISD::PACKS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    Opcode = SystemZISD::PACKLS_CC;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
    Opcode = SystemZISD::VICMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
    Opcode = SystemZISD::VICMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
    Opcode = SystemZISD::VICMPHLS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vtm:
    Opcode = SystemZISD::VTM;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
    Opcode = SystemZISD::VFAE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
    Opcode = SystemZISD::VFAEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
    Opcode = SystemZISD::VFEE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
    Opcode = SystemZISD::VFEEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
    Opcode = SystemZISD::VFENE_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
    Opcode = SystemZISD::VFENEZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
    Opcode = SystemZISD::VISTR_CC;
    CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
    return true;

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
    Opcode = SystemZISD::VSTRC_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
    Opcode = SystemZISD::VSTRCZ_CC;
    CCValid = SystemZ::CCMASK_ANY;
    return true;

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
    Opcode = SystemZISD::VFCMPES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
    Opcode = SystemZISD::VFCMPHS;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
    Opcode = SystemZISD::VFCMPHES;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
    Opcode = SystemZISD::VFTCI;
    CCValid = SystemZ::CCMASK_VCMP;
    return true;

  case Intrinsic::s390_tdc:
    Opcode = SystemZISD::TDC;
    CCValid = SystemZ::CCMASK_TDC;
    return true;

  default:
    return false;
  }
}

// Emit an intrinsic with chain with a glued value instead of its CC result.
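// (Modeling the CC result as MVT::Glue keeps the CC consumer glued to its
// producer, so nothing that clobbers CC can be scheduled between them.)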
static SDValue emitIntrinsicWithChainAndGlue(SelectionDAG &DAG, SDValue Op,
                                             unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  SDVTList RawVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
  SDValue OldChain = SDValue(Op.getNode(), 1);
  SDValue NewChain = SDValue(Intr.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
  return Intr;
}

// Emit an intrinsic with a glued value instead of its CC result.
static SDValue emitIntrinsicWithGlue(SelectionDAG &DAG, SDValue Op,
                                     unsigned Opcode) {
  // Copy all operands except the intrinsic ID.
  unsigned NumOps = Op.getNumOperands();
  SmallVector<SDValue, 6> Ops;
  Ops.reserve(NumOps - 1);
  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  if (Op->getNumValues() == 1)
    return DAG.getNode(Opcode, SDLoc(Op), MVT::Glue, Ops);
  assert(Op->getNumValues() == 2 && "Expected exactly one non-CC result");
  SDVTList RawVTs = DAG.getVTList(Op->getValueType(0), MVT::Glue);
  return DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
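// (For example, SETUGT maps to CCMASK_CMP_UO | CCMASK_CMP_GT, while both
// SETGT and SETOGT map to CCMASK_CMP_GT.)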
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
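// (Worked example: for CCMask == CCMASK_0 the conversion below is
// {XORValue = 0, AddValue = -(1 << SystemZ::IPM_CC), Bit = 31}.  Subtracting
// 1 from the CC field makes the 32-bit value negative exactly when CC was 0,
// so the sign bit, extracted by the shift right of 31, is the wanted 0/1.)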
1628 static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
1629 // Deal with cases where the result can be taken directly from a bit
1630 // of the IPM result.
1631 if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
1632 return IPMConversion(0, 0, SystemZ::IPM_CC);
1633 if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
1634 return IPMConversion(0, 0, SystemZ::IPM_CC + 1);
1636 // Deal with cases where we can add a value to force the sign bit
1637 // to contain the right value. Putting the bit in 31 means we can
1638 // use SRL rather than RISBG(L), and also makes it easier to get a
1639 // 0/-1 value, so it has priority over the other tests below.
1641 // These sequences rely on the fact that the upper two bits of the
1642 // IPM result are zero.
1643 uint64_t TopBit = uint64_t(1) << 31;
1644 if (CCMask == (CCValid & SystemZ::CCMASK_0))
1645 return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
1646 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
1647 return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
1651 return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
1652 if (CCMask == (CCValid & SystemZ::CCMASK_3))
1653 return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
1657 return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
1659 // Next try inverting the value and testing a bit. 0/1 could be
1660 // handled this way too, but we dealt with that case above.
1661 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
1662 return IPMConversion(-1, 0, SystemZ::IPM_CC);
  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
1666 if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
1667 return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
1668 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
1669 return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All of these can
  // be done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
1674 if (CCMask == (CCValid & SystemZ::CCMASK_1))
1675 return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
1676 if (CCMask == (CCValid & SystemZ::CCMASK_2))
1677 return IPMConversion(1 << SystemZ::IPM_CC,
1678 TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
1682 return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
1686 return IPMConversion(1 << SystemZ::IPM_CC,
1687 TopBit - (1 << SystemZ::IPM_CC), 31);
  llvm_unreachable("Unexpected CC combination");
}
// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
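// For example, a signed test "X > -1" is converted below into "X >= 0":
// toggling CCMASK_CMP_EQ turns the GT mask into the GE mask.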
1694 static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;
1702 int64_t Value = ConstOp1->getSExtValue();
1703 if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
1704 (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
1705 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
1706 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
1707 C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
  }
}
1712 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1713 // adjust the operands as necessary.
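// As an illustrative case: an equality test between (i32 (zextload i8 *P))
// and the constant 200 fits this pattern, since 200 is within the
// unextended 8-bit range; CLI can then compare the memory byte directly.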
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
                             Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
1717 // load and a constant.
1718 if (!C.Op0.hasOneUse() ||
1719 C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;
1723 // We must have an 8- or 16-bit load.
1724 auto *Load = cast<LoadSDNode>(C.Op0);
1725 unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;
1729 // The load must be an extending one and the constant must be within the
1730 // range of the unextended value.
1731 auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1732 uint64_t Value = ConstOp1->getZExtValue();
1733 uint64_t Mask = (1 << NumBits) - 1;
1734 if (Load->getExtensionType() == ISD::SEXTLOAD) {
1735 // Make sure that ConstOp1 is in range of C.Op0.
1736 int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
1739 if (C.ICmpType != SystemZICMP::SignedOnly) {
1740 // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
1744 // Try to treat the comparison as unsigned, so that we can use CLI.
1745 // Adjust CCMask and Value as necessary.
1746 if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
1747 // Test whether the high bit of the byte is set.
1748 Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
1749 else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
1750 // Test whether the high bit of the byte is clear.
1751 Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
1760 // If the constant is in range, we can use any comparison.
    C.ICmpType = SystemZICMP::Any;
  } else
    return;
1765 // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
1769 if (C.Op0.getValueType() != MVT::i32 ||
1770 Load->getExtensionType() != ExtType)
1771 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
1772 Load->getBasePtr(), Load->getPointerInfo(),
1773 Load->getMemoryVT(), Load->getAlignment(),
1774 Load->getMemOperand()->getFlags());
1776 // Make sure that the second operand is an i32 with the right value.
1777 if (C.Op1.getValueType() != MVT::i32 ||
1778 Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
}
1782 // Return true if Op is either an unextended load, or a load suitable
1783 // for integer register-memory comparisons of type ICmpType.
1784 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}
1805 // Return true if it is better to swap the operands of C.
1806 static bool shouldSwapCmpOperands(const Comparison &C) {
1807 // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;
1811 // Always keep a floating-point constant second, since comparisons with
1812 // zero can use LOAD TEST and comparisons with other constants make a
1813 // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
1819 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;
1823 // Also keep natural memory operands second if the loaded value is
1824 // only used here. Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;
1828 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
1829 // In that case we generally prefer the memory to be second.
1830 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
1831 // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }
1848 // Try to promote the use of CGFR and CLGFR.
1849 unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
1854 if (C.ICmpType != SystemZICMP::SignedOnly &&
1855 Opcode0 == ISD::AND &&
1856 C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}
1863 // Return a version of comparison CC mask CCMask in which the LT and GT
1864 // actions are swapped.
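// For example, reverseCCMask(CCMASK_CMP_LE) yields CCMASK_CMP_GE: the EQ
// and UO bits pass through unchanged while LT and GT trade places.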
1865 static unsigned reverseCCMask(unsigned CCMask) {
1866 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
1867 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
1868 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}
1872 // Check whether C tests for equality between X and Y and whether X - Y
1873 // or Y - X is also computed. In that case it's better to compare the
1874 // result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
                                 Comparison &C) {
1877 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
1878 C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
1881 if (N->getOpcode() == ISD::SUB &&
1882 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
1883 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
1884 C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
        return;
      }
    }
  }
}
1892 // Check whether C compares a floating-point value with zero and if that
1893 // floating-point value is also negated. In this case we can use the
1894 // negation to set CC, so avoiding separate LOAD AND TEST and
1895 // LOAD (NEGATIVE/COMPLEMENT) instructions.
1896 static void adjustForFNeg(Comparison &C) {
1897 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
1898 if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
1901 if (N->getOpcode() == ISD::FNEG) {
1902 C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}
1910 // Check whether C compares (shl X, 32) with 0 and whether X is
1911 // also sign-extended. In that case it is better to test the result
1912 // of the sign extension using LTGFR.
1914 // This case is important because InstCombine transforms a comparison
1915 // with (sext (trunc X)) into a comparison with (shl X, 32).
1916 static void adjustForLTGFR(Comparison &C) {
1917 // Check for a comparison between (shl X, 32) and 0.
1918 if (C.Op0.getOpcode() == ISD::SHL &&
1919 C.Op0.getValueType() == MVT::i64 &&
1920 C.Op1.getOpcode() == ISD::Constant &&
1921 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1922 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
1923 if (C1 && C1->getZExtValue() == 32) {
1924 SDValue ShlOp0 = C.Op0.getOperand(0);
1925 // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
1928 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
1929 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}
1938 // If C compares the truncation of an extending load, try to compare
// the untruncated value instead.  This exposes more opportunities to
// use LT(G).
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
                               Comparison &C) {
1943 if (C.Op0.getOpcode() == ISD::TRUNCATE &&
1944 C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
1945 C.Op1.getOpcode() == ISD::Constant &&
1946 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1947 auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
1948 if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
1949 unsigned Type = L->getExtensionType();
1950 if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
1951 (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
1952 C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
      }
    }
  }
}
1959 // Return true if shift operation N has an in-range constant shift value.
1960 // Store it in ShiftVal if so.
1961 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;
1966 uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}
1974 // Check whether an AND with Mask is suitable for a TEST UNDER MASK
1975 // instruction and whether the CC value is descriptive enough to handle
1976 // a comparison of type Opcode between the AND result and CmpVal.
1977 // CCMask says which comparison result is being tested and BitSize is
1978 // the number of bits in the operands. If TEST UNDER MASK can be used,
1979 // return the corresponding CC mask, otherwise return 0.
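// A small worked example: for Mask == 0x80, CmpVal == 0 and an EQ
// comparison, the result is CCMASK_TM_ALL_0, since TEST UNDER MASK with
// mask 0x80 sets CC 0 exactly when the tested bit is zero.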
1980 static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
1981 uint64_t Mask, uint64_t CmpVal,
1982 unsigned ICmpType) {
1983 assert(Mask != 0 && "ANDs with zero should have been removed by now");
1985 // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
1986 if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;
1990 // Work out the masks for the lowest and highest bits.
1991 unsigned HighShift = 63 - countLeadingZeros(Mask);
1992 uint64_t High = uint64_t(1) << HighShift;
1993 uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is clear.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}
2070 // See whether C can be implemented as a TEST UNDER MASK instruction.
2071 // Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
                                   Comparison &C) {
  // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
2078 uint64_t CmpVal = ConstOp1->getZExtValue();
  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
2092 // There is no instruction to compare with a 64-bit immediate
2093 // so use TMHH instead if possible. We need an unsigned ordered
2094 // comparison with an i64 immediate.
2095 if (NewC.Op0.getValueType() != MVT::i64 ||
2096 NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2097 NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
2100 // Convert LE and GT comparisons into LT and GE.
2101 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2102 NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero, then the low N bits of Op0 can
    // be masked off without changing the result.
    MaskVal = -(CmpVal & -CmpVal);
    NewC.ICmpType = SystemZICMP::UnsignedOnly;
  }
2116 // Check whether the combination of mask, comparison value and comparison
2117 // type are suitable.
2118 unsigned BitSize = NewC.Op0.getValueSizeInBits();
2119 unsigned NewCCMask, ShiftVal;
2120 if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2121 NewC.Op0.getOpcode() == ISD::SHL &&
2122 isSimpleShift(NewC.Op0, ShiftVal) &&
2123 (MaskVal >> ShiftVal != 0) &&
2124 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                        MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
2128 NewC.Op0 = NewC.Op0.getOperand(0);
2129 MaskVal >>= ShiftVal;
2130 } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2131 NewC.Op0.getOpcode() == ISD::SRL &&
2132 isSimpleShift(NewC.Op0, ShiftVal) &&
2133 (MaskVal << ShiftVal != 0) &&
2134 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
2138 NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
                                     NewC.ICmpType);
    if (!NewCCMask)
      return;
  }
2147 // Go ahead and make the change.
  C.Opcode = SystemZISD::TM;
  C.Op0 = NewC.Op0;
2150 if (Mask && Mask->getZExtValue() == MaskVal)
    C.Op1 = SDValue(Mask, 0);
  else
    C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
2154 C.CCValid = SystemZ::CCMASK_TM;
  C.CCMask = NewCCMask;
}
2158 // Return a Comparison that tests the condition-code result of intrinsic
2159 // node Call against constant integer CC using comparison code Cond.
2160 // Opcode is the opcode of the SystemZISD operation for the intrinsic
2161 // and CCValid is the set of possible condition-code results.
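// For instance, testing "CC == 2" with SETEQ yields CCMask == 1 << 1,
// following the bit numbering described in the comments below.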
2162 static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
2163 SDValue Call, unsigned CCValid, uint64_t CC,
2164 ISD::CondCode Cond) {
  Comparison C(Call, SDValue());
  C.Opcode = Opcode;
2167 C.CCValid = CCValid;
2168 if (Cond == ISD::SETEQ)
2169 // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
2170 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2171 else if (Cond == ISD::SETNE)
2172 // ...and the inverse of that.
2173 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2174 else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2175 // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2176 // always true for CC>3.
2177 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2178 else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2179 // ...and the inverse of that.
2180 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2181 else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2182 // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2183 // always true for CC>3.
2184 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2185 else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2186 // ...and the inverse of that.
2187 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  else
    llvm_unreachable("Unexpected integer comparison type");
  C.CCMask &= CCValid;
  return C;
}
// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2195 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
2196 ISD::CondCode Cond, const SDLoc &DL) {
2197 if (CmpOp1.getOpcode() == ISD::Constant) {
2198 uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2199 unsigned Opcode, CCValid;
2200 if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
2201 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
2202 isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
2203 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2204 if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2205 CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
2206 isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
  }
  Comparison C(CmpOp0, CmpOp1);
2210 C.CCMask = CCMaskForCondCode(Cond);
2211 if (C.Op0.getValueType().isFloatingPoint()) {
2212 C.CCValid = SystemZ::CCMASK_FCMP;
    C.Opcode = SystemZISD::FCMP;
    adjustForFNeg(C);
  } else {
    C.CCValid = SystemZ::CCMASK_ICMP;
2217 C.Opcode = SystemZISD::ICMP;
2218 // Choose the type of comparison. Equality and inequality tests can
2219 // use either signed or unsigned comparisons. The choice also doesn't
2220 // matter if both sign bits are known to be clear. In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2224 C.CCMask == SystemZ::CCMASK_CMP_NE ||
2225 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
2226 C.ICmpType = SystemZICMP::Any;
2227 else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
2228 C.ICmpType = SystemZICMP::UnsignedOnly;
    else
      C.ICmpType = SystemZICMP::SignedOnly;
2231 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
2232 adjustZeroCmp(DAG, DL, C);
2233 adjustSubwordCmp(DAG, DL, C);
    adjustForSubtraction(DAG, DL, C);
    adjustForLTGFR(C);
    adjustICmpTruncate(DAG, DL, C);
  }
2239 if (shouldSwapCmpOperands(C)) {
2240 std::swap(C.Op0, C.Op1);
    C.CCMask = reverseCCMask(C.CCMask);
  }

  adjustForTestUnderMask(DAG, DL, C);
  return C;
}
2248 // Emit the comparison instruction described by C.
2249 static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (!C.Op1.getNode()) {
    SDValue Op;
    switch (C.Op0.getOpcode()) {
    case ISD::INTRINSIC_W_CHAIN:
      Op = emitIntrinsicWithChainAndGlue(DAG, C.Op0, C.Opcode);
      break;
    case ISD::INTRINSIC_WO_CHAIN:
      Op = emitIntrinsicWithGlue(DAG, C.Op0, C.Opcode);
      break;
    default:
      llvm_unreachable("Invalid comparison operands");
    }
    return SDValue(Op.getNode(), Op->getNumValues() - 1);
  }
2264 if (C.Opcode == SystemZISD::ICMP)
2265 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1,
2266 DAG.getConstant(C.ICmpType, DL, MVT::i32));
2267 if (C.Opcode == SystemZISD::TM) {
2268 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
2269 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
2270 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1,
                       DAG.getConstant(RegisterOnly, DL, MVT::i32));
  }
  return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1);
}
2276 // Implement a 32-bit *MUL_LOHI operation by extending both operands to
2277 // 64 bits. Extend is the extension type to use. Store the high part
2278 // in Hi and the low part in Lo.
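// For example, an unsigned 0x80000000 * 2 extends to the 64-bit product
// 0x100000000, so Hi receives 1 and Lo receives 0.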
2279 static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
                            SDValue Op0, SDValue Op1, SDValue &Hi,
                            SDValue &Lo) {
2282 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
2283 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
2284 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
2285 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
2286 DAG.getConstant(32, DL, MVT::i64));
2287 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}
2291 // Lower a binary operation that produces two VT results, one in each
2292 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
2293 // and Opcode performs the GR128 operation. Store the even register result
2294 // in Even and the odd register result in Odd.
2295 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2296 unsigned Opcode, SDValue Op0, SDValue Op1,
2297 SDValue &Even, SDValue &Odd) {
2298 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
2299 bool Is32Bit = is32Bit(VT);
2300 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}
2304 // Return an i32 value that is 1 if the CC value produced by Glue is
2305 // in the mask CCMask and 0 otherwise. CC is known to have a value
2306 // in CCValid, so other values can be ignored.
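// As a sketch of the typical expansion: for CCValid == CCMASK_ICMP and
// CCMask == CCMASK_CMP_EQ, this emits IPM, an add of -(1 << IPM_CC) and
// a shift right by 31, per getIPMConversion above.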
2307 static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue Glue,
2308 unsigned CCValid, unsigned CCMask) {
2309 IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
2310 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
2312 if (Conversion.XORValue)
2313 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
2314 DAG.getConstant(Conversion.XORValue, DL, MVT::i32));
2316 if (Conversion.AddValue)
2317 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
2318 DAG.getConstant(Conversion.AddValue, DL, MVT::i32));
2320 // The SHR/AND sequence should get optimized to an RISBG.
2321 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
2322 DAG.getConstant(Conversion.Bit, DL, MVT::i32));
2323 if (Conversion.Bit != 31)
2324 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
                         DAG.getConstant(1, DL, MVT::i32));
  return Result;
}
// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2330 // be done directly. IsFP is true if CC is for a floating-point rather than
2331 // integer comparison.
static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
  switch (CC) {
  case ISD::SETOEQ:
  case ISD::SETEQ:
    return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;

  case ISD::SETOGE:
  case ISD::SETGE:
    return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);

  case ISD::SETOGT:
  case ISD::SETGT:
    return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;

  case ISD::SETUGT:
    return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;

  default:
    return 0;
  }
}
2354 // Return the SystemZISD vector comparison operation for CC or its inverse,
2355 // or 0 if neither can be done directly. Indicate in Invert whether the
2356 // result is for the inverse of CC. IsFP is true if CC is for a
2357 // floating-point rather than integer comparison.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
                                            bool &Invert) {
  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
    Invert = false;
    return Opcode;
  }

  CC = ISD::getSetCCInverse(CC, !IsFP);
  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
    Invert = true;
    return Opcode;
  }

  return 0;
}
2374 // Return a v2f64 that contains the extended form of elements Start and Start+1
2375 // of v4f32 value Op.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
                                  SDValue Op) {
2378 int Mask[] = { Start, -1, Start + 1, -1 };
2379 Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
2380 return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
2383 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2384 // producing a result of type VT.
2385 SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
2386 const SDLoc &DL, EVT VT,
                                            SDValue CmpOp0,
                                            SDValue CmpOp1) const {
2389 // There is no hardware support for v4f32 (unless we have the vector
2390 // enhancements facility 1), so extend the vector into two v2f64s
2391 // and compare those.
2392 if (CmpOp0.getValueType() == MVT::v4f32 &&
2393 !Subtarget.hasVectorEnhancements1()) {
2394 SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
2395 SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
2396 SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
2397 SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
2398 SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
2399 SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
2400 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
}
2405 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2406 // an integer mask of type VT.
2407 SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
2408 const SDLoc &DL, EVT VT,
                                                ISD::CondCode CC,
                                                SDValue CmpOp0,
                                                SDValue CmpOp1) const {
  bool IsFP = CmpOp0.getValueType().isFloatingPoint();
  bool Invert = false;
  SDValue Cmp;
  switch (CC) {
    // Handle tests for order using (or (ogt y x) (oge x y)).
  case ISD::SETUO:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETO: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
    SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
    break;
  }

    // Handle <> tests using (or (ogt y x) (ogt x y)).
  case ISD::SETUEQ:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETONE: {
    assert(IsFP && "Unexpected integer comparison");
    SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
    SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1);
    Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
    break;
  }

    // Otherwise a single comparison is enough.  It doesn't really
    // matter whether we try the inversion or the swap first, since
    // there are no cases where both work.
  default:
    if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
    else {
      CC = ISD::getSetCCSwappedOperands(CC);
      if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
        Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
      else
        llvm_unreachable("Unhandled comparison");
    }
    break;
  }

  if (Invert) {
    SDValue Mask = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
                               DAG.getConstant(65535, DL, MVT::i32));
    Mask = DAG.getNode(ISD::BITCAST, DL, VT, Mask);
    Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
  }

  return Cmp;
}
2464 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
2465 SelectionDAG &DAG) const {
2466 SDValue CmpOp0 = Op.getOperand(0);
2467 SDValue CmpOp1 = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
2474 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2475 SDValue Glue = emitCmp(DAG, DL, C);
  return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
}
2479 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2480 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2481 SDValue CmpOp0 = Op.getOperand(2);
2482 SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);
2486 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2487 SDValue Glue = emitCmp(DAG, DL, C);
2488 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
2489 Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32),
                     DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, Glue);
}
2493 // Return true if Pos is CmpOp and Neg is the negative of CmpOp,
2494 // allowing Pos and Neg to be wider than CmpOp.
2495 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
2496 return (Neg.getOpcode() == ISD::SUB &&
2497 Neg.getOperand(0).getOpcode() == ISD::Constant &&
2498 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
          Neg.getOperand(1) == Pos &&
          (Pos == CmpOp ||
           (Pos.getOpcode() == ISD::SIGN_EXTEND &&
            Pos.getOperand(0) == CmpOp)));
}
2505 // Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
                           bool IsNegative) {
  Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
  if (IsNegative)
    Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
                     DAG.getConstant(0, DL, Op.getValueType()), Op);
  return Op;
}
2515 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
2516 SelectionDAG &DAG) const {
2517 SDValue CmpOp0 = Op.getOperand(0);
2518 SDValue CmpOp1 = Op.getOperand(1);
2519 SDValue TrueOp = Op.getOperand(2);
2520 SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);
2524 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2526 // Check for absolute and negative-absolute selections, including those
2527 // where the comparison value is sign-extended (for LPGFR and LNGFR).
2528 // This check supplements the one in DAGCombiner.
2529 if (C.Opcode == SystemZISD::ICMP &&
2530 C.CCMask != SystemZ::CCMASK_CMP_EQ &&
2531 C.CCMask != SystemZ::CCMASK_CMP_NE &&
2532 C.Op1.getOpcode() == ISD::Constant &&
2533 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2534 if (isAbsolute(C.Op0, TrueOp, FalseOp))
2535 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
2536 if (isAbsolute(C.Op0, FalseOp, TrueOp))
      return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
  }
2540 SDValue Glue = emitCmp(DAG, DL, C);
2542 // Special case for handling -1/0 results. The shifts we use here
2543 // should get optimized with the IPM conversion sequence.
2544 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp);
2545 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp);
2546 if (TrueC && FalseC) {
2547 int64_t TrueVal = TrueC->getSExtValue();
2548 int64_t FalseVal = FalseC->getSExtValue();
2549 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) {
      // Invert the condition if we want -1 on false.
      if (TrueVal == 0)
        C.CCMask ^= C.CCValid;
2553 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
2554 EVT VT = Op.getValueType();
      // Extend the result to VT.  Upper bits are ignored.
      if (!is32Bit(VT))
        Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result);
2558 // Sign-extend from the low bit.
2559 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32);
2560 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt);
      return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt);
    }
  }
2565 SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32),
2566 DAG.getConstant(C.CCMask, DL, MVT::i32), Glue};
2568 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
}
2572 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
2573 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
2576 int64_t Offset = Node->getOffset();
2577 EVT PtrVT = getPointerTy(DAG.getDataLayout());
  CodeModel::Model CM = DAG.getTarget().getCodeModel();

  SDValue Result;
2581 if (Subtarget.isPC32DBLSymbol(GV, CM)) {
2582 // Assign anchors at 1<<12 byte boundaries.
2583 uint64_t Anchor = Offset & ~uint64_t(0xfff);
2584 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
2585 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    // The offset can be folded into the address if it is aligned to a halfword.
    Offset -= Anchor;
    if (Offset != 0 && (Offset & 1) == 0) {
      SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
      Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
      Offset = 0;
    }
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
2596 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2597 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }
  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, DL, PtrVT));

  return Result;
}
2610 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
                                                 SelectionDAG &DAG,
                                                 unsigned Opcode,
                                                 SDValue GOTOffset) const {
  SDLoc DL(Node);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Chain = DAG.getEntryNode();
  SDValue Glue;
2619 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
2620 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
2621 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
2622 Glue = Chain.getValue(1);
2623 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
2624 Glue = Chain.getValue(1);
2626 // The first call operand is the chain and the second is the TLS symbol.
2627 SmallVector<SDValue, 8> Ops;
2628 Ops.push_back(Chain);
2629 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
                                           Node->getValueType(0),
                                           0, 0));
2633 // Add argument registers to the end of the list so that they are
2634 // known live into the call.
2635 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
2636 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
2638 // Add a register mask operand representing the call-preserved registers.
2639 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2640 const uint32_t *Mask =
2641 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
2642 assert(Mask && "Missing call preserved mask for calling convention");
2643 Ops.push_back(DAG.getRegisterMask(Mask));
2645 // Glue the call to the argument copies.
2646 Ops.push_back(Glue);
2649 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2650 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
2651 Glue = Chain.getValue(1);
2653 // Copy the return value from %r2.
  return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
}
2657 SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
2658 SelectionDAG &DAG) const {
2659 SDValue Chain = DAG.getEntryNode();
2660 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2662 // The high part of the thread pointer is in access register 0.
2663 SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
2664 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
2666 // The low part of the thread pointer is in access register 1.
2667 SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
2668 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
2670 // Merge them into a single 64-bit address.
2671 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
2672 DAG.getConstant(32, DL, PtrVT));
  return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
}
2676 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
2677 SelectionDAG &DAG) const {
2678 if (DAG.getTarget().Options.EmulatedTLS)
2679 return LowerToTLSEmulatedModel(Node, DAG);
2681 const GlobalValue *GV = Node->getGlobal();
2682 EVT PtrVT = getPointerTy(DAG.getDataLayout());
  TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
  SDLoc DL(Node);
  SDValue TP = lowerThreadPointer(DL, DAG);

  // Get the offset of GA from the thread pointer, based on the TLS model.
  SDValue Offset;
  switch (model) {
  case TLSModel::GeneralDynamic: {
2691 // Load the GOT offset of the tls_index (module ID / per-symbol offset).
2692 SystemZConstantPoolValue *CPV =
2693 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
2695 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2696 Offset = DAG.getLoad(
2697 PtrVT, DL, DAG.getEntryNode(), Offset,
2698 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2700 // Call __tls_get_offset to retrieve the offset.
    Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
    break;
  }
2705 case TLSModel::LocalDynamic: {
2706 // Load the GOT offset of the module ID.
2707 SystemZConstantPoolValue *CPV =
2708 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
2710 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2711 Offset = DAG.getLoad(
2712 PtrVT, DL, DAG.getEntryNode(), Offset,
2713 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2715 // Call __tls_get_offset to retrieve the module base offset.
2716 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
2718 // Note: The SystemZLDCleanupPass will remove redundant computations
2719 // of the module base offset. Count total number of local-dynamic
2720 // accesses to trigger execution of that pass.
2721 SystemZMachineFunctionInfo* MFI =
2722 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
2723 MFI->incNumLocalDynamicTLSAccesses();
2725 // Add the per-symbol offset.
2726 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
2728 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
2729 DTPOffset = DAG.getLoad(
2730 PtrVT, DL, DAG.getEntryNode(), DTPOffset,
2731 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
    break;
  }
2737 case TLSModel::InitialExec: {
2738 // Load the offset from the GOT.
2739 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2740 SystemZII::MO_INDNTPOFF);
2741 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
    Offset =
        DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
                    MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    break;
  }
2748 case TLSModel::LocalExec: {
2749 // Force the offset into the constant pool and load it from there.
2750 SystemZConstantPoolValue *CPV =
2751 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
2753 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2754 Offset = DAG.getLoad(
2755 PtrVT, DL, DAG.getEntryNode(), Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    break;
  }
  }
2761 // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}
2765 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
2766 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
2769 int64_t Offset = Node->getOffset();
2770 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2772 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}
2777 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
2778 SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
2781 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2783 // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}
2787 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
2788 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Result;
2793 if (CP->isMachineConstantPoolEntry())
2794 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
2798 CP->getAlignment(), CP->getOffset());
2800 // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}
2804 SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
2805 SelectionDAG &DAG) const {
2806 MachineFunction &MF = DAG.getMachineFunction();
2807 MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  SDLoc DL(Op);
2811 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2812 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2814 // If the back chain frame index has not been allocated yet, do so.
2815 SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
2816 int BackChainIdx = FI->getFramePointerSaveIndex();
2817 if (!BackChainIdx) {
2818 // By definition, the frame address is the address of the back chain.
2819 BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
    FI->setFramePointerSaveIndex(BackChainIdx);
  }
2822 SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);
  // FIXME The frontend should detect this case.
  if (Depth > 0) {
    report_fatal_error("Unsupported stack frame traversal count");
  }

  return BackChain;
}
2832 SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
2833 SelectionDAG &DAG) const {
2834 MachineFunction &MF = DAG.getMachineFunction();
2835 MachineFrameInfo &MFI = MF.getFrameInfo();
2836 MFI.setReturnAddressIsTaken(true);
  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2843 EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // FIXME The frontend should detect this case.
  if (Depth > 0) {
    report_fatal_error("Unsupported stack frame traversal count");
  }
2850 // Return R14D, which has the return address. Mark it an implicit live-in.
2851 unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}
2855 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
2856 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
2859 EVT InVT = In.getValueType();
2860 EVT ResVT = Op.getValueType();
2862 // Convert loads directly. This is normally done by DAGCombiner,
2863 // but we need this case for bitcasts that are created during lowering
2864 // and which are then lowered themselves.
2865 if (auto *LoadN = dyn_cast<LoadSDNode>(In))
2866 if (ISD::isNormalLoad(LoadN))
2867 return DAG.getLoad(ResVT, DL, LoadN->getChain(), LoadN->getBasePtr(),
2868 LoadN->getMemOperand());
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64;
    if (Subtarget.hasHighWord()) {
      SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
                                       MVT::i64);
      In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
                                       MVT::i64, SDValue(U64, 0), In);
    } else {
2878 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
2879 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
                         DAG.getConstant(32, DL, MVT::i64));
    }
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
2883 return DAG.getTargetExtractSubreg(SystemZ::subreg_r32,
                                      DL, MVT::f32, Out64);
  }
2886 if (InVT == MVT::f32 && ResVT == MVT::i32) {
2887 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
2888 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_r32, DL,
2889 MVT::f64, SDValue(U64, 0), In);
2890 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
2891 if (Subtarget.hasHighWord())
      return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
                                        MVT::i32, Out64);
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
2895 DAG.getConstant(32, DL, MVT::i64));
    return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
  }
  llvm_unreachable("Unexpected bitcast combination");
}
2901 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
2902 SelectionDAG &DAG) const {
2903 MachineFunction &MF = DAG.getMachineFunction();
2904 SystemZMachineFunctionInfo *FuncInfo =
2905 MF.getInfo<SystemZMachineFunctionInfo>();
2906 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2908 SDValue Chain = Op.getOperand(0);
2909 SDValue Addr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);
2913 // The initial values of each field.
2914 const unsigned NumFields = 4;
2915 SDValue Fields[NumFields] = {
2916 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
2917 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
2918 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };
2922 // Store each field into its respective slot.
2923 SDValue MemOps[NumFields];
2924 unsigned Offset = 0;
2925 for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
2929 DAG.getIntPtrConstant(Offset, DL));
2930 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset));
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
2937 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
2938 SelectionDAG &DAG) const {
2939 SDValue Chain = Op.getOperand(0);
2940 SDValue DstPtr = Op.getOperand(1);
2941 SDValue SrcPtr = Op.getOperand(2);
2942 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);
2946 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
2947 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
2948 /*isTailCall*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
2952 SDValue SystemZTargetLowering::
2953 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
2954 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
2955 MachineFunction &MF = DAG.getMachineFunction();
  bool RealignOpt = !MF.getFunction()->hasFnAttribute("no-realign-stack");
2957 bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
2959 SDValue Chain = Op.getOperand(0);
2960 SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc DL(Op);
2964 // If user has set the no alignment function attribute, ignore
2965 // alloca alignments.
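  // As a concrete illustration: with an 8-byte stack alignment and a
  // requested 16-byte alloca alignment, ExtraAlignSpace below becomes 8,
  // so the code over-allocates by 8 bytes and later rounds the result up
  // with an AND of ~15.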
2966 uint64_t AlignVal = (RealignOpt ?
2967 dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);
2969 uint64_t StackAlign = TFI->getStackAlignment();
2970 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
2971 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
2973 unsigned SPReg = getStackPointerRegisterToSaveRestore();
2974 SDValue NeededSpace = Size;
2976 // Get a reference to the stack pointer.
2977 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
  // If we need a backchain, save it now.
  SDValue Backchain;
  if (StoreBackchain)
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
2984 // Add extra space for alignment if needed.
2985 if (ExtraAlignSpace)
2986 NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
2987 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
2989 // Get the new stack pointer value.
2990 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
2992 // Copy the new stack pointer back.
2993 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
2995 // The allocated data lives above the 160 bytes allocated for the standard
2996 // frame, plus any outgoing stack arguments. We don't know how much that
2997 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
2998 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
2999 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
3001 // Dynamically realign if needed.
  if (RequiredAlign > StackAlign) {
    Result =
      DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
                  DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
    Result =
      DAG.getNode(ISD::AND, DL, MVT::i64, Result,
                  DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
  }
  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3014 SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}
3018 SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
3019 SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);

  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}
3025 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
3026 SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
3031 // Just do a normal 64-bit multiplication and extract the results.
3032 // We define this so that it can be used for constant division.
3033 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
3034 Op.getOperand(1), Ops[1], Ops[0]);
3035 else if (Subtarget.hasMiscellaneousExtensions2())
3036 // SystemZISD::SMUL_LOHI returns the low result in the odd register and
3037 // the high result in the even register. ISD::SMUL_LOHI is defined to
3038 // return the low half first, so the results are in reverse order.
3039 lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else {
    // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
    //
    //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
    //
    // but using the fact that the upper halves are either all zeros
    // or all ones:
    //
    //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
    //
    // and grouping the right terms together since they are quicker than the
    // multiplication:
    //
    //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
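    //
    // The substitution works because lh and rh are sign-replications of
    // ll and rl: lh is all ones exactly when ll is negative, so
    // (lh * rl) == -(rl) == -(lh & rl) in that case and 0 otherwise.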
3055 SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
3056 SDValue LL = Op.getOperand(0);
3057 SDValue RL = Op.getOperand(1);
3058 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
3059 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
3060 // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3061 // the high result in the even register. ISD::SMUL_LOHI is defined to
3062 // return the low half first, so the results are in reverse order.
3063 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3064 LL, RL, Ops[1], Ops[0]);
3065 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
3066 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
3067 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
    Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
  }
  return DAG.getMergeValues(Ops, DL);
}
3073 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
3074 SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
3079 // Just do a normal 64-bit multiplication and extract the results.
3080 // We define this so that it can be used for constant division.
3081 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3085 // the high result in the even register. ISD::UMUL_LOHI is defined to
3086 // return the low half first, so the results are in reverse order.
3087 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3088 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
3092 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
3093 SelectionDAG &DAG) const {
3094 SDValue Op0 = Op.getOperand(0);
3095 SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
3099 // We use DSGF for 32-bit division. This means the first operand must
3100 // always be 64-bit, and the second operand should be 32-bit whenever
  // that is possible, to improve performance.
  if (is32Bit(VT))
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
3104 else if (DAG.ComputeNumSignBits(Op1) > 32)
3105 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
3107 // DSG(F) returns the remainder in the even register and the
3108 // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
3114 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
3115 SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
3119 // DL(G) returns the remainder in the even register and the
3120 // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
3123 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}
3127 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
3128 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
3130 // Get the known-zero masks for each operand.
  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  KnownBits Known[2];
3133 DAG.computeKnownBits(Ops[0], Known[0]);
3134 DAG.computeKnownBits(Ops[1], Known[1]);
3136 // See if the upper 32 bits of one operand and the lower 32 bits of the
3137 // other are known zero. They are the low and high operands respectively.
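  // For example, in (or (shl X, 32), (and Y, 0xffffffff)) the first
  // operand is known zero in its low 32 bits and the second in its high
  // 32 bits, so the OR reduces to inserting the low half of Y into X.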
3138 uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
                       Known[1].Zero.getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;
3148 SDValue LowOp = Ops[Low];
3149 SDValue HighOp = Ops[High];
3151 // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;
3155 // If the low part is a constant that is outside the range of LHI,
3156 // then we're better off using IILF.
3157 if (LowOp.getOpcode() == ISD::Constant) {
3158 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }
3163 // Check whether the high part is an AND that doesn't change the
3164 // high 32 bits and just masks out low bits. We can skip it if so.
3165 if (HighOp.getOpcode() == ISD::AND &&
3166 HighOp.getOperand(1).getOpcode() == ISD::Constant) {
3167 SDValue HighOp0 = HighOp.getOperand(0);
3168 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
      HighOp = HighOp0;
  }
3173 // Take advantage of the fact that all GR32 operations only change the
3174 // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg.  The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
3178 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
3179 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
                                   MVT::i64, HighOp, Low32);
}
3183 SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
3184 SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
3187 Op = Op.getOperand(0);
3189 // Handle vector types via VPOPCT.
3190 if (VT.isVector()) {
3191 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
3192 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
3193 switch (VT.getScalarSizeInBits()) {
3194 case 8:
3195 break;
3196 case 16: {
3197 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
3198 SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
3199 SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
3200 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3201 Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
3202 break;
3203 }
3204 case 32: {
3205 SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
3206 DAG.getConstant(0, DL, MVT::i32));
3207 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3208 break;
3209 }
3210 case 64: {
3211 SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
3212 DAG.getConstant(0, DL, MVT::i32));
3213 Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
3214 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3215 break;
3216 }
3217 default:
3218 llvm_unreachable("Unexpected type");
3219 }
3220 return Op;
3221 }
3223 // Get the known-zero mask for the operand.
3224 KnownBits Known;
3225 DAG.computeKnownBits(Op, Known);
3226 unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
3227 if (NumSignificantBits == 0)
3228 return DAG.getConstant(0, DL, VT);
3230 // Skip known-zero high parts of the operand.
3231 int64_t OrigBitSize = VT.getSizeInBits();
3232 int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
3233 BitSize = std::min(BitSize, OrigBitSize);
3235 // The POPCNT instruction counts the number of bits in each byte.
3236 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
3237 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
3238 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
3240 // Add up per-byte counts in a binary tree. All bits of Op at
3241 // position larger than BitSize remain zero throughout.
3242 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
3243 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
3244 if (BitSize != OrigBitSize)
3245 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
3246 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
3247 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3250 // Extract overall result from high byte.
3251 if (BitSize > 8)
3252 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
3253 DAG.getConstant(BitSize - 8, DL, VT));
3255 return Op;
3256 }
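// A worked example of the scalar path, assuming a 32-bit input:
// for X = 0x10204080, POPCNT gives the per-byte counts 0x01010101.
// The tree then computes
//   0x01010101 + (0x01010101 << 16) = 0x02020101
//   0x02020101 + (0x02020101 << 8)  = 0x04030201
// and the final SRL by 24 extracts the total, 4, from the high byte.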
3258 SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
3259 SelectionDAG &DAG) const {
3260 SDLoc DL(Op);
3261 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
3262 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
3263 SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
3264 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
3266 // The only fence that needs an instruction is a sequentially-consistent
3267 // cross-thread fence.
3268 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
3269 FenceSSID == SyncScope::System) {
3270 return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
3271 Op.getOperand(0)), 0);
3272 }
3275 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
3276 return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
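// So, roughly speaking, "fence seq_cst" is the only fence that emits a
// serializing instruction here; acquire/release/acq_rel fences and
// single-thread fences only constrain the compiler via MEMBARRIER.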
3279 // Op is an atomic load. Lower it into a serialization followed
3280 // by a normal volatile load.
3281 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
3282 SelectionDAG &DAG) const {
3283 auto *Node = cast<AtomicSDNode>(Op.getNode());
3284 SDValue Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
3285 MVT::Other, Node->getChain()), 0);
3286 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
3287 Chain, Node->getBasePtr(),
3288 Node->getMemoryVT(), Node->getMemOperand());
3291 // Op is an atomic store. Lower it into a normal volatile store followed
3292 // by a serialization.
3293 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
3294 SelectionDAG &DAG) const {
3295 auto *Node = cast<AtomicSDNode>(Op.getNode());
3296 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
3297 Node->getBasePtr(), Node->getMemoryVT(),
3298 Node->getMemOperand());
3299 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other,
3300 Chain), 0);
3303 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
3304 // two into the fullword ATOMIC_LOADW_* operation given by Opcode.
3305 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
3306 SelectionDAG &DAG,
3307 unsigned Opcode) const {
3308 auto *Node = cast<AtomicSDNode>(Op.getNode());
3310 // 32-bit operations need no code outside the main loop.
3311 EVT NarrowVT = Node->getMemoryVT();
3312 EVT WideVT = MVT::i32;
3313 if (NarrowVT == WideVT)
3314 return Op;
3316 int64_t BitSize = NarrowVT.getSizeInBits();
3317 SDValue ChainIn = Node->getChain();
3318 SDValue Addr = Node->getBasePtr();
3319 SDValue Src2 = Node->getVal();
3320 MachineMemOperand *MMO = Node->getMemOperand();
3321 SDLoc DL(Node);
3322 EVT PtrVT = Addr.getValueType();
3324 // Convert atomic subtracts of constants into additions.
3325 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
3326 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
3327 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
3328 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
3331 // Get the address of the containing word.
3332 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3333 DAG.getConstant(-4, DL, PtrVT));
3335 // Get the number of bits that the word must be rotated left in order
3336 // to bring the field to the top bits of a GR32.
3337 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3338 DAG.getConstant(3, DL, PtrVT));
3339 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3341 // Get the complementing shift amount, for rotating a field in the top
3342 // bits back to its proper position.
3343 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3344 DAG.getConstant(0, DL, WideVT), BitShift);
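// For example, for a halfword at byte offset 2 within its aligned word,
// BitShift is 16 and NegBitShift is -16 (which rotates the same way as 16
// modulo 32): rotating the loaded word left by 16 brings the halfword to
// the top of the GR32, and the complementing rotate puts it back.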
3346 // Extend the source operand to 32 bits and prepare it for the inner loop.
3347 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
3348 // operations require the source to be shifted in advance. (This shift
3349 // can be folded if the source is constant.) For AND and NAND, the lower
3350 // bits must be set, while for other opcodes they should be left clear.
3351 if (Opcode != SystemZISD::ATOMIC_SWAPW)
3352 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
3353 DAG.getConstant(32 - BitSize, DL, WideVT));
3354 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
3355 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
3356 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
3357 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
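// For an 8-bit AND, for example, the operand is shifted left by 24 so
// that it occupies the top byte of the GR32, then ORed with 0x00ffffff
// so that the AND inside the loop leaves the remaining 24 bits of the
// containing word unchanged.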
3359 // Construct the ATOMIC_LOADW_* node.
3360 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3361 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
3362 DAG.getConstant(BitSize, DL, WideVT) };
3363 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
3364 NarrowVT, MMO);
3366 // Rotate the result of the final CS so that the field is in the lower
3367 // bits of a GR32, then truncate it.
3368 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
3369 DAG.getConstant(BitSize, DL, WideVT));
3370 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
3372 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
3373 return DAG.getMergeValues(RetOps, DL);
3376 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
3377 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
3378 // operations into additions.
3379 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
3380 SelectionDAG &DAG) const {
3381 auto *Node = cast<AtomicSDNode>(Op.getNode());
3382 EVT MemVT = Node->getMemoryVT();
3383 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
3384 // A full-width operation.
3385 assert(Op.getValueType() == MemVT && "Mismatched VTs");
3386 SDValue Src2 = Node->getVal();
3387 SDValue NegSrc2;
3388 SDLoc DL(Src2);
3390 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
3391 // Use an addition if the operand is constant and either LAA(G) is
3392 // available or the negative value is in the range of A(G)FHI.
3393 int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
3394 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
3395 NegSrc2 = DAG.getConstant(Value, DL, MemVT);
3396 } else if (Subtarget.hasInterlockedAccess1())
3397 // Use LAA(G) if available.
3398 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
3399 Src2);
3401 if (NegSrc2.getNode())
3402 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
3403 Node->getChain(), Node->getBasePtr(), NegSrc2,
3404 Node->getMemOperand());
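// For example, "atomicrmw sub ptr %p, i32 16" becomes an atomic addition
// of -16, which LAA can implement directly when interlocked-access
// facility 1 is available.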
3406 // Use the node as-is.
3407 return Op;
3408 }
3410 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
3413 // Op is an 8-, 16-bit or 32-bit ATOMIC_CMP_SWAP operation. Lower the first
3414 // two into a fullword ATOMIC_CMP_SWAPW operation.
3415 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
3416 SelectionDAG &DAG) const {
3417 auto *Node = cast<AtomicSDNode>(Op.getNode());
3419 // We have native support for 32-bit compare and swap.
3420 EVT NarrowVT = Node->getMemoryVT();
3421 EVT WideVT = MVT::i32;
3422 if (NarrowVT == WideVT)
3423 return Op;
3425 int64_t BitSize = NarrowVT.getSizeInBits();
3426 SDValue ChainIn = Node->getOperand(0);
3427 SDValue Addr = Node->getOperand(1);
3428 SDValue CmpVal = Node->getOperand(2);
3429 SDValue SwapVal = Node->getOperand(3);
3430 MachineMemOperand *MMO = Node->getMemOperand();
3431 SDLoc DL(Node);
3432 EVT PtrVT = Addr.getValueType();
3434 // Get the address of the containing word.
3435 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3436 DAG.getConstant(-4, DL, PtrVT));
3438 // Get the number of bits that the word must be rotated left in order
3439 // to bring the field to the top bits of a GR32.
3440 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3441 DAG.getConstant(3, DL, PtrVT));
3442 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3444 // Get the complementing shift amount, for rotating a field in the top
3445 // bits back to its proper position.
3446 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3447 DAG.getConstant(0, DL, WideVT), BitShift);
3449 // Construct the ATOMIC_CMP_SWAPW node.
3450 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3451 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
3452 NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
3453 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
3454 VTList, Ops, NarrowVT, MMO);
3455 return AtomicOp;
3456 }
3458 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
3459 SelectionDAG &DAG) const {
3460 MachineFunction &MF = DAG.getMachineFunction();
3461 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3462 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
3463 SystemZ::R15D, Op.getValueType());
3466 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
3467 SelectionDAG &DAG) const {
3468 MachineFunction &MF = DAG.getMachineFunction();
3469 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3470 bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
3472 SDValue Chain = Op.getOperand(0);
3473 SDValue NewSP = Op.getOperand(1);
3475 SDValue Backchain;
3476 SDLoc DL(Op);
3477 if (StoreBackchain) {
3478 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
3479 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3482 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
3484 if (StoreBackchain)
3485 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3487 return Chain;
3488 }
3490 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
3491 SelectionDAG &DAG) const {
3492 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3493 if (!IsData)
3494 // Just preserve the chain.
3495 return Op.getOperand(0);
3497 SDLoc DL(Op);
3498 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3499 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
3500 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
3501 SDValue Ops[] = {
3502 Op.getOperand(0),
3503 DAG.getConstant(Code, DL, MVT::i32),
3504 Op.getOperand(1)
3505 };
3506 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
3507 Node->getVTList(), Ops,
3508 Node->getMemoryVT(), Node->getMemOperand());
3511 // Return an i32 that contains the value of CC immediately after After,
3512 // whose final operand must be MVT::Glue.
3513 static SDValue getCCResult(SelectionDAG &DAG, SDNode *After) {
3514 SDLoc DL(After);
3515 SDValue Glue = SDValue(After, After->getNumValues() - 1);
3516 SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
3517 return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
3518 DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
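// For example, if CC is 2 after the glued node, IPM produces 0x20000000
// in the result register (ignoring the program-mask bits), and the shift
// right by SystemZ::IPM_CC (28) leaves the value 2.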
3521 SDValue
3522 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
3523 SelectionDAG &DAG) const {
3524 unsigned Opcode, CCValid;
3525 if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
3526 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
3527 SDValue Glued = emitIntrinsicWithChainAndGlue(DAG, Op, Opcode);
3528 SDValue CC = getCCResult(DAG, Glued.getNode());
3529 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
3530 return SDValue(Glued.getNode(), 0);
3531 }
3533 return SDValue();
3534 }
3536 SDValue
3537 SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
3538 SelectionDAG &DAG) const {
3539 unsigned Opcode, CCValid;
3540 if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
3541 SDValue Glued = emitIntrinsicWithGlue(DAG, Op, Opcode);
3542 SDValue CC = getCCResult(DAG, Glued.getNode());
3543 if (Op->getNumValues() == 1)
3544 return CC;
3545 assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
3546 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), Glued,
3547 CC);
3548 }
3550 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3551 switch (Id) {
3552 case Intrinsic::thread_pointer:
3553 return lowerThreadPointer(SDLoc(Op), DAG);
3555 case Intrinsic::s390_vpdi:
3556 return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
3557 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3559 case Intrinsic::s390_vperm:
3560 return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
3561 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3563 case Intrinsic::s390_vuphb:
3564 case Intrinsic::s390_vuphh:
3565 case Intrinsic::s390_vuphf:
3566 return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
3567 Op.getOperand(1));
3569 case Intrinsic::s390_vuplhb:
3570 case Intrinsic::s390_vuplhh:
3571 case Intrinsic::s390_vuplhf:
3572 return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
3573 Op.getOperand(1));
3575 case Intrinsic::s390_vuplb:
3576 case Intrinsic::s390_vuplhw:
3577 case Intrinsic::s390_vuplf:
3578 return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
3579 Op.getOperand(1));
3581 case Intrinsic::s390_vupllb:
3582 case Intrinsic::s390_vupllh:
3583 case Intrinsic::s390_vupllf:
3584 return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
3585 Op.getOperand(1));
3587 case Intrinsic::s390_vsumb:
3588 case Intrinsic::s390_vsumh:
3589 case Intrinsic::s390_vsumgh:
3590 case Intrinsic::s390_vsumgf:
3591 case Intrinsic::s390_vsumqf:
3592 case Intrinsic::s390_vsumqg:
3593 return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
3594 Op.getOperand(1), Op.getOperand(2));
3595 }
3597 return SDValue();
3598 }
3601 // Says that SystemZISD operation Opcode can be used to perform the equivalent
3602 // of a VPERM with permute vector Bytes. If Opcode takes three operands,
3603 // Operand is the constant third operand, otherwise it is the number of
3604 // bytes in each element of the result.
3605 struct Permute {
3606 unsigned Opcode;
3607 unsigned Operand;
3608 unsigned char Bytes[SystemZ::VectorBytes];
3609 };
3612 static const Permute PermuteForms[] = {
3613 // VMRHG
3614 { SystemZISD::MERGE_HIGH, 8,
3615 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
3616 // VMRHF
3617 { SystemZISD::MERGE_HIGH, 4,
3618 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
3619 // VMRHH
3620 { SystemZISD::MERGE_HIGH, 2,
3621 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
3622 // VMRHB
3623 { SystemZISD::MERGE_HIGH, 1,
3624 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
3625 // VMRLG
3626 { SystemZISD::MERGE_LOW, 8,
3627 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
3628 // VMRLF
3629 { SystemZISD::MERGE_LOW, 4,
3630 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
3631 // VMRLH
3632 { SystemZISD::MERGE_LOW, 2,
3633 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
3634 // VMRLB
3635 { SystemZISD::MERGE_LOW, 1,
3636 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
3637 // VPKG
3638 { SystemZISD::PACK, 4,
3639 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
3640 // VPKF
3641 { SystemZISD::PACK, 2,
3642 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
3643 // VPKH
3644 { SystemZISD::PACK, 1,
3645 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
3646 // VPDI V1, V2, 4 (low half of V1, high half of V2)
3647 { SystemZISD::PERMUTE_DWORDS, 4,
3648 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
3649 // VPDI V1, V2, 1 (high half of V1, low half of V2)
3650 { SystemZISD::PERMUTE_DWORDS, 1,
3651 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
3652 };
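// To read one entry: { MERGE_HIGH, 8, { 0..7, 16..23 } } says that byte I
// of the result is byte Bytes[I] % 16 of operand Bytes[I] / 16, i.e. the
// high doubleword of operand 0 followed by the high doubleword of
// operand 1, which is exactly what VMRHG produces.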
3654 // Called after matching a vector shuffle against a particular pattern.
3655 // Both the original shuffle and the pattern have two vector operands.
3656 // OpNos[0] is the operand of the original shuffle that should be used for
3657 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
3658 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
3659 // set OpNo0 and OpNo1 to the shuffle operands that should actually be used
3660 // for operands 0 and 1 of the pattern.
3661 static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
3662 if (OpNos[0] < 0) {
3663 if (OpNos[1] < 0)
3664 return false;
3665 OpNo0 = OpNo1 = OpNos[1];
3666 } else if (OpNos[1] < 0) {
3667 OpNo0 = OpNo1 = OpNos[0];
3668 } else {
3669 OpNo0 = OpNos[0];
3670 OpNo1 = OpNos[1];
3671 }
3672 return true;
3673 }
3675 // Bytes is a VPERM-like permute vector, except that -1 is used for
3676 // undefined bytes. Return true if the VPERM can be implemented using P.
3677 // When returning true set OpNo0 to the VPERM operand that should be
3678 // used for operand 0 of P and likewise OpNo1 for operand 1 of P.
3680 // For example, if swapping the VPERM operands allows P to match, OpNo0
3681 // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
3682 // operand, but rewriting it to use two duplicated operands allows it to
3683 // match P, then OpNo0 and OpNo1 will be the same.
3684 static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
3685 unsigned &OpNo0, unsigned &OpNo1) {
3686 int OpNos[] = { -1, -1 };
3687 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
3688 int Elt = Bytes[I];
3689 if (Elt >= 0) {
3690 // Make sure that the two permute vectors use the same suboperand
3691 // byte number. Only the operand numbers (the high bits) are
3692 // allowed to differ.
3693 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
3694 return false;
3695 int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
3696 int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
3697 // Make sure that the operand mappings are consistent with previous
3698 // elements.
3699 if (OpNos[ModelOpNo] == 1 - RealOpNo)
3700 return false;
3701 OpNos[ModelOpNo] = RealOpNo;
3704 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
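// For example, the byte vector <16..23, 0..7> is MERGE_HIGH with its
// inputs swapped, so it matches the { MERGE_HIGH, 8, ... } form above
// with OpNo0 = 1 and OpNo1 = 0.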
3707 // As above, but search for a matching permute.
3708 static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
3709 unsigned &OpNo0, unsigned &OpNo1) {
3710 for (auto &P : PermuteForms)
3711 if (matchPermute(Bytes, P, OpNo0, OpNo1))
3712 return &P;
3713 return nullptr;
3714 }
3716 // Bytes is a VPERM-like permute vector, except that -1 is used for
3717 // undefined bytes. This permute is an operand of an outer permute.
3718 // See whether redistributing the -1 bytes gives a shuffle that can be
3719 // implemented using P. If so, set Transform to a VPERM-like permute vector
3720 // that, when applied to the result of P, gives the original permute in Bytes.
3721 static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
3722 const Permute &P,
3723 SmallVectorImpl<int> &Transform) {
3724 unsigned To = 0;
3725 for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
3726 int Elt = Bytes[From];
3727 if (Elt < 0)
3728 // Byte number From of the result is undefined.
3729 Transform[From] = -1;
3730 else {
3731 while (P.Bytes[To] != Elt) {
3732 ++To;
3733 if (To == SystemZ::VectorBytes)
3734 return false;
3735 }
3736 Transform[From] = To;
3737 }
3738 }
3739 return true;
3740 }
3742 // As above, but search for a matching permute.
3743 static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
3744 SmallVectorImpl<int> &Transform) {
3745 for (auto &P : PermuteForms)
3746 if (matchDoublePermute(Bytes, P, Transform))
3747 return &P;
3748 return nullptr;
3749 }
3751 // Convert the mask of the given VECTOR_SHUFFLE into a byte-level mask,
3752 // as if it had type vNi8.
3753 static void getVPermMask(ShuffleVectorSDNode *VSN,
3754 SmallVectorImpl<int> &Bytes) {
3755 EVT VT = VSN->getValueType(0);
3756 unsigned NumElements = VT.getVectorNumElements();
3757 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3758 Bytes.resize(NumElements * BytesPerElement, -1);
3759 for (unsigned I = 0; I < NumElements; ++I) {
3760 int Index = VSN->getMaskElt(I);
3761 if (Index >= 0)
3762 for (unsigned J = 0; J < BytesPerElement; ++J)
3763 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
3767 // Bytes is a VPERM-like permute vector, except that -1 is used for
3768 // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
3769 // the result come from a contiguous sequence of bytes from one input.
3770 // Set Base to the selector for the first byte if so.
3771 static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
3772 unsigned BytesPerElement, int &Base) {
3773 Base = -1;
3774 for (unsigned I = 0; I < BytesPerElement; ++I) {
3775 if (Bytes[Start + I] >= 0) {
3776 unsigned Elem = Bytes[Start + I];
3777 if (Base < 0) {
3778 Base = Elem - I;
3779 // Make sure the bytes would come from one input operand.
3780 if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
3781 return false;
3782 } else if (unsigned(Base) != Elem - I)
3783 return false;
3784 }
3785 }
3787 return true;
3788 }
3789 // Bytes is a VPERM-like permute vector, except that -1 is used for
3790 // undefined bytes. Return true if it can be performed using VSLDI.
3791 // When returning true, set StartIndex to the shift amount and OpNo0
3792 // and OpNo1 to the VPERM operands that should be used as the first
3793 // and second shift operand respectively.
3794 static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
3795 unsigned &StartIndex, unsigned &OpNo0,
3796 unsigned &OpNo1) {
3797 int OpNos[] = { -1, -1 };
3798 int Shift = -1;
3799 for (unsigned I = 0; I < 16; ++I) {
3800 int Index = Bytes[I];
3801 if (Index >= 0) {
3802 int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
3803 int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
3804 int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
3805 if (Shift < 0)
3806 Shift = ExpectedShift;
3807 else if (Shift != ExpectedShift)
3808 return false;
3809 // Make sure that the operand mappings are consistent with previous
3810 // elements.
3811 if (OpNos[ModelOpNo] == 1 - RealOpNo)
3812 return false;
3813 OpNos[ModelOpNo] = RealOpNo;
3814 }
3815 }
3816 StartIndex = Shift;
3817 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
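// For example, Bytes = <1, 2, ..., 15, 16> selects bytes 1-15 of the
// first operand followed by byte 0 of the second, i.e. a double-vector
// shift left by one byte, so StartIndex becomes 1 (a VSLDB by 1).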
3820 // Create a node that performs P on operands Op0 and Op1, casting the
3821 // operands to the appropriate type. The type of the result is determined by P.
3822 static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
3823 const Permute &P, SDValue Op0, SDValue Op1) {
3824 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
3825 // elements of a PACK are twice as wide as the outputs.
3826 unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
3827 P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
3828 P.Operand);
3829 // Cast both operands to the appropriate type.
3830 MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
3831 SystemZ::VectorBytes / InBytes);
3832 Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
3833 Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
3834 SDValue Op;
3835 if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
3836 SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32);
3837 Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
3838 } else if (P.Opcode == SystemZISD::PACK) {
3839 MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
3840 SystemZ::VectorBytes / P.Operand);
3841 Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
3842 } else {
3843 Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
3844 }
3845 return Op;
3846 }
3848 // Bytes is a VPERM-like permute vector, except that -1 is used for
3849 // undefined bytes. Implement it on operands Ops[0] and Ops[1] using
3850 // VSLDI or VPERM.
3851 static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
3852 SDValue *Ops,
3853 const SmallVectorImpl<int> &Bytes) {
3854 for (unsigned I = 0; I < 2; ++I)
3855 Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);
3857 // First see whether VSLDI can be used.
3858 unsigned StartIndex, OpNo0, OpNo1;
3859 if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
3860 return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
3861 Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32));
3863 // Fall back on VPERM. Construct an SDNode for the permute vector.
3864 SDValue IndexNodes[SystemZ::VectorBytes];
3865 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
3866 if (Bytes[I] >= 0)
3867 IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
3868 else
3869 IndexNodes[I] = DAG.getUNDEF(MVT::i32);
3870 SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
3871 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
3875 // Describes a general N-operand vector shuffle.
3876 struct GeneralShuffle {
3877 GeneralShuffle(EVT vt) : VT(vt) {}
3878 void addUndef();
3879 bool add(SDValue, unsigned);
3880 SDValue getNode(SelectionDAG &, const SDLoc &);
3882 // The operands of the shuffle.
3883 SmallVector<SDValue, SystemZ::VectorBytes> Ops;
3885 // Index I is -1 if byte I of the result is undefined. Otherwise the
3886 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
3887 // Bytes[I] / SystemZ::VectorBytes.
3888 SmallVector<int, SystemZ::VectorBytes> Bytes;
3890 // The type of the shuffle result.
3891 EVT VT;
3892 };
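// For example, with this encoding Bytes[I] == 37 means that byte I of the
// result is byte 5 (37 % 16) of shuffle operand 2 (37 / 16).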
3895 // Add an extra undefined element to the shuffle.
3896 void GeneralShuffle::addUndef() {
3897 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3898 for (unsigned I = 0; I < BytesPerElement; ++I)
3899 Bytes.push_back(-1);
3902 // Add an extra element to the shuffle, taking it from element Elem of Op.
3903 // A null Op indicates a vector input whose value will be calculated later;
3904 // there is at most one such input per shuffle and it always has the same
3905 // type as the result. Aborts and returns false if the source vector elements
3906 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
3907 // LLVM they become implicitly extended, but this is rare and not optimized.
3908 bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
3909 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
3911 // The source vector can have wider elements than the result,
3912 // either through an explicit TRUNCATE or because of type legalization.
3913 // We want the least significant part.
3914 EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
3915 unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();
3917 // Return false if the source elements are smaller than their destination
3918 // elements.
3919 if (FromBytesPerElement < BytesPerElement)
3920 return false;
3922 unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
3923 (FromBytesPerElement - BytesPerElement));
3925 // Look through things like shuffles and bitcasts.
3926 while (Op.getNode()) {
3927 if (Op.getOpcode() == ISD::BITCAST)
3928 Op = Op.getOperand(0);
3929 else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
3930 // See whether the bytes we need come from a contiguous part of one
3931 // operand.
3932 SmallVector<int, SystemZ::VectorBytes> OpBytes;
3933 getVPermMask(cast<ShuffleVectorSDNode>(Op), OpBytes);
3934 int NewByte;
3935 if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
3936 break;
3937 if (NewByte < 0) {
3938 addUndef();
3939 return true;
3940 }
3941 Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
3942 Byte = unsigned(NewByte) % SystemZ::VectorBytes;
3943 } else if (Op.isUndef()) {
3944 addUndef();
3945 return true;
3946 } else
3947 break;
3948 }
3950 // Make sure that the source of the extraction is in Ops.
3951 unsigned OpNo = 0;
3952 for (; OpNo < Ops.size(); ++OpNo)
3953 if (Ops[OpNo] == Op)
3954 break;
3955 if (OpNo == Ops.size())
3956 Ops.push_back(Op);
3958 // Add the element to Bytes.
3959 unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
3960 for (unsigned I = 0; I < BytesPerElement; ++I)
3961 Bytes.push_back(Base + I);
3963 return true;
3964 }
3966 // Return SDNodes for the completed shuffle.
3967 SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
3968 assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");
3970 if (Ops.size() == 0)
3971 return DAG.getUNDEF(VT);
3973 // Make sure that there are at least two shuffle operands.
3974 if (Ops.size() == 1)
3975 Ops.push_back(DAG.getUNDEF(MVT::v16i8));
3977 // Create a tree of shuffles, deferring root node until after the loop.
3978 // Try to redistribute the undefined elements of non-root nodes so that
3979 // the non-root shuffles match something like a pack or merge, then adjust
3980 // the parent node's permute vector to compensate for the new order.
3981 // Among other things, this copes with vectors like <2 x i16> that were
3982 // padded with undefined elements during type legalization.
3984 // In the best case this redistribution will lead to the whole tree
3985 // using packs and merges. It should rarely be a loss in other cases.
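// For example, with four operands the first pass combines Ops[0] with
// Ops[1] and Ops[2] with Ops[3] (ideally as packs or merges), and the
// root shuffle then combines those two intermediate results.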
3986 unsigned Stride = 1;
3987 for (; Stride * 2 < Ops.size(); Stride *= 2) {
3988 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
3989 SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
3991 // Create a mask for just these two operands.
3992 SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
3993 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
3994 unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
3995 unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
3996 if (OpNo == I)
3997 NewBytes[J] = Byte;
3998 else if (OpNo == I + Stride)
3999 NewBytes[J] = SystemZ::VectorBytes + Byte;
4000 else
4001 NewBytes[J] = -1;
4002 }
4003 // See if it would be better to reorganize NewBytes to avoid using VPERM.
4004 SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
4005 if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
4006 Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
4007 // Applying NewBytesMap to Ops[I] gets back to NewBytes.
4008 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4009 if (NewBytes[J] >= 0) {
4010 assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
4011 "Invalid double permute");
4012 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
4013 } else
4014 assert(NewBytesMap[J] < 0 && "Invalid double permute");
4015 }
4016 } else {
4017 // Just use NewBytes on the operands.
4018 Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
4019 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
4020 if (NewBytes[J] >= 0)
4021 Bytes[J] = I * SystemZ::VectorBytes + J;
4026 // Now we just have 2 inputs. Put the second operand in Ops[1].
4027 if (Stride > 1) {
4028 Ops[1] = Ops[Stride];
4029 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
4030 if (Bytes[I] >= int(SystemZ::VectorBytes))
4031 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
4034 // Look for an instruction that can do the permute without resorting
4035 // to VPERM.
4036 unsigned OpNo0, OpNo1;
4037 SDValue Op;
4038 if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
4039 Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
4040 else
4041 Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
4042 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4045 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
4046 static bool isScalarToVector(SDValue Op) {
4047 for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
4048 if (!Op.getOperand(I).isUndef())
4049 return false;
4050 return true;
4051 }
4053 // Return a vector of type VT that contains Value in the first element.
4054 // The other elements don't matter.
4055 static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4056 SDValue Value) {
4057 // If we have a constant, replicate it to all elements and let the
4058 // BUILD_VECTOR lowering take care of it.
4059 if (Value.getOpcode() == ISD::Constant ||
4060 Value.getOpcode() == ISD::ConstantFP) {
4061 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
4062 return DAG.getBuildVector(VT, DL, Ops);
4064 if (Value.isUndef())
4065 return DAG.getUNDEF(VT);
4066 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
4069 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in
4070 // element 1. Used for cases in which replication is cheap.
4071 static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4072 SDValue Op0, SDValue Op1) {
4073 if (Op0.isUndef()) {
4074 if (Op1.isUndef())
4075 return DAG.getUNDEF(VT);
4076 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
4077 }
4078 if (Op1.isUndef())
4079 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
4080 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
4081 buildScalarToVector(DAG, DL, VT, Op0),
4082 buildScalarToVector(DAG, DL, VT, Op1));
4085 // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
4086 // vector for them.
4087 static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
4088 SDValue Op1) {
4089 if (Op0.isUndef() && Op1.isUndef())
4090 return DAG.getUNDEF(MVT::v2i64);
4091 // If one of the two inputs is undefined then replicate the other one,
4092 // in order to avoid using another register unnecessarily.
4093 if (Op0.isUndef())
4094 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4095 else if (Op1.isUndef())
4096 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4097 else {
4098 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4099 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4101 return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
4104 // Try to represent constant BUILD_VECTOR node BVN using a
4105 // SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask
4106 // on success.
4107 static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) {
4108 EVT ElemVT = BVN->getValueType(0).getVectorElementType();
4109 unsigned BytesPerElement = ElemVT.getStoreSize();
4110 for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) {
4111 SDValue Op = BVN->getOperand(I);
4112 if (!Op.isUndef()) {
4113 uint64_t Value;
4114 if (Op.getOpcode() == ISD::Constant)
4115 Value = dyn_cast<ConstantSDNode>(Op)->getZExtValue();
4116 else if (Op.getOpcode() == ISD::ConstantFP)
4117 Value = (dyn_cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
4118 .getZExtValue());
4119 else
4120 return false;
4121 for (unsigned J = 0; J < BytesPerElement; ++J) {
4122 uint64_t Byte = (Value >> (J * 8)) & 0xff;
4123 if (Byte == 0xff)
4124 Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J);
4125 else if (Byte != 0)
4126 return false;
4127 }
4128 }
4129 }
4131 return true;
4132 }
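// A small worked example: the v4i32 constant
//   <0xffffffff, 0, 0, 0xffffffff>
// produces Mask == 0xf00f: element 0 sets mask bits 15-12, element 3
// sets bits 3-0, and VGBM 0xf00f then materializes the vector.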
4133 // Try to load a vector constant in which BitsPerElement-bit value Value
4134 // is replicated to fill the vector. VT is the type of the resulting
4135 // constant, which may have elements of a different size from BitsPerElement.
4136 // Return the SDValue of the constant on success, otherwise return
4137 // an empty value.
4138 static SDValue tryBuildVectorReplicate(SelectionDAG &DAG,
4139 const SystemZInstrInfo *TII,
4140 const SDLoc &DL, EVT VT, uint64_t Value,
4141 unsigned BitsPerElement) {
4142 // Signed 16-bit values can be replicated using VREPI.
4143 int64_t SignedValue = SignExtend64(Value, BitsPerElement);
4144 if (isInt<16>(SignedValue)) {
4145 MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
4146 SystemZ::VectorBits / BitsPerElement);
4147 SDValue Op = DAG.getNode(SystemZISD::REPLICATE, DL, VecVT,
4148 DAG.getConstant(SignedValue, DL, MVT::i32));
4149 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4151 // See whether rotating the constant left some N places gives a value that
4152 // is one less than a power of 2 (i.e. all zeros followed by all ones).
4153 // If so we can use VGM.
4154 unsigned Start, End;
4155 if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) {
4156 // isRxSBGMask returns the bit numbers for a full 64-bit value,
4157 // with 0 denoting 1 << 63 and 63 denoting 1. Convert them to
4158 // bit numbers for an BitsPerElement value, so that 0 denotes
4159 // 1 << (BitsPerElement-1).
4160 Start -= 64 - BitsPerElement;
4161 End -= 64 - BitsPerElement;
4162 MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
4163 SystemZ::VectorBits / BitsPerElement);
4164 SDValue Op = DAG.getNode(SystemZISD::ROTATE_MASK, DL, VecVT,
4165 DAG.getConstant(Start, DL, MVT::i32),
4166 DAG.getConstant(End, DL, MVT::i32));
4167 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4168 }
4169 return SDValue();
4170 }
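// For example, replicating the 32-bit value 0xfffff000 can use
// VGMF(0, 19): a mask of ones in bits 0-19, with bit 0 denoting the
// most significant bit of each element, is exactly 0xfffff000.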
4172 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
4173 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
4174 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
4175 // would benefit from this representation and return it if so.
4176 static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
4177 BuildVectorSDNode *BVN) {
4178 EVT VT = BVN->getValueType(0);
4179 unsigned NumElements = VT.getVectorNumElements();
4181 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
4182 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
4183 // need a BUILD_VECTOR, add an additional placeholder operand for that
4184 // BUILD_VECTOR and store its operands in ResidueOps.
4185 GeneralShuffle GS(VT);
4186 SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
4187 bool FoundOne = false;
4188 for (unsigned I = 0; I < NumElements; ++I) {
4189 SDValue Op = BVN->getOperand(I);
4190 if (Op.getOpcode() == ISD::TRUNCATE)
4191 Op = Op.getOperand(0);
4192 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4193 Op.getOperand(1).getOpcode() == ISD::Constant) {
4194 unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4195 if (!GS.add(Op.getOperand(0), Elem))
4196 return SDValue();
4197 FoundOne = true;
4198 } else if (Op.isUndef()) {
4199 GS.addUndef();
4200 } else {
4201 if (!GS.add(SDValue(), ResidueOps.size()))
4202 return SDValue();
4203 ResidueOps.push_back(BVN->getOperand(I));
4204 }
4205 }
4207 // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
4208 if (!FoundOne)
4209 return SDValue();
4211 // Create the BUILD_VECTOR for the remaining elements, if any.
4212 if (!ResidueOps.empty()) {
4213 while (ResidueOps.size() < NumElements)
4214 ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
4215 for (auto &Op : GS.Ops) {
4216 if (!Op.getNode()) {
4217 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
4218 break;
4219 }
4220 }
4221 }
4222 return GS.getNode(DAG, SDLoc(BVN));
4225 // Combine GPR scalar values Elems into a vector of type VT.
4226 static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4227 SmallVectorImpl<SDValue> &Elems) {
4228 // See whether there is a single replicated value.
4229 SDValue Single;
4230 unsigned int NumElements = Elems.size();
4231 unsigned int Count = 0;
4232 for (auto Elem : Elems) {
4233 if (!Elem.isUndef()) {
4234 if (!Single.getNode())
4235 Single = Elem;
4236 else if (Elem != Single) {
4237 Single = SDValue();
4238 break;
4239 }
4240 Count += 1;
4241 }
4242 }
4243 // There are three cases here:
4245 // - if the only defined element is a loaded one, the best sequence
4246 // is a replicating load.
4248 // - otherwise, if the only defined element is an i64 value, we will
4249 // end up with the same VLVGP sequence regardless of whether we short-cut
4250 // for replication or fall through to the later code.
4252 // - otherwise, if the only defined element is an i32 or smaller value,
4253 // we would need 2 instructions to replicate it: VLVGP followed by VREPx.
4254 // This is only a win if the single defined element is used more than once.
4255 // In other cases we're better off using a single VLVGx.
4256 if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD))
4257 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);
4259 // If all elements are loads, use VLREP/VLEs (below).
4260 bool AllLoads = true;
4261 for (auto Elem : Elems)
4262 if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) {
4263 AllLoads = false;
4264 break;
4265 }
4267 // The best way of building a v2i64 from two i64s is to use VLVGP.
4268 if (VT == MVT::v2i64 && !AllLoads)
4269 return joinDwords(DAG, DL, Elems[0], Elems[1]);
4271 // Use a 64-bit merge high to combine two doubles.
4272 if (VT == MVT::v2f64 && !AllLoads)
4273 return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4275 // Build v4f32 values directly from the FPRs:
4277 //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
4278 //        V              V        VMRHF
4279 //     <ABxx>         <CDxx>
4280 //               V                VMRHG
4281 //            <ABCD>
4282 if (VT == MVT::v4f32 && !AllLoads) {
4283 SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4284 SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
4285 // Avoid unnecessary undefs by reusing the other operand.
4286 if (Op01.isUndef())
4287 Op01 = Op23;
4288 else if (Op23.isUndef())
4289 Op23 = Op01;
4290 // Merging identical replications is a no-op.
4291 if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
4292 return DAG.getNode(ISD::BITCAST, DL, VT, Op01);
4293 Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
4294 Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
4295 SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
4296 DL, MVT::v2i64, Op01, Op23);
4297 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4300 // Collect the constant terms.
4301 SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
4302 SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);
4304 unsigned NumConstants = 0;
4305 for (unsigned I = 0; I < NumElements; ++I) {
4306 SDValue Elem = Elems[I];
4307 if (Elem.getOpcode() == ISD::Constant ||
4308 Elem.getOpcode() == ISD::ConstantFP) {
4309 NumConstants += 1;
4310 Constants[I] = Elem;
4311 Done[I] = true;
4312 }
4313 }
4314 // If there was at least one constant, fill in the other elements of
4315 // Constants with undefs to get a full vector constant and use that
4316 // as the starting point.
4317 SDValue Result;
4318 if (NumConstants > 0) {
4319 for (unsigned I = 0; I < NumElements; ++I)
4320 if (!Constants[I].getNode())
4321 Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
4322 Result = DAG.getBuildVector(VT, DL, Constants);
4323 } else {
4324 // Otherwise try to use VLREP or VLVGP to start the sequence in order to
4325 // avoid a false dependency on any previous contents of the vector
4326 // register.
4328 // Use a VLREP if at least one element is a load.
4329 unsigned LoadElIdx = UINT_MAX;
4330 for (unsigned I = 0; I < NumElements; ++I)
4331 if (Elems[I].getOpcode() == ISD::LOAD &&
4332 cast<LoadSDNode>(Elems[I])->isUnindexed()) {
4333 LoadElIdx = I;
4334 break;
4335 }
4336 if (LoadElIdx != UINT_MAX) {
4337 Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, Elems[LoadElIdx]);
4338 Done[LoadElIdx] = true;
4339 } else {
4340 // Try to use VLVGP.
4341 unsigned I1 = NumElements / 2 - 1;
4342 unsigned I2 = NumElements - 1;
4343 bool Def1 = !Elems[I1].isUndef();
4344 bool Def2 = !Elems[I2].isUndef();
4345 if (Def1 || Def2) {
4346 SDValue Elem1 = Elems[Def1 ? I1 : I2];
4347 SDValue Elem2 = Elems[Def2 ? I2 : I1];
4348 Result = DAG.getNode(ISD::BITCAST, DL, VT,
4349 joinDwords(DAG, DL, Elem1, Elem2));
4350 Done[I1] = true;
4351 Done[I2] = true;
4352 } else
4353 Result = DAG.getUNDEF(VT);
4354 }
4355 }
4357 // Use VLVGx to insert the other elements.
4358 for (unsigned I = 0; I < NumElements; ++I)
4359 if (!Done[I] && !Elems[I].isUndef())
4360 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
4361 DAG.getConstant(I, DL, MVT::i32));
4363 return Result;
4364 }
4365 SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
4366 SelectionDAG &DAG) const {
4367 const SystemZInstrInfo *TII =
4368 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
4369 auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
4370 SDLoc DL(Op);
4371 EVT VT = Op.getValueType();
4373 if (BVN->isConstant()) {
4374 // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
4375 // preferred way of creating all-zero and all-one vectors so give it
4376 // priority over other methods below.
4377 uint64_t Mask = 0;
4378 if (tryBuildVectorByteMask(BVN, Mask)) {
4379 SDValue Op = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
4380 DAG.getConstant(Mask, DL, MVT::i32));
4381 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4384 // Try using some form of replication.
4385 APInt SplatBits, SplatUndef;
4386 unsigned SplatBitSize;
4387 bool HasAnyUndefs;
4388 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4389 8, true) &&
4390 SplatBitSize <= 64) {
4391 // First try assuming that any undefined bits above the highest set bit
4392 // and below the lowest set bit are 1s. This increases the likelihood of
4393 // being able to use a sign-extended element value in VECTOR REPLICATE
4394 // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
4395 uint64_t SplatBitsZ = SplatBits.getZExtValue();
4396 uint64_t SplatUndefZ = SplatUndef.getZExtValue();
4397 uint64_t Lower = (SplatUndefZ
4398 & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
4399 uint64_t Upper = (SplatUndefZ
4400 & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
4401 uint64_t Value = SplatBitsZ | Upper | Lower;
4402 SDValue Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value,
4403 SplatBitSize);
4404 if (Op.getNode())
4405 return Op;
4407 // Now try assuming that any undefined bits between the first and
4408 // last defined set bits are set. This increases the chances of
4409 // using a non-wraparound mask.
4410 uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
4411 Value = SplatBitsZ | Middle;
4412 Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, SplatBitSize);
4413 if (Op.getNode())
4414 return Op;
4415 }
4417 // Fall back to loading it from memory.
4418 return SDValue();
4419 }
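// As an example of the undef handling above: a v4i32 splat whose defined
// bits are 0x00fff000, with the whole top byte undefined, is first tried
// as 0xfffff000, which VGMF(0, 19) can generate as a mask of ones in
// bits 0-19.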
4421 // See if we should use shuffles to construct the vector from other vectors.
4422 if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
4423 return Res;
4425 // Detect SCALAR_TO_VECTOR conversions.
4426 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
4427 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));
4429 // Otherwise use buildVector to build the vector up from GPRs.
4430 unsigned NumElements = Op.getNumOperands();
4431 SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
4432 for (unsigned I = 0; I < NumElements; ++I)
4433 Ops[I] = Op.getOperand(I);
4434 return buildVector(DAG, DL, VT, Ops);
4437 SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4438 SelectionDAG &DAG) const {
4439 auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
4440 SDLoc DL(Op);
4441 EVT VT = Op.getValueType();
4442 unsigned NumElements = VT.getVectorNumElements();
4444 if (VSN->isSplat()) {
4445 SDValue Op0 = Op.getOperand(0);
4446 unsigned Index = VSN->getSplatIndex();
4447 assert(Index < VT.getVectorNumElements() &&
4448 "Splat index should be defined and in first operand");
4449 // See whether the value we're splatting is directly available as a scalar.
4450 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4451 Op0.getOpcode() == ISD::BUILD_VECTOR)
4452 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
4453 // Otherwise keep it as a vector-to-vector operation.
4454 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
4455 DAG.getConstant(Index, DL, MVT::i32));
4458 GeneralShuffle GS(VT);
4459 for (unsigned I = 0; I < NumElements; ++I) {
4460 int Elt = VSN->getMaskElt(I);
4461 if (Elt < 0)
4462 GS.addUndef();
4463 else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
4464 unsigned(Elt) % NumElements))
4465 return DAG.getUNDEF(VT);
4466 }
4467 return GS.getNode(DAG, SDLoc(VSN));
4470 SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
4471 SelectionDAG &DAG) const {
4472 SDLoc DL(Op);
4473 // Just insert the scalar into element 0 of an undefined vector.
4474 return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
4475 Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
4476 Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
4479 SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4480 SelectionDAG &DAG) const {
4481 // Handle insertions of floating-point values.
4482 SDLoc DL(Op);
4483 SDValue Op0 = Op.getOperand(0);
4484 SDValue Op1 = Op.getOperand(1);
4485 SDValue Op2 = Op.getOperand(2);
4486 EVT VT = Op.getValueType();
4488 // Insertions into constant indices of a v2f64 can be done using VPDI.
4489 // However, if the inserted value is a bitcast or a constant then it's
4490 // better to use GPRs, as below.
4491 if (VT == MVT::v2f64 &&
4492 Op1.getOpcode() != ISD::BITCAST &&
4493 Op1.getOpcode() != ISD::ConstantFP &&
4494 Op2.getOpcode() == ISD::Constant) {
4495 uint64_t Index = dyn_cast<ConstantSDNode>(Op2)->getZExtValue();
4496 unsigned Mask = VT.getVectorNumElements() - 1;
4497 if (Index <= Mask)
4498 return Op;
4499 }
4501 // Otherwise bitcast to the equivalent integer form and insert via a GPR.
4502 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
4503 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
4504 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
4505 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
4506 DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
4507 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4510 SDValue
4511 SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4512 SelectionDAG &DAG) const {
4513 // Handle extractions of floating-point values.
4514 SDLoc DL(Op);
4515 SDValue Op0 = Op.getOperand(0);
4516 SDValue Op1 = Op.getOperand(1);
4517 EVT VT = Op.getValueType();
4518 EVT VecVT = Op0.getValueType();
4520 // Extractions of constant indices can be done directly.
4521 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
4522 uint64_t Index = CIndexN->getZExtValue();
4523 unsigned Mask = VecVT.getVectorNumElements() - 1;
4524 if (Index <= Mask)
4525 return Op;
4526 }
4528 // Otherwise bitcast to the equivalent integer form and extract via a GPR.
4529 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
4530 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
4531 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
4532 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
4533 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4536 SDValue
4537 SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
4538 unsigned UnpackHigh) const {
4539 SDValue PackedOp = Op.getOperand(0);
4540 EVT OutVT = Op.getValueType();
4541 EVT InVT = PackedOp.getValueType();
4542 unsigned ToBits = OutVT.getScalarSizeInBits();
4543 unsigned FromBits = InVT.getScalarSizeInBits();
4544 do {
4545 FromBits *= 2;
4546 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
4547 SystemZ::VectorBits / FromBits);
4548 PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp);
4549 } while (FromBits != ToBits);
4550 return PackedOp;
4551 }
4553 SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
4554 unsigned ByScalar) const {
4555 // Look for cases where a vector shift can use the *_BY_SCALAR form.
4556 SDValue Op0 = Op.getOperand(0);
4557 SDValue Op1 = Op.getOperand(1);
4558 SDLoc DL(Op);
4559 EVT VT = Op.getValueType();
4560 unsigned ElemBitSize = VT.getScalarSizeInBits();
4562 // See whether the shift vector is a splat represented as BUILD_VECTOR.
4563 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
4564 APInt SplatBits, SplatUndef;
4565 unsigned SplatBitSize;
4566 bool HasAnyUndefs;
4567 // Check for constant splats. Use ElemBitSize as the minimum element
4568 // width and reject splats that need wider elements.
4569 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4570 ElemBitSize, true) &&
4571 SplatBitSize == ElemBitSize) {
4572 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
4573 DL, MVT::i32);
4574 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4576 // Check for variable splats.
4577 BitVector UndefElements;
4578 SDValue Splat = BVN->getSplatValue(&UndefElements);
4579 if (Splat) {
4580 // Since i32 is the smallest legal type, we either need a no-op
4581 // or a truncation.
4582 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
4583 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
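// For example, a "shl <4 x i32>" by a splat of 5 becomes
// VSHL_BY_SCALAR %v, 5, so all four elements are shifted by one scalar
// amount rather than by a per-element shift vector.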
4587 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
4588 // and the shift amount is directly available in a GPR.
4589 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
4590 if (VSN->isSplat()) {
4591 SDValue VSNOp0 = VSN->getOperand(0);
4592 unsigned Index = VSN->getSplatIndex();
4593 assert(Index < VT.getVectorNumElements() &&
4594 "Splat index should be defined and in first operand");
4595 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4596 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
4597 // Since i32 is the smallest legal type, we either need a no-op
4598 // or a truncation.
4599 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
4600 VSNOp0.getOperand(Index));
4601 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4606 // Otherwise just treat the current form as legal.
4607 return Op;
4608 }
4610 SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
4611 SelectionDAG &DAG) const {
4612 switch (Op.getOpcode()) {
4613 case ISD::FRAMEADDR:
4614 return lowerFRAMEADDR(Op, DAG);
4615 case ISD::RETURNADDR:
4616 return lowerRETURNADDR(Op, DAG);
4617 case ISD::BR_CC:
4618 return lowerBR_CC(Op, DAG);
4619 case ISD::SELECT_CC:
4620 return lowerSELECT_CC(Op, DAG);
4621 case ISD::SETCC:
4622 return lowerSETCC(Op, DAG);
4623 case ISD::GlobalAddress:
4624 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
4625 case ISD::GlobalTLSAddress:
4626 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
4627 case ISD::BlockAddress:
4628 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
4629 case ISD::JumpTable:
4630 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
4631 case ISD::ConstantPool:
4632 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
4633 case ISD::BITCAST:
4634 return lowerBITCAST(Op, DAG);
4635 case ISD::VASTART:
4636 return lowerVASTART(Op, DAG);
4637 case ISD::VACOPY:
4638 return lowerVACOPY(Op, DAG);
4639 case ISD::DYNAMIC_STACKALLOC:
4640 return lowerDYNAMIC_STACKALLOC(Op, DAG);
4641 case ISD::GET_DYNAMIC_AREA_OFFSET:
4642 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
4643 case ISD::SMUL_LOHI:
4644 return lowerSMUL_LOHI(Op, DAG);
4645 case ISD::UMUL_LOHI:
4646 return lowerUMUL_LOHI(Op, DAG);
4647 case ISD::SDIVREM:
4648 return lowerSDIVREM(Op, DAG);
4649 case ISD::UDIVREM:
4650 return lowerUDIVREM(Op, DAG);
4651 case ISD::OR:
4652 return lowerOR(Op, DAG);
4653 case ISD::CTPOP:
4654 return lowerCTPOP(Op, DAG);
4655 case ISD::ATOMIC_FENCE:
4656 return lowerATOMIC_FENCE(Op, DAG);
4657 case ISD::ATOMIC_SWAP:
4658 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
4659 case ISD::ATOMIC_STORE:
4660 return lowerATOMIC_STORE(Op, DAG);
4661 case ISD::ATOMIC_LOAD:
4662 return lowerATOMIC_LOAD(Op, DAG);
4663 case ISD::ATOMIC_LOAD_ADD:
4664 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
4665 case ISD::ATOMIC_LOAD_SUB:
4666 return lowerATOMIC_LOAD_SUB(Op, DAG);
4667 case ISD::ATOMIC_LOAD_AND:
4668 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
4669 case ISD::ATOMIC_LOAD_OR:
4670 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
4671 case ISD::ATOMIC_LOAD_XOR:
4672 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
4673 case ISD::ATOMIC_LOAD_NAND:
4674 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
4675 case ISD::ATOMIC_LOAD_MIN:
4676 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
4677 case ISD::ATOMIC_LOAD_MAX:
4678 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
4679 case ISD::ATOMIC_LOAD_UMIN:
4680 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
4681 case ISD::ATOMIC_LOAD_UMAX:
4682 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
4683 case ISD::ATOMIC_CMP_SWAP:
4684 return lowerATOMIC_CMP_SWAP(Op, DAG);
4685 case ISD::STACKSAVE:
4686 return lowerSTACKSAVE(Op, DAG);
4687 case ISD::STACKRESTORE:
4688 return lowerSTACKRESTORE(Op, DAG);
4689 case ISD::PREFETCH:
4690 return lowerPREFETCH(Op, DAG);
4691 case ISD::INTRINSIC_W_CHAIN:
4692 return lowerINTRINSIC_W_CHAIN(Op, DAG);
4693 case ISD::INTRINSIC_WO_CHAIN:
4694 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
4695 case ISD::BUILD_VECTOR:
4696 return lowerBUILD_VECTOR(Op, DAG);
4697 case ISD::VECTOR_SHUFFLE:
4698 return lowerVECTOR_SHUFFLE(Op, DAG);
4699 case ISD::SCALAR_TO_VECTOR:
4700 return lowerSCALAR_TO_VECTOR(Op, DAG);
4701 case ISD::INSERT_VECTOR_ELT:
4702 return lowerINSERT_VECTOR_ELT(Op, DAG);
4703 case ISD::EXTRACT_VECTOR_ELT:
4704 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4705 case ISD::SIGN_EXTEND_VECTOR_INREG:
4706 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
4707 case ISD::ZERO_EXTEND_VECTOR_INREG:
4708 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
4709 case ISD::SHL:
4710 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
4711 case ISD::SRL:
4712 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
4713 case ISD::SRA:
4714 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
4715 default:
4716 llvm_unreachable("Unexpected node to lower");
4717 }
4718 }
4720 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
4721 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
4722 switch ((SystemZISD::NodeType)Opcode) {
4723 case SystemZISD::FIRST_NUMBER: break;
4729 OPCODE(PCREL_WRAPPER);
4730 OPCODE(PCREL_OFFSET);
4736 OPCODE(SELECT_CCMASK);
4737 OPCODE(ADJDYNALLOC);
4755 OPCODE(SEARCH_STRING);
4759 OPCODE(TBEGIN_NOFLOAT);
4762 OPCODE(ROTATE_MASK);
4764 OPCODE(JOIN_DWORDS);
4769 OPCODE(PERMUTE_DWORDS);
4774 OPCODE(UNPACK_HIGH);
4775 OPCODE(UNPACKL_HIGH);
4777 OPCODE(UNPACKL_LOW);
4778 OPCODE(VSHL_BY_SCALAR);
4779 OPCODE(VSRL_BY_SCALAR);
4780 OPCODE(VSRA_BY_SCALAR);
4808 OPCODE(ATOMIC_SWAPW);
4809 OPCODE(ATOMIC_LOADW_ADD);
4810 OPCODE(ATOMIC_LOADW_SUB);
4811 OPCODE(ATOMIC_LOADW_AND);
4812 OPCODE(ATOMIC_LOADW_OR);
4813 OPCODE(ATOMIC_LOADW_XOR);
4814 OPCODE(ATOMIC_LOADW_NAND);
4815 OPCODE(ATOMIC_LOADW_MIN);
4816 OPCODE(ATOMIC_LOADW_MAX);
4817 OPCODE(ATOMIC_LOADW_UMIN);
4818 OPCODE(ATOMIC_LOADW_UMAX);
4819 OPCODE(ATOMIC_CMP_SWAPW);
4828 // Return true if VT is a vector whose elements are a whole number of bytes
4829 // in width. Also check for presence of vector support.
4830 bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
4831 if (!Subtarget.hasVector())
4832 return false;
4834 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
4837 // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
4838 // producing a result of type ResVT. Op is a possibly bitcast version
4839 // of the input vector and Index is the index (based on type VecVT) that
4840 // should be extracted. Return the new extraction if a simplification
4841 // was possible or if Force is true.
4842 SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
4843 EVT VecVT, SDValue Op,
4844 unsigned Index,
4845 DAGCombinerInfo &DCI,
4846 bool Force) const {
4847 SelectionDAG &DAG = DCI.DAG;
4849 // The number of bytes being extracted.
4850 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
4852 for (;;) {
4853 unsigned Opcode = Op.getOpcode();
4854 if (Opcode == ISD::BITCAST)
4855 // Look through bitcasts.
4856 Op = Op.getOperand(0);
4857 else if (Opcode == ISD::VECTOR_SHUFFLE &&
4858 canTreatAsByteVector(Op.getValueType())) {
4859 // Get a VPERM-like permute mask and see whether the bytes covered
4860 // by the extracted element are a contiguous sequence from one
4861 // input vector.
4862 SmallVector<int, SystemZ::VectorBytes> Bytes;
4863 getVPermMask(cast<ShuffleVectorSDNode>(Op), Bytes);
4864 int First;
4865 if (!getShuffleInput(Bytes, Index * BytesPerElement,
4866 BytesPerElement, First))
4867 break;
4868 if (First < 0)
4869 return DAG.getUNDEF(ResVT);
4870 // Make sure the contiguous sequence starts at a multiple of the
4871 // original element size.
4872 unsigned Byte = unsigned(First) % Bytes.size();
4873 if (Byte % BytesPerElement != 0)
4874 break;
4875 // We can get the extracted value directly from an input.
4876 Index = Byte / BytesPerElement;
4877 Op = Op.getOperand(unsigned(First) / Bytes.size());
4878 Force = true;
4879 } else if (Opcode == ISD::BUILD_VECTOR &&
4880 canTreatAsByteVector(Op.getValueType())) {
4881 // We can only optimize this case if the BUILD_VECTOR elements are
4882 // at least as wide as the extracted value.
4883 EVT OpVT = Op.getValueType();
4884 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
4885 if (OpBytesPerElement < BytesPerElement)
4886 break;
4887 // Make sure that the least-significant bit of the extracted value
4888 // is the least significant bit of an input.
4889 unsigned End = (Index + 1) * BytesPerElement;
4890 if (End % OpBytesPerElement != 0)
4891 break;
4892 // We're extracting the low part of one operand of the BUILD_VECTOR.
4893 Op = Op.getOperand(End / OpBytesPerElement - 1);
4894 if (!Op.getValueType().isInteger()) {
4895 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
4896 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
4897 DCI.AddToWorklist(Op.getNode());
4898 }
4899 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
4900 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
4901 if (VT != ResVT) {
4902 DCI.AddToWorklist(Op.getNode());
4903 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
4904 }
4905 return Op;
4906 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
4907 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
4908 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
4909 canTreatAsByteVector(Op.getValueType()) &&
4910 canTreatAsByteVector(Op.getOperand(0).getValueType())) {
4911 // Make sure that only the unextended bits are significant.
4912 EVT ExtVT = Op.getValueType();
4913 EVT OpVT = Op.getOperand(0).getValueType();
4914 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
4915 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
4916 unsigned Byte = Index * BytesPerElement;
4917 unsigned SubByte = Byte % ExtBytesPerElement;
4918 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
4919 if (SubByte < MinSubByte ||
4920 SubByte + BytesPerElement > ExtBytesPerElement)
4921 break;
4922 // Get the byte offset of the unextended element
4923 Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
4924 // ...then add the byte offset relative to that element.
4925 Byte += SubByte - MinSubByte;
4926 if (Byte % BytesPerElement != 0)
4927 break;
4928 Op = Op.getOperand(0);
4929 Index = Byte / BytesPerElement;
4930 Force = true;
4931 } else
4932 break;
4933 }
4935 if (Op.getValueType() != VecVT) {
4936 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
4937 DCI.AddToWorklist(Op.getNode());
4938 }
4939 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
4940 DAG.getConstant(Index, DL, MVT::i32));
4941 }
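// Worked example: extracting i32 element 3 of a VECTOR_SHUFFLE result whose
// bytes 12-15 form a contiguous run starting at byte 4 of shuffle input 0
// (First = 4) is rewritten as extracting i32 element 1 directly from that
// input, skipping the permute entirely.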
4945 // Optimize vector operations in scalar value Op on the basis that Op
4946 // is truncated to TruncVT.
4947 SDValue SystemZTargetLowering::combineTruncateExtract(
4948 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
4949 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
4950 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
4951 // of type TruncVT.
4952 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4953 TruncVT.getSizeInBits() % 8 == 0) {
4954 SDValue Vec = Op.getOperand(0);
4955 EVT VecVT = Vec.getValueType();
4956 if (canTreatAsByteVector(VecVT)) {
4957 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
4958 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
4959 unsigned TruncBytes = TruncVT.getStoreSize();
4960 if (BytesPerElement % TruncBytes == 0) {
4961 // Calculate the value of Y' in the above description. We are
4962 // splitting the original elements into Scale equal-sized pieces
4963 // and for truncation purposes want the last (least-significant)
4964 // of these pieces for IndexN. This is easiest to do by calculating
4965 // the start index of the following element and then subtracting 1.
4966 unsigned Scale = BytesPerElement / TruncBytes;
4967 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
4969 // Defer the creation of the bitcast from X to combineExtract,
4970 // which might be able to optimize the extraction.
4971 VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
4972 VecVT.getStoreSize() / TruncBytes);
4973 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
4974 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
4975 }
4976 }
4977 }
4978 }
4979 return SDValue();
4980 }
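// Worked example: (trunc:i16 (extract_vector_elt X:v2i64, 1)) is recast as an
// extraction from X viewed as v8i16: Scale = 8 / 2 = 4, so the new index is
// (1 + 1) * 4 - 1 = 7, the least-significant i16 piece of doubleword 1.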
4982 SDValue SystemZTargetLowering::combineSIGN_EXTEND(
4983 SDNode *N, DAGCombinerInfo &DCI) const {
4984 // Convert (sext (ashr (shl X, C1), C2)) to
4985 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
4986 // cheap as narrower ones.
4987 SelectionDAG &DAG = DCI.DAG;
4988 SDValue N0 = N->getOperand(0);
4989 EVT VT = N->getValueType(0);
4990 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
4991 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4992 SDValue Inner = N0.getOperand(0);
4993 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
4994 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
4995 unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
4996 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
4997 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
4998 EVT ShiftVT = N0.getOperand(1).getValueType();
4999 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
5000 Inner.getOperand(0));
5001 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
5002 DAG.getConstant(NewShlAmt, SDLoc(Inner),
5003 ShiftVT));
5004 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
5005 DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
5006 }
5007 }
5008 }
5009 return SDValue();
5010 }
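// Worked example: (sext:i64 (ashr:i32 (shl:i32 X, 24), 25)) has Extra =
// 64 - 32 = 32 and becomes (ashr:i64 (shl:i64 (anyext X), 56), 57), trading
// two 32-bit shifts for equally cheap 64-bit ones.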
5012 SDValue SystemZTargetLowering::combineMERGE(
5013 SDNode *N, DAGCombinerInfo &DCI) const {
5014 SelectionDAG &DAG = DCI.DAG;
5015 unsigned Opcode = N->getOpcode();
5016 SDValue Op0 = N->getOperand(0);
5017 SDValue Op1 = N->getOperand(1);
5018 if (Op0.getOpcode() == ISD::BITCAST)
5019 Op0 = Op0.getOperand(0);
5020 if (Op0.getOpcode() == SystemZISD::BYTE_MASK &&
5021 cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
5022 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
5023 // for v4f32.
5024 if (Op1 == N->getOperand(0))
5025 return Op1;
5026 // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
5027 EVT VT = Op1.getValueType();
5028 unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
5029 if (ElemBytes <= 4) {
5030 Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
5031 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
5032 EVT InVT = VT.changeVectorElementTypeToInteger();
5033 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
5034 SystemZ::VectorBytes / ElemBytes / 2);
5035 if (VT != InVT) {
5036 Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
5037 DCI.AddToWorklist(Op1.getNode());
5038 }
5039 SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
5040 DCI.AddToWorklist(Op.getNode());
5041 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
5042 }
5043 }
5044 return SDValue();
5045 }
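// Worked example: (z_merge_high (v16i8 zero), X:v16i8) has ElemBytes = 1 and
// becomes (bitcast:v16i8 (z_unpackl_high:v8i16 X)), folding the zero operand
// into the zero-extending unpack.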
5047 SDValue SystemZTargetLowering::combineSTORE(
5048 SDNode *N, DAGCombinerInfo &DCI) const {
5049 SelectionDAG &DAG = DCI.DAG;
5050 auto *SN = cast<StoreSDNode>(N);
5051 auto &Op1 = N->getOperand(1);
5052 EVT MemVT = SN->getMemoryVT();
5053 // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
5054 // for the extraction to be done on a vMiN value, so that we can use VSTE.
5055 // If X has wider elements then convert it to:
5056 // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
5057 if (MemVT.isInteger()) {
5058 if (SDValue Value =
5059 combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
5060 DCI.AddToWorklist(Value.getNode());
5062 // Rewrite the store with the new form of stored value.
5063 return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
5064 SN->getBasePtr(), SN->getMemoryVT(),
5065 SN->getMemOperand());
5066 }
5067 }
5068 // Combine STORE (BSWAP) into STRVH/STRV/STRVG
5069 // See comment in combineBSWAP about volatile accesses.
5070 if (!SN->isVolatile() &&
5071 Op1.getOpcode() == ISD::BSWAP &&
5072 Op1.getNode()->hasOneUse() &&
5073 (Op1.getValueType() == MVT::i16 ||
5074 Op1.getValueType() == MVT::i32 ||
5075 Op1.getValueType() == MVT::i64)) {
5077 SDValue BSwapOp = Op1.getOperand(0);
5079 if (BSwapOp.getValueType() == MVT::i16)
5080 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);
5082 SDValue Ops[] = {
5083 N->getOperand(0), BSwapOp, N->getOperand(2),
5084 DAG.getValueType(Op1.getValueType())
5085 };
5087 return
5088 DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other),
5089 Ops, MemVT, SN->getMemOperand());
5090 }
5091 return SDValue();
5092 }
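// Worked example: (store (bswap:i32 X), P) becomes one byte-reversing STRV of
// X to P; an i16 value is first any-extended to i32 so the same node can be
// selected to STRVH.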
5094 SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
5095 SDNode *N, DAGCombinerInfo &DCI) const {
5097 if (!Subtarget.hasVector())
5098 return SDValue();
5100 // Try to simplify a vector extraction.
5101 if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
5102 SDValue Op0 = N->getOperand(0);
5103 EVT VecVT = Op0.getValueType();
5104 return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
5105 IndexN->getZExtValue(), DCI, false);
5106 }
5108 return SDValue();
5109 }
5110 SDValue SystemZTargetLowering::combineJOIN_DWORDS(
5111 SDNode *N, DAGCombinerInfo &DCI) const {
5112 SelectionDAG &DAG = DCI.DAG;
5113 // (join_dwords X, X) == (replicate X)
5114 if (N->getOperand(0) == N->getOperand(1))
5115 return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
5116 N->getOperand(0));
5117 return SDValue();
5118 }
5120 SDValue SystemZTargetLowering::combineFP_ROUND(
5121 SDNode *N, DAGCombinerInfo &DCI) const {
5122 // (fpround (extract_vector_elt X 0))
5123 // (fpround (extract_vector_elt X 1)) ->
5124 // (extract_vector_elt (VROUND X) 0)
5125 // (extract_vector_elt (VROUND X) 1)
5127 // This is a special case since the target doesn't really support v2f32s.
5128 SelectionDAG &DAG = DCI.DAG;
5129 SDValue Op0 = N->getOperand(0);
5130 if (N->getValueType(0) == MVT::f32 &&
5131 Op0.hasOneUse() &&
5132 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5133 Op0.getOperand(0).getValueType() == MVT::v2f64 &&
5134 Op0.getOperand(1).getOpcode() == ISD::Constant &&
5135 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
5136 SDValue Vec = Op0.getOperand(0);
5137 for (auto *U : Vec->uses()) {
5138 if (U != Op0.getNode() &&
5139 U->hasOneUse() &&
5140 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5141 U->getOperand(0) == Vec &&
5142 U->getOperand(1).getOpcode() == ISD::Constant &&
5143 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
5144 SDValue OtherRound = SDValue(*U->use_begin(), 0);
5145 if (OtherRound.getOpcode() == ISD::FP_ROUND &&
5146 OtherRound.getOperand(0) == SDValue(U, 0) &&
5147 OtherRound.getValueType() == MVT::f32) {
5148 SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
5149 MVT::v4f32, Vec);
5150 DCI.AddToWorklist(VRound.getNode());
5151 SDValue Extract1 =
5152 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
5153 VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
5154 DCI.AddToWorklist(Extract1.getNode());
5155 DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
5156 SDValue Extract0 =
5157 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
5158 VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
5159 DCI.AddToWorklist(Extract0.getNode());
5160 return Extract0;
5161 }
5162 }
5163 }
5164 }
5165 return SDValue();
5166 }
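// Note: VROUND narrows v2f64 to f32 results placed in lanes 0 and 2 of a
// v4f32, which is why the two extractions above use indices 0 and 2.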
5167 SDValue SystemZTargetLowering::combineBSWAP(
5168 SDNode *N, DAGCombinerInfo &DCI) const {
5169 SelectionDAG &DAG = DCI.DAG;
5170 // Combine BSWAP (LOAD) into LRVH/LRV/LRVG
5171 // These loads are allowed to access memory multiple times, and so we must check
5172 // that the loads are not volatile before performing the combine.
5173 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
5174 N->getOperand(0).hasOneUse() &&
5175 (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 ||
5176 N->getValueType(0) == MVT::i64) &&
5177 !cast<LoadSDNode>(N->getOperand(0))->isVolatile()) {
5178 SDValue Load = N->getOperand(0);
5179 LoadSDNode *LD = cast<LoadSDNode>(Load);
5181 // Create the byte-swapping load.
5182 SDValue Ops[] = {
5183 LD->getChain(), // Chain
5184 LD->getBasePtr(), // Ptr
5185 DAG.getValueType(N->getValueType(0)) // VT
5186 };
5187 SDValue BSLoad =
5188 DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
5189 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
5190 MVT::i64 : MVT::i32, MVT::Other),
5191 Ops, LD->getMemoryVT(), LD->getMemOperand());
5193 // If this is an i16 load, insert the truncate.
5194 SDValue ResVal = BSLoad;
5195 if (N->getValueType(0) == MVT::i16)
5196 ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);
5198 // First, combine the bswap away. This makes the value produced by the
5199 // load dead.
5200 DCI.CombineTo(N, ResVal);
5202 // Next, combine the load away, we give it a bogus result value but a real
5203 // chain result. The result value is dead because the bswap is dead.
5204 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
5206 // Return N so it doesn't get rechecked!
5207 return SDValue(N, 0);
5208 }
5210 return SDValue();
5211 }
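// Worked example: (bswap (load:i32 P)) collapses into a single LRV load of P;
// for i16 the LRVH result is produced as i32 and truncated back afterwards.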
5212 SDValue SystemZTargetLowering::combineSHIFTROT(
5213 SDNode *N, DAGCombinerInfo &DCI) const {
5215 SelectionDAG &DAG = DCI.DAG;
5217 // Shift/rotate instructions only use the last 6 bits of the second operand
5218 // register. If the second operand is the result of an AND with an immediate
5219 // value that has its last 6 bits set, we can safely remove the AND operation.
5221 // If the AND operation doesn't have the last 6 bits set, we can't remove it
5222 // entirely, but we can still truncate it to a 16-bit value. This prevents
5223 // us from ending up with a NILL with a signed operand, which will cause the
5224 // instruction printer to abort.
5225 SDValue N1 = N->getOperand(1);
5226 if (N1.getOpcode() == ISD::AND) {
5227 SDValue AndMaskOp = N1->getOperand(1);
5228 auto *AndMask = dyn_cast<ConstantSDNode>(AndMaskOp);
5230 // The AND mask is constant
5231 if (AndMask) {
5232 auto AmtVal = AndMask->getZExtValue();
5234 // Bottom 6 bits are set
5235 if ((AmtVal & 0x3f) == 0x3f) {
5236 SDValue AndOp = N1->getOperand(0);
5238 // This is the only use, so remove the node
5239 if (N1.hasOneUse()) {
5240 // Combine the AND away
5241 DCI.CombineTo(N1.getNode(), AndOp);
5243 // Return N so it isn't rechecked
5244 return SDValue(N, 0);
5245 }
5246 // The node will be reused, so create a new node for this one use
5247 else {
5248 SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
5249 N->getValueType(0), N->getOperand(0),
5250 AndOp);
5251 DCI.AddToWorklist(Replace.getNode());
5253 return Replace;
5254 }
5256 // We can't remove the AND, but we can use NILL here (normally we would
5257 // use NILF). Only keep the last 16 bits of the mask. The actual
5258 // transformation will be handled by .td definitions.
5259 } else if (AmtVal >> 16 != 0) {
5260 SDValue AndOp = N1->getOperand(0);
5262 auto NewMask = DAG.getConstant(AndMask->getZExtValue() & 0x0000ffff,
5263 SDLoc(AndMaskOp),
5264 AndMaskOp.getValueType());
5266 auto NewAnd = DAG.getNode(N1.getOpcode(), SDLoc(N1), N1.getValueType(),
5267 AndOp, NewMask);
5269 SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
5270 N->getValueType(0), N->getOperand(0),
5271 NewAnd);
5272 DCI.AddToWorklist(Replace.getNode());
5274 return Replace;
5275 }
5276 }
5277 }
5279 return SDValue();
5280 }
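// Worked example: (srl X, (and Y, 63)) drops the AND completely because the
// shifter reads only the low 6 bits, while (srl X, (and Y, 0xff001f)) keeps
// the AND but shrinks the mask to 0x001f so it can be encoded with NILL.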
5282 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
5283 DAGCombinerInfo &DCI) const {
5284 switch (N->getOpcode()) {
5285 default: break;
5286 case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI);
5287 case SystemZISD::MERGE_HIGH:
5288 case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI);
5289 case ISD::STORE: return combineSTORE(N, DCI);
5290 case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI);
5291 case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI);
5292 case ISD::FP_ROUND: return combineFP_ROUND(N, DCI);
5293 case ISD::BSWAP: return combineBSWAP(N, DCI);
5294 case ISD::SHL:
5295 case ISD::SRA:
5296 case ISD::SRL:
5297 case ISD::ROTL: return combineSHIFTROT(N, DCI);
5298 }
5300 return SDValue();
5301 }
5303 //===----------------------------------------------------------------------===//
5304 // Custom insertion
5305 //===----------------------------------------------------------------------===//
5307 // Create a new basic block after MBB.
5308 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
5309 MachineFunction &MF = *MBB->getParent();
5310 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
5311 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
5312 return NewMBB;
5313 }
5315 // Split MBB after MI and return the new block (the one that contains
5316 // instructions after MI).
5317 static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
5318 MachineBasicBlock *MBB) {
5319 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
5320 NewMBB->splice(NewMBB->begin(), MBB,
5321 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
5322 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
5323 return NewMBB;
5324 }
5326 // Split MBB before MI and return the new block (the one that contains MI).
5327 static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
5328 MachineBasicBlock *MBB) {
5329 MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
5330 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
5331 NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
5332 return NewMBB;
5333 }
5335 // Force base value Base into a register before MI. Return the register.
5336 static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
5337 const SystemZInstrInfo *TII) {
5338 if (Base.isReg())
5339 return Base.getReg();
5341 MachineBasicBlock *MBB = MI.getParent();
5342 MachineFunction &MF = *MBB->getParent();
5343 MachineRegisterInfo &MRI = MF.getRegInfo();
5345 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
5346 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
5347 .add(Base)
5348 .addImm(0)
5349 .addReg(0);
5350 return Reg;
5351 }
5353 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
5354 MachineBasicBlock *
5355 SystemZTargetLowering::emitSelect(MachineInstr &MI,
5356 MachineBasicBlock *MBB,
5357 unsigned LOCROpcode) const {
5358 const SystemZInstrInfo *TII =
5359 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5361 unsigned DestReg = MI.getOperand(0).getReg();
5362 unsigned TrueReg = MI.getOperand(1).getReg();
5363 unsigned FalseReg = MI.getOperand(2).getReg();
5364 unsigned CCValid = MI.getOperand(3).getImm();
5365 unsigned CCMask = MI.getOperand(4).getImm();
5366 DebugLoc DL = MI.getDebugLoc();
5368 // Use LOCROpcode if possible.
5369 if (LOCROpcode && Subtarget.hasLoadStoreOnCond()) {
5370 BuildMI(*MBB, MI, DL, TII->get(LOCROpcode), DestReg)
5371 .addReg(FalseReg).addReg(TrueReg)
5372 .addImm(CCValid).addImm(CCMask);
5373 MI.eraseFromParent();
5374 return MBB;
5375 }
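// Note on the early exit above: LOCR/LOCGR is a load-on-condition between
// registers, so DestReg starts out as FalseReg and is overwritten with
// TrueReg only when CC matches CCMask, avoiding any branch.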
5377 MachineBasicBlock *StartMBB = MBB;
5378 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
5379 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
5381 // StartMBB:
5382 // BRC CCMask, JoinMBB
5383 // # fallthrough to FalseMBB
5384 MBB = StartMBB;
5385 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5386 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
5387 MBB->addSuccessor(JoinMBB);
5388 MBB->addSuccessor(FalseMBB);
5390 // FalseMBB:
5391 // # fallthrough to JoinMBB
5392 MBB = FalseMBB;
5393 MBB->addSuccessor(JoinMBB);
5395 // JoinMBB:
5396 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
5398 MBB = JoinMBB;
5399 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
5400 .addReg(TrueReg).addMBB(StartMBB)
5401 .addReg(FalseReg).addMBB(FalseMBB);
5403 MI.eraseFromParent();
5404 return JoinMBB;
5405 }
5407 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
5408 // StoreOpcode is the store to use and Invert says whether the store should
5409 // happen when the condition is false rather than true. If a STORE ON
5410 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
5411 MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
5412 MachineBasicBlock *MBB,
5413 unsigned StoreOpcode,
5414 unsigned STOCOpcode,
5415 bool Invert) const {
5416 const SystemZInstrInfo *TII =
5417 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5419 unsigned SrcReg = MI.getOperand(0).getReg();
5420 MachineOperand Base = MI.getOperand(1);
5421 int64_t Disp = MI.getOperand(2).getImm();
5422 unsigned IndexReg = MI.getOperand(3).getReg();
5423 unsigned CCValid = MI.getOperand(4).getImm();
5424 unsigned CCMask = MI.getOperand(5).getImm();
5425 DebugLoc DL = MI.getDebugLoc();
5427 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
5429 // Use STOCOpcode if possible. We could use different store patterns in
5430 // order to avoid matching the index register, but the performance trade-offs
5431 // might be more complicated in that case.
5432 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
5433 if (Invert)
5434 CCMask ^= CCValid;
5436 // ISel pattern matching also adds a load memory operand of the same
5437 // address, so take special care to find the storing memory operand.
5438 MachineMemOperand *MMO = nullptr;
5439 for (auto *I : MI.memoperands())
5440 if (I->isStore()) {
5441 MMO = I;
5442 break;
5443 }
5445 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
5446 .addReg(SrcReg)
5447 .add(Base)
5448 .addImm(Disp)
5449 .addImm(CCValid)
5450 .addImm(CCMask)
5451 .addMemOperand(MMO);
5453 MI.eraseFromParent();
5454 return MBB;
5455 }
5457 // Get the condition needed to branch around the store.
5458 if (!Invert)
5459 CCMask ^= CCValid;
5461 MachineBasicBlock *StartMBB = MBB;
5462 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
5463 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
5465 // StartMBB:
5466 // BRC CCMask, JoinMBB
5467 // # fallthrough to FalseMBB
5468 MBB = StartMBB;
5469 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5470 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
5471 MBB->addSuccessor(JoinMBB);
5472 MBB->addSuccessor(FalseMBB);
5474 // FalseMBB:
5475 // store %SrcReg, %Disp(%Index,%Base)
5476 // # fallthrough to JoinMBB
5477 MBB = FalseMBB;
5478 BuildMI(MBB, DL, TII->get(StoreOpcode))
5479 .addReg(SrcReg)
5480 .add(Base)
5481 .addImm(Disp)
5482 .addReg(IndexReg);
5483 MBB->addSuccessor(JoinMBB);
5485 MI.eraseFromParent();
5486 return JoinMBB;
5487 }
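// Note on Invert: CCMask is XOR'ed with CCValid on exactly one of the two
// paths above, because STOC stores when the condition holds, whereas the
// BRC form branches past the store and so needs the complemented mask.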
5489 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
5490 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
5491 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
5492 // BitSize is the width of the field in bits, or 0 if this is a partword
5493 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
5494 // is one of the operands. Invert says whether the field should be
5495 // inverted after performing BinOpcode (e.g. for NAND).
5496 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
5497 MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
5498 unsigned BitSize, bool Invert) const {
5499 MachineFunction &MF = *MBB->getParent();
5500 const SystemZInstrInfo *TII =
5501 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5502 MachineRegisterInfo &MRI = MF.getRegInfo();
5503 bool IsSubWord = (BitSize < 32);
5505 // Extract the operands. Base can be a register or a frame index.
5506 // Src2 can be a register or immediate.
5507 unsigned Dest = MI.getOperand(0).getReg();
5508 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
5509 int64_t Disp = MI.getOperand(2).getImm();
5510 MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
5511 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
5512 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
5513 DebugLoc DL = MI.getDebugLoc();
5514 if (IsSubWord)
5515 BitSize = MI.getOperand(6).getImm();
5517 // Subword operations use 32-bit registers.
5518 const TargetRegisterClass *RC = (BitSize <= 32 ?
5519 &SystemZ::GR32BitRegClass :
5520 &SystemZ::GR64BitRegClass);
5521 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
5522 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
5524 // Get the right opcodes for the displacement.
5525 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
5526 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
5527 assert(LOpcode && CSOpcode && "Displacement out of range");
5529 // Create virtual registers for temporary results.
5530 unsigned OrigVal = MRI.createVirtualRegister(RC);
5531 unsigned OldVal = MRI.createVirtualRegister(RC);
5532 unsigned NewVal = (BinOpcode || IsSubWord ?
5533 MRI.createVirtualRegister(RC) : Src2.getReg());
5534 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
5535 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
5537 // Insert a basic block for the main loop.
5538 MachineBasicBlock *StartMBB = MBB;
5539 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5540 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
5542 // StartMBB:
5543 // ...
5544 // %OrigVal = L Disp(%Base)
5545 // # fall through to LoopMBB
5546 MBB = StartMBB;
5547 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
5548 MBB->addSuccessor(LoopMBB);
5550 // LoopMBB:
5551 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
5552 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
5553 // %RotatedNewVal = OP %RotatedOldVal, %Src2
5554 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
5555 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
5556 // JNE LoopMBB
5557 // # fall through to DoneMBB
5558 MBB = LoopMBB;
5559 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5560 .addReg(OrigVal).addMBB(StartMBB)
5561 .addReg(Dest).addMBB(LoopMBB);
5563 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
5564 .addReg(OldVal).addReg(BitShift).addImm(0);
5565 if (Invert) {
5566 // Perform the operation normally and then invert every bit of the field.
5567 unsigned Tmp = MRI.createVirtualRegister(RC);
5568 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
5569 if (BitSize <= 32)
5570 // XILF with the upper BitSize bits set.
5571 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
5572 .addReg(Tmp).addImm(-1U << (32 - BitSize));
5573 else {
5574 // Use LCGR and add -1 to the result, which is more compact than
5575 // an XILF, XILH pair.
5576 unsigned Tmp2 = MRI.createVirtualRegister(RC);
5577 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
5578 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
5579 .addReg(Tmp2).addImm(-1);
5580 }
5581 } else if (BinOpcode)
5582 // A simple binary operation.
5583 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
5584 .addReg(RotatedOldVal)
5585 .add(Src2);
5586 else if (IsSubWord)
5587 // Use RISBG to rotate Src2 into position and use it to replace the
5588 // field in RotatedOldVal.
5589 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
5590 .addReg(RotatedOldVal).addReg(Src2.getReg())
5591 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
5593 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
5594 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
5595 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
5596 .addReg(OldVal)
5597 .addReg(NewVal)
5598 .add(Base)
5599 .addImm(Disp);
5600 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5601 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
5602 MBB->addSuccessor(LoopMBB);
5603 MBB->addSuccessor(DoneMBB);
5605 MI.eraseFromParent();
5606 return DoneMBB;
5607 }
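// Illustrative walk-through for a partword case: the earlier DAG lowering
// aligns Base to the containing word and supplies BitShift/NegBitShift, so
// RLL rotates the subword field into position, the operation runs on the
// rotated copy, and CS retries until no other CPU touched the word.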
5609 // Implement EmitInstrWithCustomInserter for pseudo
5610 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
5611 // instruction that should be used to compare the current field with the
5612 // minimum or maximum value. KeepOldMask is the BRC condition-code mask
5613 // for when the current field should be kept. BitSize is the width of
5614 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
5615 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
5616 MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
5617 unsigned KeepOldMask, unsigned BitSize) const {
5618 MachineFunction &MF = *MBB->getParent();
5619 const SystemZInstrInfo *TII =
5620 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5621 MachineRegisterInfo &MRI = MF.getRegInfo();
5622 bool IsSubWord = (BitSize < 32);
5624 // Extract the operands. Base can be a register or a frame index.
5625 unsigned Dest = MI.getOperand(0).getReg();
5626 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
5627 int64_t Disp = MI.getOperand(2).getImm();
5628 unsigned Src2 = MI.getOperand(3).getReg();
5629 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
5630 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
5631 DebugLoc DL = MI.getDebugLoc();
5632 if (IsSubWord)
5633 BitSize = MI.getOperand(6).getImm();
5635 // Subword operations use 32-bit registers.
5636 const TargetRegisterClass *RC = (BitSize <= 32 ?
5637 &SystemZ::GR32BitRegClass :
5638 &SystemZ::GR64BitRegClass);
5639 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
5640 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
5642 // Get the right opcodes for the displacement.
5643 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
5644 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
5645 assert(LOpcode && CSOpcode && "Displacement out of range");
5647 // Create virtual registers for temporary results.
5648 unsigned OrigVal = MRI.createVirtualRegister(RC);
5649 unsigned OldVal = MRI.createVirtualRegister(RC);
5650 unsigned NewVal = MRI.createVirtualRegister(RC);
5651 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
5652 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
5653 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
5655 // Insert 3 basic blocks for the loop.
5656 MachineBasicBlock *StartMBB = MBB;
5657 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5658 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
5659 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
5660 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
5662 // StartMBB:
5663 // ...
5664 // %OrigVal = L Disp(%Base)
5665 // # fall through to LoopMBB
5666 MBB = StartMBB;
5667 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
5668 MBB->addSuccessor(LoopMBB);
5670 // LoopMBB:
5671 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
5672 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
5673 // CompareOpcode %RotatedOldVal, %Src2
5674 // BRC KeepOldMask, UpdateMBB
5675 MBB = LoopMBB;
5676 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5677 .addReg(OrigVal).addMBB(StartMBB)
5678 .addReg(Dest).addMBB(UpdateMBB);
5680 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
5681 .addReg(OldVal).addReg(BitShift).addImm(0);
5682 BuildMI(MBB, DL, TII->get(CompareOpcode))
5683 .addReg(RotatedOldVal).addReg(Src2);
5684 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5685 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
5686 MBB->addSuccessor(UpdateMBB);
5687 MBB->addSuccessor(UseAltMBB);
5689 // UseAltMBB:
5690 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
5691 // # fall through to UpdateMBB
5692 MBB = UseAltMBB;
5693 if (IsSubWord)
5694 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
5695 .addReg(RotatedOldVal).addReg(Src2)
5696 .addImm(32).addImm(31 + BitSize).addImm(0);
5697 MBB->addSuccessor(UpdateMBB);
5699 // UpdateMBB:
5700 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
5701 // [ %RotatedAltVal, UseAltMBB ]
5702 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
5703 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
5704 // JNE LoopMBB
5705 // # fall through to DoneMBB
5706 MBB = UpdateMBB;
5707 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
5708 .addReg(RotatedOldVal).addMBB(LoopMBB)
5709 .addReg(RotatedAltVal).addMBB(UseAltMBB);
5711 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
5712 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
5713 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
5714 .addReg(OldVal)
5715 .addReg(NewVal)
5716 .add(Base)
5717 .addImm(Disp);
5718 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5719 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
5720 MBB->addSuccessor(LoopMBB);
5721 MBB->addSuccessor(DoneMBB);
5723 MI.eraseFromParent();
5724 return DoneMBB;
5725 }
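// Example: ATOMIC_LOAD_MIN_32 uses CompareOpcode CR with KeepOldMask
// CCMASK_CMP_LE, so the loop keeps %OldVal whenever it is already <= %Src2
// and otherwise takes UseAltMBB to adopt the new value instead.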
5727 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
5728 // instruction MI.
5729 MachineBasicBlock *
5730 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
5731 MachineBasicBlock *MBB) const {
5733 MachineFunction &MF = *MBB->getParent();
5734 const SystemZInstrInfo *TII =
5735 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5736 MachineRegisterInfo &MRI = MF.getRegInfo();
5738 // Extract the operands. Base can be a register or a frame index.
5739 unsigned Dest = MI.getOperand(0).getReg();
5740 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
5741 int64_t Disp = MI.getOperand(2).getImm();
5742 unsigned OrigCmpVal = MI.getOperand(3).getReg();
5743 unsigned OrigSwapVal = MI.getOperand(4).getReg();
5744 unsigned BitShift = MI.getOperand(5).getReg();
5745 unsigned NegBitShift = MI.getOperand(6).getReg();
5746 int64_t BitSize = MI.getOperand(7).getImm();
5747 DebugLoc DL = MI.getDebugLoc();
5749 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
5751 // Get the right opcodes for the displacement.
5752 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
5753 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
5754 assert(LOpcode && CSOpcode && "Displacement out of range");
5756 // Create virtual registers for temporary results.
5757 unsigned OrigOldVal = MRI.createVirtualRegister(RC);
5758 unsigned OldVal = MRI.createVirtualRegister(RC);
5759 unsigned CmpVal = MRI.createVirtualRegister(RC);
5760 unsigned SwapVal = MRI.createVirtualRegister(RC);
5761 unsigned StoreVal = MRI.createVirtualRegister(RC);
5762 unsigned RetryOldVal = MRI.createVirtualRegister(RC);
5763 unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
5764 unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
5766 // Insert 2 basic blocks for the loop.
5767 MachineBasicBlock *StartMBB = MBB;
5768 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5769 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
5770 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
5772 // StartMBB:
5773 // ...
5774 // %OrigOldVal = L Disp(%Base)
5775 // # fall through to LoopMBB
5776 MBB = StartMBB;
5777 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
5778 .add(Base)
5779 .addImm(Disp)
5780 .addReg(0);
5781 MBB->addSuccessor(LoopMBB);
5783 // LoopMBB:
5784 // %OldVal = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ]
5785 // %CmpVal = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ]
5786 // %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ]
5787 // %Dest = RLL %OldVal, BitSize(%BitShift)
5788 // ^^ The low BitSize bits contain the field
5789 // of interest.
5790 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
5791 // ^^ Replace the upper 32-BitSize bits of the
5792 // comparison value with those that we loaded,
5793 // so that we can use a full word comparison.
5794 // CR %Dest, %RetryCmpVal
5795 // JNE DoneMBB
5796 // # Fall through to SetMBB
5797 MBB = LoopMBB;
5798 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5799 .addReg(OrigOldVal).addMBB(StartMBB)
5800 .addReg(RetryOldVal).addMBB(SetMBB);
5801 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
5802 .addReg(OrigCmpVal).addMBB(StartMBB)
5803 .addReg(RetryCmpVal).addMBB(SetMBB);
5804 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
5805 .addReg(OrigSwapVal).addMBB(StartMBB)
5806 .addReg(RetrySwapVal).addMBB(SetMBB);
5807 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
5808 .addReg(OldVal).addReg(BitShift).addImm(BitSize);
5809 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
5810 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
5811 BuildMI(MBB, DL, TII->get(SystemZ::CR))
5812 .addReg(Dest).addReg(RetryCmpVal);
5813 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5814 .addImm(SystemZ::CCMASK_ICMP)
5815 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
5816 MBB->addSuccessor(DoneMBB);
5817 MBB->addSuccessor(SetMBB);
5819 // SetMBB:
5820 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
5821 // ^^ Replace the upper 32-BitSize bits of the new
5822 // value with those that we loaded.
5823 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
5824 // ^^ Rotate the new field to its proper position.
5825 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
5826 // JNE LoopMBB
5827 // # fall through to DoneMBB
5828 MBB = SetMBB;
5829 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
5830 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
5831 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
5832 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
5833 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
5834 .addReg(Dest)
5835 .addReg(StoreVal)
5836 .add(Base)
5837 .addImm(Disp);
5838 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5839 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
5840 MBB->addSuccessor(LoopMBB);
5841 MBB->addSuccessor(DoneMBB);
5843 MI.eraseFromParent();
5844 return DoneMBB;
5845 }
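// Worked example: for a halfword CAS, BitSize is 16; the RISBG32s above copy
// the loaded upper 16 bits into both the expected and replacement values so
// that a full-word CS can implement the 16-bit compare-and-swap.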
5847 // Emit an extension from a GR64 to a GR128. ClearEven is true
5848 // if the high register of the GR128 value must be cleared or false if
5849 // it's "don't care".
5850 MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
5851 MachineBasicBlock *MBB,
5852 bool ClearEven) const {
5853 MachineFunction &MF = *MBB->getParent();
5854 const SystemZInstrInfo *TII =
5855 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5856 MachineRegisterInfo &MRI = MF.getRegInfo();
5857 DebugLoc DL = MI.getDebugLoc();
5859 unsigned Dest = MI.getOperand(0).getReg();
5860 unsigned Src = MI.getOperand(1).getReg();
5861 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
5863 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
5864 if (ClearEven) {
5865 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
5866 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
5868 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
5869 .addImm(0);
5870 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
5871 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
5872 In128 = NewIn128;
5873 }
5874 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
5875 .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);
5877 MI.eraseFromParent();
5878 return MBB;
5879 }
5881 MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
5882 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
5883 MachineFunction &MF = *MBB->getParent();
5884 const SystemZInstrInfo *TII =
5885 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
5886 MachineRegisterInfo &MRI = MF.getRegInfo();
5887 DebugLoc DL = MI.getDebugLoc();
5889 MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
5890 uint64_t DestDisp = MI.getOperand(1).getImm();
5891 MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
5892 uint64_t SrcDisp = MI.getOperand(3).getImm();
5893 uint64_t Length = MI.getOperand(4).getImm();
5895 // When generating more than one CLC, all but the last will need to
5896 // branch to the end when a difference is found.
5897 MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
5898 splitBlockAfter(MI, MBB) : nullptr);
5900 // Check for the loop form, in which operand 5 is the trip count.
5901 if (MI.getNumExplicitOperands() > 5) {
5902 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
5904 uint64_t StartCountReg = MI.getOperand(5).getReg();
5905 uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
5906 uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
5907 forceReg(MI, DestBase, TII));
5909 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
5910 uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
5911 uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
5912 MRI.createVirtualRegister(RC));
5913 uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
5914 uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
5915 MRI.createVirtualRegister(RC));
5917 RC = &SystemZ::GR64BitRegClass;
5918 uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
5919 uint64_t NextCountReg = MRI.createVirtualRegister(RC);
5921 MachineBasicBlock *StartMBB = MBB;
5922 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
5923 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
5924 MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
5926 // StartMBB:
5927 // # fall through to LoopMBB
5928 MBB->addSuccessor(LoopMBB);
5930 // LoopMBB:
5931 // %ThisDestReg = phi [ %StartDestReg, StartMBB ],
5932 // [ %NextDestReg, NextMBB ]
5933 // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
5934 // [ %NextSrcReg, NextMBB ]
5935 // %ThisCountReg = phi [ %StartCountReg, StartMBB ],
5936 // [ %NextCountReg, NextMBB ]
5937 // ( PFD 2, 768+DestDisp(%ThisDestReg) )
5938 // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
5939 // ( JLH EndMBB )
5940 //
5941 // The prefetch is used only for MVC. The JLH is used only for CLC.
5942 MBB = LoopMBB;
5944 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
5945 .addReg(StartDestReg).addMBB(StartMBB)
5946 .addReg(NextDestReg).addMBB(NextMBB);
5947 if (!HaveSingleBase)
5948 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
5949 .addReg(StartSrcReg).addMBB(StartMBB)
5950 .addReg(NextSrcReg).addMBB(NextMBB);
5951 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
5952 .addReg(StartCountReg).addMBB(StartMBB)
5953 .addReg(NextCountReg).addMBB(NextMBB);
5954 if (Opcode == SystemZ::MVC)
5955 BuildMI(MBB, DL, TII->get(SystemZ::PFD))
5956 .addImm(SystemZ::PFD_WRITE)
5957 .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
5958 BuildMI(MBB, DL, TII->get(Opcode))
5959 .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
5960 .addReg(ThisSrcReg).addImm(SrcDisp);
5961 if (EndMBB) {
5962 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5963 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
5964 .addMBB(EndMBB);
5965 MBB->addSuccessor(EndMBB);
5966 MBB->addSuccessor(NextMBB);
5967 }
5969 // NextMBB:
5970 // %NextDestReg = LA 256(%ThisDestReg)
5971 // %NextSrcReg = LA 256(%ThisSrcReg)
5972 // %NextCountReg = AGHI %ThisCountReg, -1
5973 // CGHI %NextCountReg, 0
5974 // JLH LoopMBB
5975 // # fall through to DoneMBB
5976 //
5977 // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
5978 MBB = NextMBB;
5980 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
5981 .addReg(ThisDestReg).addImm(256).addReg(0);
5982 if (!HaveSingleBase)
5983 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
5984 .addReg(ThisSrcReg).addImm(256).addReg(0);
5985 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
5986 .addReg(ThisCountReg).addImm(-1);
5987 BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
5988 .addReg(NextCountReg).addImm(0);
5989 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5990 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
5991 .addMBB(LoopMBB);
5992 MBB->addSuccessor(LoopMBB);
5993 MBB->addSuccessor(DoneMBB);
5995 DestBase = MachineOperand::CreateReg(NextDestReg, false);
5996 SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
5997 Length &= 255;
5998 MBB = DoneMBB;
5999 }
6000 // Handle any remaining bytes with straight-line code.
6001 while (Length > 0) {
6002 uint64_t ThisLength = std::min(Length, uint64_t(256));
6003 // The previous iteration might have created out-of-range displacements.
6004 // Apply them using LAY if so.
6005 if (!isUInt<12>(DestDisp)) {
6006 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
6007 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
6008 .add(DestBase)
6009 .addImm(DestDisp)
6010 .addReg(0);
6011 DestBase = MachineOperand::CreateReg(Reg, false);
6012 DestDisp = 0;
6013 }
6014 if (!isUInt<12>(SrcDisp)) {
6015 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
6016 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
6017 .add(SrcBase)
6018 .addImm(SrcDisp)
6019 .addReg(0);
6020 SrcBase = MachineOperand::CreateReg(Reg, false);
6021 SrcDisp = 0;
6022 }
6023 BuildMI(*MBB, MI, DL, TII->get(Opcode))
6024 .add(DestBase)
6025 .addImm(DestDisp)
6026 .addImm(ThisLength)
6027 .add(SrcBase)
6028 .addImm(SrcDisp)
6029 ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
6030 DestDisp += ThisLength;
6031 SrcDisp += ThisLength;
6032 Length -= ThisLength;
6033 // If there's another CLC to go, branch to the end if a difference
6034 // was found.
6035 if (EndMBB && Length > 0) {
6036 MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
6037 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6038 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
6039 .addMBB(EndMBB);
6040 MBB->addSuccessor(EndMBB);
6041 MBB->addSuccessor(NextMBB);
6042 MBB = NextMBB;
6043 }
6044 }
6045 if (EndMBB) {
6046 MBB->addSuccessor(EndMBB);
6047 MBB = EndMBB;
6048 MBB->addLiveIn(SystemZ::CC);
6049 }
6051 MI.eraseFromParent();
6052 return MBB;
6053 }
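// Worked example: a 600-byte MVCSequence expands to three MVCs covering 256,
// 256 and 88 bytes with the displacements bumped after each chunk; in the CLC
// case every chunk but the last also branches to EndMBB on inequality.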
6055 // Decompose string pseudo-instruction MI into a loop that continually performs
6056 // Opcode until CC != 3.
6057 MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
6058 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
6059 MachineFunction &MF = *MBB->getParent();
6060 const SystemZInstrInfo *TII =
6061 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6062 MachineRegisterInfo &MRI = MF.getRegInfo();
6063 DebugLoc DL = MI.getDebugLoc();
6065 uint64_t End1Reg = MI.getOperand(0).getReg();
6066 uint64_t Start1Reg = MI.getOperand(1).getReg();
6067 uint64_t Start2Reg = MI.getOperand(2).getReg();
6068 uint64_t CharReg = MI.getOperand(3).getReg();
6070 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
6071 uint64_t This1Reg = MRI.createVirtualRegister(RC);
6072 uint64_t This2Reg = MRI.createVirtualRegister(RC);
6073 uint64_t End2Reg = MRI.createVirtualRegister(RC);
6075 MachineBasicBlock *StartMBB = MBB;
6076 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6077 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6079 // StartMBB:
6080 // # fall through to LoopMBB
6081 MBB->addSuccessor(LoopMBB);
6083 // LoopMBB:
6084 // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
6085 // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
6086 // R0L = %CharReg
6087 // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
6088 // JO LoopMBB
6089 // # fall through to DoneMBB
6090 //
6091 // The load of R0L can be hoisted by post-RA LICM.
6092 MBB = LoopMBB;
6094 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
6095 .addReg(Start1Reg).addMBB(StartMBB)
6096 .addReg(End1Reg).addMBB(LoopMBB);
6097 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
6098 .addReg(Start2Reg).addMBB(StartMBB)
6099 .addReg(End2Reg).addMBB(LoopMBB);
6100 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
6101 BuildMI(MBB, DL, TII->get(Opcode))
6102 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
6103 .addReg(This1Reg).addReg(This2Reg);
6104 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6105 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
6106 MBB->addSuccessor(LoopMBB);
6107 MBB->addSuccessor(DoneMBB);
6109 DoneMBB->addLiveIn(SystemZ::CC);
6111 MI.eraseFromParent();
6112 return DoneMBB;
6113 }
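// Note: CC 3 from CLST/MVST/SRST means the instruction stopped early after a
// CPU-determined number of bytes, so the JO above simply re-executes it from
// the addresses left in %End1Reg/%End2Reg until it completes.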
6115 // Update TBEGIN instruction with final opcode and register clobbers.
6116 MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
6117 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
6118 bool NoFloat) const {
6119 MachineFunction &MF = *MBB->getParent();
6120 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
6121 const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
6124 MI.setDesc(TII->get(Opcode));
6126 // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
6127 // Make sure to add the corresponding GRSM bits if they are missing.
6128 uint64_t Control = MI.getOperand(2).getImm();
6129 static const unsigned GPRControlBit[16] = {
6130 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
6131 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
6132 };
6133 Control |= GPRControlBit[15];
6134 if (TFI->hasFP(MF))
6135 Control |= GPRControlBit[11];
6136 MI.getOperand(2).setImm(Control);
6138 // Add GPR clobbers.
6139 for (int I = 0; I < 16; I++) {
6140 if ((Control & GPRControlBit[I]) == 0) {
6141 unsigned Reg = SystemZMC::GR64Regs[I];
6142 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
6143 }
6144 }
6146 // Add FPR/VR clobbers.
6147 if (!NoFloat && (Control & 4) != 0) {
6148 if (Subtarget.hasVector()) {
6149 for (int I = 0; I < 32; I++) {
6150 unsigned Reg = SystemZMC::VR128Regs[I];
6151 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
6152 }
6153 } else {
6154 for (int I = 0; I < 16; I++) {
6155 unsigned Reg = SystemZMC::FP64Regs[I];
6156 MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
6157 }
6158 }
6159 }
6161 return MBB;
6162 }
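// Illustrative note: each GRSM bit in TBEGIN's control field covers an
// even/odd register pair, which is why GPRControlBit lists every mask twice;
// 0x2000, for example, asks for %r4/%r5 to be restored on abort.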
6164 MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
6165 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
6166 MachineFunction &MF = *MBB->getParent();
6167 MachineRegisterInfo *MRI = &MF.getRegInfo();
6168 const SystemZInstrInfo *TII =
6169 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6170 DebugLoc DL = MI.getDebugLoc();
6172 unsigned SrcReg = MI.getOperand(0).getReg();
6174 // Create new virtual register of the same class as source.
6175 const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
6176 unsigned DstReg = MRI->createVirtualRegister(RC);
6178 // Replace pseudo with a normal load-and-test that models the def as
6179 // well.
6180 BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
6181 .addReg(SrcReg);
6182 MI.eraseFromParent();
6184 return MBB;
6185 }
6187 MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
6188 MachineInstr &MI, MachineBasicBlock *MBB) const {
6189 switch (MI.getOpcode()) {
6190 case SystemZ::Select32Mux:
6191 return emitSelect(MI, MBB,
6192 Subtarget.hasLoadStoreOnCond2()? SystemZ::LOCRMux : 0);
6193 case SystemZ::Select32:
6194 return emitSelect(MI, MBB, SystemZ::LOCR);
6195 case SystemZ::Select64:
6196 return emitSelect(MI, MBB, SystemZ::LOCGR);
6197 case SystemZ::SelectF32:
6198 case SystemZ::SelectF64:
6199 case SystemZ::SelectF128:
6200 case SystemZ::SelectVR128:
6201 return emitSelect(MI, MBB, 0);
6203 case SystemZ::CondStore8Mux:
6204 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
6205 case SystemZ::CondStore8MuxInv:
6206 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
6207 case SystemZ::CondStore16Mux:
6208 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
6209 case SystemZ::CondStore16MuxInv:
6210 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
6211 case SystemZ::CondStore32Mux:
6212 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
6213 case SystemZ::CondStore32MuxInv:
6214 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
6215 case SystemZ::CondStore8:
6216 return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
6217 case SystemZ::CondStore8Inv:
6218 return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
6219 case SystemZ::CondStore16:
6220 return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
6221 case SystemZ::CondStore16Inv:
6222 return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
6223 case SystemZ::CondStore32:
6224 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
6225 case SystemZ::CondStore32Inv:
6226 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
6227 case SystemZ::CondStore64:
6228 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
6229 case SystemZ::CondStore64Inv:
6230 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
6231 case SystemZ::CondStoreF32:
6232 return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
6233 case SystemZ::CondStoreF32Inv:
6234 return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
6235 case SystemZ::CondStoreF64:
6236 return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
6237 case SystemZ::CondStoreF64Inv:
6238 return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
6240 case SystemZ::AEXT128:
6241 return emitExt128(MI, MBB, false);
6242 case SystemZ::ZEXT128:
6243 return emitExt128(MI, MBB, true);
6245 case SystemZ::ATOMIC_SWAPW:
6246 return emitAtomicLoadBinary(MI, MBB, 0, 0);
6247 case SystemZ::ATOMIC_SWAP_32:
6248 return emitAtomicLoadBinary(MI, MBB, 0, 32);
6249 case SystemZ::ATOMIC_SWAP_64:
6250 return emitAtomicLoadBinary(MI, MBB, 0, 64);
6252 case SystemZ::ATOMIC_LOADW_AR:
6253 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
6254 case SystemZ::ATOMIC_LOADW_AFI:
6255 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
6256 case SystemZ::ATOMIC_LOAD_AR:
6257 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
6258 case SystemZ::ATOMIC_LOAD_AHI:
6259 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
6260 case SystemZ::ATOMIC_LOAD_AFI:
6261 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
6262 case SystemZ::ATOMIC_LOAD_AGR:
6263 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
6264 case SystemZ::ATOMIC_LOAD_AGHI:
6265 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
6266 case SystemZ::ATOMIC_LOAD_AGFI:
6267 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
6269 case SystemZ::ATOMIC_LOADW_SR:
6270 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
6271 case SystemZ::ATOMIC_LOAD_SR:
6272 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
6273 case SystemZ::ATOMIC_LOAD_SGR:
6274 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
6276 case SystemZ::ATOMIC_LOADW_NR:
6277 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
6278 case SystemZ::ATOMIC_LOADW_NILH:
6279 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
6280 case SystemZ::ATOMIC_LOAD_NR:
6281 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
6282 case SystemZ::ATOMIC_LOAD_NILL:
6283 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
6284 case SystemZ::ATOMIC_LOAD_NILH:
6285 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
6286 case SystemZ::ATOMIC_LOAD_NILF:
6287 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
6288 case SystemZ::ATOMIC_LOAD_NGR:
6289 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
6290 case SystemZ::ATOMIC_LOAD_NILL64:
6291 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
6292 case SystemZ::ATOMIC_LOAD_NILH64:
6293 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
6294 case SystemZ::ATOMIC_LOAD_NIHL64:
6295 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
6296 case SystemZ::ATOMIC_LOAD_NIHH64:
6297 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
6298 case SystemZ::ATOMIC_LOAD_NILF64:
6299 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
6300 case SystemZ::ATOMIC_LOAD_NIHF64:
6301 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
6303 case SystemZ::ATOMIC_LOADW_OR:
6304 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
6305 case SystemZ::ATOMIC_LOADW_OILH:
6306 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
6307 case SystemZ::ATOMIC_LOAD_OR:
6308 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
6309 case SystemZ::ATOMIC_LOAD_OILL:
6310 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
6311 case SystemZ::ATOMIC_LOAD_OILH:
6312 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
6313 case SystemZ::ATOMIC_LOAD_OILF:
6314 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
6315 case SystemZ::ATOMIC_LOAD_OGR:
6316 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
6317 case SystemZ::ATOMIC_LOAD_OILL64:
6318 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
6319 case SystemZ::ATOMIC_LOAD_OILH64:
6320 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
6321 case SystemZ::ATOMIC_LOAD_OIHL64:
6322 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
6323 case SystemZ::ATOMIC_LOAD_OIHH64:
6324 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
6325 case SystemZ::ATOMIC_LOAD_OILF64:
6326 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
6327 case SystemZ::ATOMIC_LOAD_OIHF64:
6328 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
6330 case SystemZ::ATOMIC_LOADW_XR:
6331 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
6332 case SystemZ::ATOMIC_LOADW_XILF:
6333 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
6334 case SystemZ::ATOMIC_LOAD_XR:
6335 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
6336 case SystemZ::ATOMIC_LOAD_XILF:
6337 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
6338 case SystemZ::ATOMIC_LOAD_XGR:
6339 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
6340 case SystemZ::ATOMIC_LOAD_XILF64:
6341 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
6342 case SystemZ::ATOMIC_LOAD_XIHF64:
6343 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
6345 case SystemZ::ATOMIC_LOADW_NRi:
6346 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
6347 case SystemZ::ATOMIC_LOADW_NILHi:
6348 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
6349 case SystemZ::ATOMIC_LOAD_NRi:
6350 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
6351 case SystemZ::ATOMIC_LOAD_NILLi:
6352 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
6353 case SystemZ::ATOMIC_LOAD_NILHi:
6354 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
6355 case SystemZ::ATOMIC_LOAD_NILFi:
6356 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
6357 case SystemZ::ATOMIC_LOAD_NGRi:
6358 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
6359 case SystemZ::ATOMIC_LOAD_NILL64i:
6360 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
6361 case SystemZ::ATOMIC_LOAD_NILH64i:
6362 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
6363 case SystemZ::ATOMIC_LOAD_NIHL64i:
6364 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
6365 case SystemZ::ATOMIC_LOAD_NIHH64i:
6366 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
6367 case SystemZ::ATOMIC_LOAD_NILF64i:
6368 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
6369 case SystemZ::ATOMIC_LOAD_NIHF64i:
6370 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
6372 case SystemZ::ATOMIC_LOADW_MIN:
6373 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6374 SystemZ::CCMASK_CMP_LE, 0);
6375 case SystemZ::ATOMIC_LOAD_MIN_32:
6376 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6377 SystemZ::CCMASK_CMP_LE, 32);
6378 case SystemZ::ATOMIC_LOAD_MIN_64:
6379 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
6380 SystemZ::CCMASK_CMP_LE, 64);
6382 case SystemZ::ATOMIC_LOADW_MAX:
6383 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6384 SystemZ::CCMASK_CMP_GE, 0);
6385 case SystemZ::ATOMIC_LOAD_MAX_32:
6386 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6387 SystemZ::CCMASK_CMP_GE, 32);
6388 case SystemZ::ATOMIC_LOAD_MAX_64:
6389 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
6390 SystemZ::CCMASK_CMP_GE, 64);
6392 case SystemZ::ATOMIC_LOADW_UMIN:
6393 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6394 SystemZ::CCMASK_CMP_LE, 0);
6395 case SystemZ::ATOMIC_LOAD_UMIN_32:
6396 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6397 SystemZ::CCMASK_CMP_LE, 32);
6398 case SystemZ::ATOMIC_LOAD_UMIN_64:
6399 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
6400 SystemZ::CCMASK_CMP_LE, 64);
6402 case SystemZ::ATOMIC_LOADW_UMAX:
6403 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6404 SystemZ::CCMASK_CMP_GE, 0);
6405 case SystemZ::ATOMIC_LOAD_UMAX_32:
6406 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6407 SystemZ::CCMASK_CMP_GE, 32);
6408 case SystemZ::ATOMIC_LOAD_UMAX_64:
6409 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
6410 SystemZ::CCMASK_CMP_GE, 64);
6412 case SystemZ::ATOMIC_CMP_SWAPW:
6413 return emitAtomicCmpSwapW(MI, MBB);
6414 case SystemZ::MVCSequence:
6415 case SystemZ::MVCLoop:
6416 return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
6417 case SystemZ::NCSequence:
6418 case SystemZ::NCLoop:
6419 return emitMemMemWrapper(MI, MBB, SystemZ::NC);
6420 case SystemZ::OCSequence:
6421 case SystemZ::OCLoop:
6422 return emitMemMemWrapper(MI, MBB, SystemZ::OC);
6423 case SystemZ::XCSequence:
6424 case SystemZ::XCLoop:
6425 return emitMemMemWrapper(MI, MBB, SystemZ::XC);
6426 case SystemZ::CLCSequence:
6427 case SystemZ::CLCLoop:
6428 return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
6429 case SystemZ::CLSTLoop:
6430 return emitStringWrapper(MI, MBB, SystemZ::CLST);
6431 case SystemZ::MVSTLoop:
6432 return emitStringWrapper(MI, MBB, SystemZ::MVST);
6433 case SystemZ::SRSTLoop:
6434 return emitStringWrapper(MI, MBB, SystemZ::SRST);
6435 case SystemZ::TBEGIN:
6436 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
6437 case SystemZ::TBEGIN_nofloat:
6438 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
6439 case SystemZ::TBEGINC:
6440 return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
6441 case SystemZ::LTEBRCompare_VecPseudo:
6442 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
6443 case SystemZ::LTDBRCompare_VecPseudo:
6444 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
6445 case SystemZ::LTXBRCompare_VecPseudo:
6446 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
6448 default:
6449 llvm_unreachable("Unexpected instr type to insert");
6450 }
6451 }
6453 // This is only used by the isel schedulers, and is needed only to prevent
6454 // compiler from crashing when list-ilp is used.
6455 const TargetRegisterClass *
6456 SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
6457 if (VT == MVT::Untyped)
6458 return &SystemZ::ADDR128BitRegClass;
6459 return TargetLowering::getRepRegClassFor(VT);
6460 }