//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
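  // For example, an i8 argument passed in memory on PPC64 is extended and
  // occupies a full 8-byte slot in the parameter save area, so its address
  // is always at least 8-byte aligned.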

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }
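  // Illustrative note (not an exhaustive mapping): on ISA 3.0 the legal f16
  // extending loads and truncating stores above correspond, roughly, to the
  // half-precision VSX converts (xscvhpdp/xscvdphp) paired with halfword
  // loads/stores.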

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }
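  // A pre-increment access folds the address update into the memory op,
  // e.g. "lwzu r5, 4(r4)" loads the word at r4+4 and writes r4+4 back into
  // r4 in the same instruction.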

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9. On P9 we may
  // use a hardware instruction to compute the remainder. When the result of
  // both the remainder and the division is required it is more efficient to
  // compute the remainder from the result of the division rather than use
  // the remainder instruction. The instructions are legalized directly
  // because the DivRemPairsPass performs the transformation at the IR level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }
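  // Sketch of the IR-level rewrite DivRemPairs performs when both results
  // of the same operands are live (so only the divide needs hardware):
  //   %q = sdiv i32 %a, %b
  //   %t = mul i32 %q, %b
  //   %r = sub i32 %a, %t   ; instead of: %r = srem i32 %a, %b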

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations on scalars.
  // TODO: Handle SPE-specific operations.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  if (Subtarget.hasVSX())
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f64, Legal);

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have a scalar BSWAP, but we can use the vector BSWAP
  // instruction xxbrd to speed up scalar BSWAP64.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
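  // The custom BSWAP64 path on P9 is, roughly, a GPR-to-VSR move, an xxbrd
  // byte-reverse in the vector unit, and a move back to a GPR.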

  // CTTZ and CTPOP were introduced in P9 and P8, respectively.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);
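  // Only rotate-left instructions exist; a rotr by n is instead expressed as
  // a rotl by (bits - n), e.g. rotrwi rA, rS, n is just a mnemonic for
  // rotlwi rA, rS, 32-n.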

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
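    // For example, for an in-range x, (uint32_t)x equals the low word of the
    // i64 produced by a truncating fp->i64 convert (fctidz), so the custom
    // lowering can reuse that path and keep only the low 32 bits.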

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }
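  // The *_PARTS nodes operate on a value split across two registers: they
  // take the low and high halves plus the shift amount and produce both
  // result halves, so these double-wide shifts never become libcalls.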

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }

    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected
      // after the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
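      // In other words, a shuffle of wider elements is re-expressed as the
      // equivalent v16i8 byte shuffle (each element index expands to its
      // component byte indices), which vperm can always perform.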

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
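    // e.g. a v8i16 -> v8i8 truncate can be done as an in-register byte
    // pack/permute of the source vector instead of being scalarized through
    // GPRs.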

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }

      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact
      // exception, so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128-bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
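      // Roughly: the sub-128-bit source is widened/extended to a legal
      // vector in-register and then converted with the full-width vector
      // convert, rather than extracting and converting each element.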

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations of vector.
      // The predicate is `hasVSX` because Altivec instructions do not raise
      // floating-point exceptions, while VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128-bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
      // No extending loads to f128 on PPC.
      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::SELECT, MVT::f128, Expand);
      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);
      // No implementation for these ops on PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);

      // Handle constrained floating-point operations of fp128.
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);

      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
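      // These vector BSWAPs map onto the ISA 3.0 byte-reverse instructions
      // (xxbrh/xxbrw/xxbrd/xxbrq) for v8i16/v4i32/v2i64/v1i128 respectively.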
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }

    // TODO: Handle constrained floating-point operations of v4f64.
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }
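  // Under unsafe math these combines can rewrite a/b as a * (1/b) and sqrt
  // via its reciprocal, seeding with the fre/frsqrte estimate instructions
  // and refining with Newton-Raphson iterations.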

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }

  setLibcallName(RTLIB::LOG_F128, "logf128");
  setLibcallName(RTLIB::LOG2_F128, "log2f128");
  setLibcallName(RTLIB::LOG10_F128, "log10f128");
  setLibcallName(RTLIB::EXP_F128, "expf128");
  setLibcallName(RTLIB::EXP2_F128, "exp2f128");
  setLibcallName(RTLIB::SIN_F128, "sinf128");
  setLibcallName(RTLIB::COS_F128, "cosf128");
  setLibcallName(RTLIB::POW_F128, "powf128");
  setLibcallName(RTLIB::FMIN_F128, "fminf128");
  setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
  setLibcallName(RTLIB::POWI_F128, "__powikf2");
  setLibcallName(RTLIB::REM_F128, "fmodf128");

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR10:
  case PPC::DIR_PWR_FUTURE:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }
1353 if (Subtarget.enableMachineScheduler())
1354 setSchedulingPreference(Sched::Source);
1356 setSchedulingPreference(Sched::Hybrid);
  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of a function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
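  // For reference (an illustrative reading, not from the original source):
  // with 4-byte word stores, MaxStoresPerMemcpy = 32 permits inline expansion
  // of memcpys up to roughly 128 bytes, matching the GCC threshold cited
  // above; the A2 settings quadruple that limit.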
  // Let the subtarget (CPU) decide if a predictable select is more expensive
  // than the corresponding branch. This information is used in CGP to decide
  // when to convert selects into branches.
  PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
             MaxAlign < 16)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Align EltAlign;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      Align EltAlign;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary. Everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Alignment, Subtarget.hasQPX() ? Align(32) : Align(16));
  return Alignment.value();
}
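
// Example (an illustrative sketch, assuming an Altivec subtarget without
// QPX): for
//   struct S { int i; vector float v; };
// getMaxByValAlign finds the 128-bit vector member and raises the result to
// 16 bytes, while a plain i32 byval argument keeps the default of 8 bytes
// (PPC64) or 4 bytes (PPC32).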

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

/// isMulhCheaperThanMulShift - Return true if a mulh[s|u] node for a specific
/// type is cheaper than a multiply followed by a shift.
/// This is true for words and doublewords on 64-bit PowerPC.
bool PPCTargetLowering::isMulhCheaperThanMulShift(EVT Type) const {
  if (Subtarget.isPPC64() && (isOperationLegal(ISD::MULHS, Type) ||
                              isOperationLegal(ISD::MULHU, Type)))
    return true;

  return TargetLowering::isMulhCheaperThanMulShift(Type);
}
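
// Example (illustrative): for i64 on a 64-bit subtarget, DAGCombiner can form
// (mulhs x, y) -- the high 64 bits of the 128-bit product -- directly, which
// selects to a single mulhd instead of widening to i128, multiplying, and
// shifting right by 64.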

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::XXSPLTI_SP_TO_DP:
    return "PPCISD::XXSPLTI_SP_TO_DP";
  case PPCISD::XXSPLTI32DX:
    return "PPCISD::XXSPLTI32DX";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
  case PPCISD::ANDI_rec_1_EQ_BIT:
    return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT:
    return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT:
    return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
  case PPCISD::SC: return "PPCISD::SC";
  case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB: return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::VABSD: return "PPCISD::VABSD";
  case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT: return "PPCISD::QBFLT";
  case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
  case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
  case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
  case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
  case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
  case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
  case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR";
  case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT";
  case PPCISD::FNMSUB: return "PPCISD::FNMSUB";
  }
  return nullptr;
}
EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}
//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}
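
// Example (illustrative): for ShuffleKind 0 (big-endian, two inputs) the
// accepted mask is <1,3,5,...,31>, i.e. the odd bytes of the concatenated
// inputs -- exactly the low-order byte of every halfword, which is what
// vpkuhum produces.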

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}
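
// Example (illustrative): a big-endian vmrglb (UnitSize = 1, ShuffleKind 0)
// accepts the mask <8,24,9,25,...,15,31>: bytes 8-15 of the first input
// interleaved with bytes 8-15 of the second (24-31 in concatenated
// numbering), matching isVMerge(N, 1, 8, 24) above.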

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 elements
 * of size 8 bits. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 * input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask represents an even or odd word merge
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}
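
// Worked example (illustrative): on big-endian, an even merge (vmrgew) uses
// indexOffset 0 and RHSStartValue 16, so isVMerge accepts the mask
// <0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27> -- words 0 and 2 (the even
// words) of each input.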

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}
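
// Example (illustrative): the big-endian two-input mask <3,4,5,...,18> is
// consecutive starting at 3, so this returns a vsldoi shift amount of 3; for
// the equivalent little-endian (swapped-operand) form the reported amount is
// 16 - 3 = 13.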

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}
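
// Example (illustrative): with EltSize = 4 the mask
// <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7> splats word element 1 of the first
// input and is accepted, whereas <4,5,6,7, 8,9,10,11, ...> is rejected
// because the groups differ.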

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the delta between indices inside each N byte element:
/// 1 if the mask is in increasing order, -1 if it is in decreasing order.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; // Width is never greater than 16.
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}
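
// Example (illustrative): with Width = 4 and StepLen = 1 the mask
// <4,5,6,7, 0,1,2,3, 8,9,10,11, 12,13,14,15> passes, since each word's bytes
// ascend from a multiple of 4; StepLen = -1 instead accepts descending runs
// such as <3,2,1,0, ...>, as used by the XXBR* byte-reverse checks below.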

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}
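
// Worked example (illustrative, little-endian): the mask
// <0,1,2,3, 4,5,6,7, 28,29,30,31, 12,13,14,15> has word pattern 0, 1, H, 3
// with H = 7, so the matcher reports Swap = false,
// ShiftElts = LittleEndianShifts[7 & 0x3] = 3 and InsertAtByte = 4.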

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else {
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}

/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}
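
// Worked example (illustrative, little-endian): the byte mask <8..15, 24..31>
// gives M0 = 1, M1 = 3. Since M0 < 2 && M1 > 1 the inputs are swapped and the
// indices renumbered to M0 = 3, M1 = 1, so
// DM = (((~1) & 1) << 1) + ((~3) & 1) = 0.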

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}
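
// Example (illustrative): for a word splat (EltSize = 4) whose mask begins at
// byte 4 (array element 1), big-endian reports index 1 directly, while
// little-endian reports (16 / 4) - 1 - 1 = 2, counting elements from the left
// of the register as the vspltw mnemonic expects.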

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue(); // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue; // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16) // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}
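
// Example (an illustrative sketch, big-endian layout): for a v8i16
// build_vector <0,5, 0,5, 0,5, 0,5> with ByteSize = 4, EltSize is 2 and
// Multiple is 2; the chunks agree, the leading halfword is zero, and the low
// halfword 5 < 16, so this returns 5 and the caller can materialize the
// vector with a single vspltisw(5).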

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}

bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressEVXRegReg - Given the specified address, check to see if it can
/// be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
       UI != E; ++UI) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
/// non-zero and N can be represented by a base register plus a signed 16-bit
/// displacement, make a more precise judgement by checking (displacement % \p
/// EncodingAlignment).
bool PPCTargetLowering::SelectAddressRegReg(
    SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
    MaybeAlign EncodingAlignment) const {
  // If we have a PC Relative target flag don't select as [reg+reg]. It will be
  // better represented as [Reg + Imm].
  if (SelectAddressPCRel(N, Base))
    return false;

  int16_t Imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    // Is there an SPE load/store (f64) that can't handle a 16-bit offset?
    // SPE load/store can only handle 8-bit offsets.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
    if (isIntS16Immediate(N.getOperand(1), Imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
      return false; // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false; // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), Imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
      return false; // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

    if (LHSKnown.Zero.getBoolValue()) {
      KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // change the result.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 %v, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (MFI.getObjectAlign(FrameIdx) >= Align(4))
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
/// displacements that are multiples of that value.
bool PPCTargetLowering::SelectAddressRegImm(
    SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
    MaybeAlign EncodingAlignment) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);

  // If we have a PC Relative target flag don't select as [reg+imm]. It will be
  // better represented as [PC + Imm].
  if (SelectAddressPCRel(N, Base))
    return false;

  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0); // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

      if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // change the result.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    int16_t Imm;
    if (isIntS16Immediate(CN, Imm) &&
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment ||
         isAligned(*EncodingAlignment, CN->getZExtValue()))) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}
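
// Worked example (illustrative): for the constant address 0x12348000 the code
// above produces disp = (short)0x8000 = -32768 and base = LIS of
// (0x12348000 - (-32768)) >> 16 = 0x1235; materializing 0x12350000 and adding
// the sign-extended displacement recovers 0x12348000.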

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add. However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register. We only get rid of the add if it is not an add of a
  // value and a 16-bit signed constant and both have a single use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

template <typename Ty> static bool isValidPCRelNode(SDValue N) {
  Ty *PCRelCand = dyn_cast<Ty>(N);
  return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
}

/// Returns true if this address is a PC Relative address.
/// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
/// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
  // This is a materialize PC Relative node. Always select this as PC Relative.
  Base = N;
  if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
    return true;
  if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
      isValidPCRelNode<GlobalAddressSDNode>(N) ||
      isValidPCRelNode<JumpTableSDNode>(N) ||
      isValidPCRelNode<BlockAddressSDNode>(N))
    return true;
  return false;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {

  // If there are any other uses other than scalar to vector, then we should
  // keep it as a scalar load -> direct move pattern to prevent multiple
  // loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

  EVT MemVT = LD->getMemoryVT();
  if (!MemVT.isSimple())
    return false;
  switch(MemVT.getSimpleVT().SimpleTy) {
  case MVT::i64:
    break;
  case MVT::i32:
    if (!ST.hasP8Vector())
      return false;
    break;
  case MVT::i16:
  case MVT::i8:
    if (!ST.hasP9Vector())
      return false;
    break;
  default:
    return false;
  }

  SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
        UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
      return false;

  return true;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as a pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
  // instructions because we can fold these into a more efficient instruction
  // instead, (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Set the HiOpFlags and LoOpFlags to the target MO flags for a label
/// reference, adding the PIC flag when labels should be referenced via a
/// PICBase.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}

SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue GA) const {
  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
                        : Subtarget.isAIXABI()
                              ? DAG.getRegister(PPC::R2, VT)
                              : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
      MachineMemOperand::MOLoad);
}
2885 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2886 SelectionDAG &DAG) const {
2887 EVT PtrVT = Op.getValueType();
2888 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2889 const Constant *C = CP->getConstVal();
2891 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2892 // The actual address of the GlobalValue is stored in the TOC.
2893 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2894 if (Subtarget.isUsingPCRelativeCalls()) {
2896 EVT Ty = getPointerTy(DAG.getDataLayout());
2897 SDValue ConstPool = DAG.getTargetConstantPool(
2898 C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
      return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
    }
2901 setUsesTOCBasePtr(DAG);
2902 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }
2906 unsigned MOHiFlag, MOLoFlag;
2907 bool IsPIC = isPositionIndependent();
2908 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA =
        DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }
  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}
2923 // For 64-bit PowerPC, prefer the more compact relative encodings.
2924 // This trades 32 bits per jump table entry for one or two instructions
// at the jump site.
2926 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2927 if (isJumpTableRelative())
2928 return MachineJumpTableInfo::EK_LabelDifference32;
2930 return TargetLowering::getJumpTableEncoding();
2933 bool PPCTargetLowering::isJumpTableRelative() const {
  if (UseAbsoluteJumpTables)
    return false;
  if (Subtarget.isPPC64() || Subtarget.isAIXABI())
    return true;
2938 return TargetLowering::isJumpTableRelative();
2941 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2942 SelectionDAG &DAG) const {
2943 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2944 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2946 switch (getTargetMachine().getCodeModel()) {
2947 case CodeModel::Small:
2948 case CodeModel::Medium:
2949 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
  default:
    return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  }
}
const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {
2960 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2961 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2963 switch (getTargetMachine().getCodeModel()) {
2964 case CodeModel::Small:
2965 case CodeModel::Medium:
2966 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
  default:
    return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
  }
}
2972 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2973 EVT PtrVT = Op.getValueType();
2974 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2976 // isUsingPCRelativeCalls() returns true when PCRelative is enabled
  if (Subtarget.isUsingPCRelativeCalls()) {
    SDLoc DL(JT);
    EVT Ty = getPointerTy(DAG.getDataLayout());
    SDValue GA =
        DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
    SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
    return MatAddr;
  }
2986 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2987 // The actual address of the GlobalValue is stored in the TOC.
2988 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2989 setUsesTOCBasePtr(DAG);
2990 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), GA);
  }
2994 unsigned MOHiFlag, MOLoFlag;
2995 bool IsPIC = isPositionIndependent();
2996 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2998 if (IsPIC && Subtarget.isSVR4ABI()) {
2999 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3000 PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), GA);
  }
3004 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3005 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}
3009 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3010 SelectionDAG &DAG) const {
3011 EVT PtrVT = Op.getValueType();
3012 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3013 const BlockAddress *BA = BASDN->getBlockAddress();
3015 // isUsingPCRelativeCalls() returns true when PCRelative is enabled
  if (Subtarget.isUsingPCRelativeCalls()) {
    SDLoc DL(BASDN);
    EVT Ty = getPointerTy(DAG.getDataLayout());
    SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
                                           PPCII::MO_PCREL_FLAG);
    SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
    return MatAddr;
  }
3025 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3026 // The actual BlockAddress is stored in the TOC.
3027 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3028 setUsesTOCBasePtr(DAG);
3029 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), GA);
  }
3033 // 32-bit position-independent ELF stores the BlockAddress in the .got.
  if (Subtarget.is32BitELFABI() && isPositionIndependent())
    return getTOCEntry(
        DAG, SDLoc(BASDN),
        DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3039 unsigned MOHiFlag, MOLoFlag;
3040 bool IsPIC = isPositionIndependent();
3041 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3042 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3043 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}
3047 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3048 SelectionDAG &DAG) const {
3049 // FIXME: TLS addresses currently use medium model code sequences,
3050 // which is the most useful form. Eventually support for small and
3051 // large models could be added if users need it, at the cost of
3052 // additional complexity.
3053 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3054 if (DAG.getTarget().useEmulatedTLS())
3055 return LowerToTLSEmulatedModel(GA, DAG);
  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
3059 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3060 bool is64bit = Subtarget.isPPC64();
3061 const Module *M = DAG.getMachineFunction().getFunction().getParent();
3062 PICLevel::Level picLevel = M->getPICLevel();
3064 const TargetMachine &TM = getTargetMachine();
3065 TLSModel::Model Model = TM.getTLSModel(GV);
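  // Illustrative summary (not from the original source): local-exec addresses
  // are computed directly off the thread pointer (X13 on 64-bit, R2 on
  // 32-bit), typically as
  //   addis rT, r13, sym@tprel@ha
  //   addi  rT, rT,  sym@tprel@l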
3067 if (Model == TLSModel::LocalExec) {
3068 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3069 PPCII::MO_TPREL_HA);
3070 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3071 PPCII::MO_TPREL_LO);
3072 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3073 : DAG.getRegister(PPC::R2, MVT::i32);
3075 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }
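  // Initial-exec (sketch, assuming 64-bit ELF): the TP-relative offset is
  // loaded from the GOT and added to the thread pointer, roughly
  //   addis rT, r2, sym@got@tprel@ha
  //   ld    rT, sym@got@tprel@l(rT)
  //   add   rT, rT, r13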
3079 if (Model == TLSModel::InitialExec) {
3080 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                PPCII::MO_TLS);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
                           PtrVT, GOTReg, TGA);
    } else {
      if (!TM.isPositionIndependent())
3091 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3092 else if (picLevel == PICLevel::SmallPIC)
3093 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
3097 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
3098 PtrVT, TGA, GOTPtr);
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }
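  // General-dynamic (sketch, assuming 64-bit ELF): ADDI_TLSGD_L_ADDR below
  // stands for the classic call sequence, roughly
  //   addis r3, r2, sym@got@tlsgd@ha
  //   addi  r3, r3, sym@got@tlsgd@l
  //   bl    __tls_get_addr(sym@tlsgd)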
3102 if (Model == TLSModel::GeneralDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
                       GOTPtr, TGA, TGA);
  }
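  // Local-dynamic (sketch): like general-dynamic, but __tls_get_addr returns
  // the base of the module's TLS block; the symbol's DTP-relative offset is
  // then added via @dtprel@ha / @dtprel@l.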
3120 if (Model == TLSModel::LocalDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
      setUsesTOCBasePtr(DAG);
      SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
      GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
                           GOTReg, TGA);
    } else {
      if (picLevel == PICLevel::SmallPIC)
        GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
    SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
                                  PtrVT, GOTPtr, TGA, TGA);
    SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
                                      PtrVT, TLSAddr, TGA);
    return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
  }
3141 llvm_unreachable("Unknown TLS model!");
3144 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3145 SelectionDAG &DAG) const {
3146 EVT PtrVT = Op.getValueType();
3147 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  SDLoc DL(GSDN);
  const GlobalValue *GV = GSDN->getGlobal();
3151 // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3152 // The actual address of the GlobalValue is stored in the TOC.
3153 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3154 if (Subtarget.isUsingPCRelativeCalls()) {
3155 EVT Ty = getPointerTy(DAG.getDataLayout());
3156 if (isAccessedAsGotIndirect(Op)) {
3157 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3158 PPCII::MO_PCREL_FLAG |
3159 PPCII::MO_GOT_FLAG);
3160 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3161 SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
                                   MachinePointerInfo());
        return Load;
      }
3165 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3166 PPCII::MO_PCREL_FLAG);
3167 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3170 setUsesTOCBasePtr(DAG);
3171 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
    return getTOCEntry(DAG, DL, GA);
  }
3175 unsigned MOHiFlag, MOLoFlag;
3176 bool IsPIC = isPositionIndependent();
3177 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3179 if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                            GSDN->getOffset(),
                                            PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, DL, GA);
  }
  SDValue GAHi =
      DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
  SDValue GALo =
      DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

  return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
}
3194 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3195 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  if (Op.getValueType() == MVT::v2i64) {
3199 // When the operands themselves are v2i64 values, we need to do something
3200 // special because VSX has no underlying comparison operations for these.
3201 if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3202 // Equality can be handled by casting to the legal type for Altivec
3203 // comparisons, everything else needs to be expanded.
3204 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3205 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3206 DAG.getSetCC(dl, MVT::v4i32,
3207 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
            DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
            CC));
      }

      return SDValue();
    }

    // We handle most of these in the usual way.
    return Op;
  }
3219 // If we're comparing for equality to zero, expose the fact that this is
3220 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3221 // fold the new nodes.
  if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
    return V;
3225 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3226 // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }
3233 // If we have an integer seteq/setne, turn it into a compare against zero
3234 // by xor'ing the rhs with the lhs, which is faster than setting a
3235 // condition register, reading it back out, and masking the correct bit. The
3236 // normal approach here uses sub to do this instead of xor. Using xor exposes
3237 // the result to other bit-twiddling opportunities.
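  // Illustrative example (not from the original source): (a == b) becomes
  // setcc((a ^ b), 0, eq), which lowerCmpEqZeroToCtlzSrl above can then turn
  // into a cntlzw/srwi-style sequence.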
3238 EVT LHSVT = Op.getOperand(0).getValueType();
3239 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3240 EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
  }

  return SDValue();
}
3248 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3249 SDNode *Node = Op.getNode();
3250 EVT VT = Node->getValueType(0);
3251 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3252 SDValue InChain = Node->getOperand(0);
3253 SDValue VAListPtr = Node->getOperand(1);
3254 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
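  // Overview (summary comment, not from the original source): va_arg on
  // 32-bit SVR4 reads the gpr/fpr index byte from the va_list; if it is still
  // below 8 the value comes from the register save area, otherwise from the
  // overflow (stack) area, and the index byte and overflow pointer are
  // updated accordingly.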
3260 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3261 VAListPtr, MachinePointerInfo(SV), MVT::i8);
3262 InChain = GprIndex.getValue(1);
3264 if (VT == MVT::i64) {
3265 // Check if GprIndex is even
3266 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3267 DAG.getConstant(1, dl, MVT::i32));
3268 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3269 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3270 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3271 DAG.getConstant(1, dl, MVT::i32));
3272 // Align GprIndex to be even if it isn't
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }
3277 // fpr index is 1 byte after gpr
3278 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3279 DAG.getConstant(1, dl, MVT::i32));
3282 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3283 FprPtr, MachinePointerInfo(SV), MVT::i8);
3284 InChain = FprIndex.getValue(1);
3286 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3287 DAG.getConstant(8, dl, MVT::i32));
3289 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3290 DAG.getConstant(4, dl, MVT::i32));
3293 SDValue OverflowArea =
3294 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3295 InChain = OverflowArea.getValue(1);
3297 SDValue RegSaveArea =
3298 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3299 InChain = RegSaveArea.getValue(1);
  // select overflow_area if index >= 8
3302 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3303 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3305 // adjustment constant gpr_index * 4/8
3306 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3307 VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));
3311 // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);
3315 // Floating types are 32 bytes into RegSaveArea
3316 if (VT.isFloatingPoint())
3317 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3318 DAG.getConstant(32, dl, MVT::i32));
3320 // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3321 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3322 VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));
3326 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3327 VT.isInteger() ? VAListPtr : FprPtr,
3328 MachinePointerInfo(SV), MVT::i8);
3330 // determine if we should load from reg_save_area or overflow_area
3331 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
  // increase overflow_area by 4/8 if gpr/fpr index is >= 8
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                          dl, MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);
3341 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3342 MachinePointerInfo(), MVT::i32);
  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
}
3347 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3348 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
  // We have to copy the entire va_list struct:
  // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes.
3352 return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3353 DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3354 false, true, false, MachinePointerInfo(),
                       MachinePointerInfo());
}
3358 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3359 SelectionDAG &DAG) const {
3360 if (Subtarget.isAIXABI())
3361 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3363 return Op.getOperand(0);
3366 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3367 SelectionDAG &DAG) const {
3368 if (Subtarget.isAIXABI())
3369 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3371 SDValue Chain = Op.getOperand(0);
3372 SDValue Trmp = Op.getOperand(1); // trampoline
3373 SDValue FPtr = Op.getOperand(2); // nested function
3374 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
3378 bool isPPC64 = (PtrVT == MVT::i64);
3379 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3381 TargetLowering::ArgListTy Args;
3382 TargetLowering::ArgListEntry Entry;
3384 Entry.Ty = IntPtrTy;
3385 Entry.Node = Trmp; Args.push_back(Entry);
3387 // TrampSize == (isPPC64 ? 48 : 40);
3388 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3389 isPPC64 ? MVT::i64 : MVT::i32);
3390 Args.push_back(Entry);
3392 Entry.Node = FPtr; Args.push_back(Entry);
3393 Entry.Node = Nest; Args.push_back(Entry);
3395 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3396 TargetLowering::CallLoweringInfo CLI(DAG);
3397 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3398 CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3399 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3401 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}
3405 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3406 MachineFunction &MF = DAG.getMachineFunction();
3407 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3408 EVT PtrVT = getPointerTy(MF.getDataLayout());
  SDLoc dl(Op);

  if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3413 // vastart just stores the address of the VarArgsFrameIndex slot into the
3414 // memory location argument.
3415 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3416 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3417 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }
3421 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3422 // We suppose the given va_list is already allocated.
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];
3445 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3446 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
  SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
                                            PtrVT);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 PtrVT);
3452 uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3453 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3455 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3456 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3458 uint64_t FPROffset = 1;
3459 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3461 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3463 // Store first byte : number of int regs
3464 SDValue firstStore =
3465 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3466 MachinePointerInfo(SV), MVT::i8);
3467 uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);
3471 // Store second byte : number of float regs
3472 SDValue secondStore =
3473 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3474 MachinePointerInfo(SV, nextOffset), MVT::i8);
3475 nextOffset += StackOffset;
3476 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3478 // Store second word : arguments given on stack
3479 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3480 MachinePointerInfo(SV, nextOffset));
3481 nextOffset += FrameOffset;
3482 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3484 // Store third word : arguments given in registers
3485 return DAG.getStore(thirdStore, dl, FR, nextPtr,
                      MachinePointerInfo(SV, nextOffset));
}
3489 /// FPR - The set of FP registers that should be allocated for arguments
3490 /// on Darwin and AIX.
3491 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
3492 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
3493 PPC::F11, PPC::F12, PPC::F13};
3495 /// QFPR - The set of QPX registers that should be allocated for arguments.
3496 static const MCPhysReg QFPR[] = {
3497 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
3498 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
3502 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3503 unsigned PtrByteSize) {
3504 unsigned ArgSize = ArgVT.getStoreSize();
3505 if (Flags.isByVal())
3506 ArgSize = Flags.getByValSize();
3508 // Round up to multiples of the pointer size, except for array members,
3509 // which are always packed.
3510 if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}
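// Worked example (illustrative, not from the original source): with a
// PtrByteSize of 8, a 13-byte byval argument reserves ((13 + 7) / 8) * 8
// == 16 bytes, while a 13-byte packed array member reserves exactly 13.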
/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
3518 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3519 ISD::ArgFlagsTy Flags,
3520 unsigned PtrByteSize) {
3521 Align Alignment(PtrByteSize);
3523 // Altivec parameters are padded to a 16 byte boundary.
3524 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3525 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3526 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3527 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3528 Alignment = Align(16);
  // QPX vector types stored in double-precision are padded to a 32 byte
  // boundary.
3531 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3532 Alignment = Align(32);
3534 // ByVal parameters are aligned as requested.
3535 if (Flags.isByVal()) {
3536 auto BVAlign = Flags.getNonZeroByValAlign();
3537 if (BVAlign > PtrByteSize) {
      if (BVAlign.value() % PtrByteSize != 0)
        report_fatal_error(
            "ByVal alignment is not a multiple of the pointer size");

      Alignment = BVAlign;
    }
  }
3546 // Array members are always packed to their original alignment.
3547 if (Flags.isInConsecutiveRegs()) {
3548 // If the array member was split into multiple registers, the first
3549 // needs to be aligned to the size of the full type. (Except for
3550 // ppcf128, which is only aligned as its f64 components.)
3551 if (Flags.isSplit() && OrigVT != MVT::ppcf128)
      Alignment = Align(OrigVT.getStoreSize());
    else
      Alignment = Align(ArgVT.getStoreSize());
  }

  return Alignment;
}
3560 /// CalculateStackSlotUsed - Return whether this argument will use its
3561 /// stack slot (instead of being passed in registers). ArgOffset,
3562 /// AvailableFPRs, and AvailableVRs must hold the current argument
3563 /// position, and will be updated to account for this argument.
3564 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3565 ISD::ArgFlagsTy Flags,
3566 unsigned PtrByteSize,
3567 unsigned LinkageSize,
3568 unsigned ParamAreaSize,
3569 unsigned &ArgOffset,
3570 unsigned &AvailableFPRs,
3571 unsigned &AvailableVRs, bool HasQPX) {
3572 bool UseMemory = false;
3574 // Respect alignment of argument on the stack.
  Align Alignment =
      CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3577 ArgOffset = alignTo(ArgOffset, Alignment);
3578 // If there's no space left in the argument save area, we must
3579 // use memory (this check also catches zero-sized arguments).
  if (ArgOffset >= LinkageSize + ParamAreaSize)
    UseMemory = true;
3583 // Allocate argument on the stack.
3584 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3585 if (Flags.isInConsecutiveRegsLast())
3586 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3587 // If we overran the argument save area, we must use memory
3588 // (this check catches arguments passed partially in memory)
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;
3592 // However, if the argument is actually passed in an FPR or a VR,
3593 // we don't use memory after all.
3594 if (!Flags.isByVal()) {
3595 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3596 // QPX registers overlap with the scalar FP registers.
3597 (HasQPX && (ArgVT == MVT::v4f32 ||
3598 ArgVT == MVT::v4f64 ||
3599 ArgVT == MVT::v4i1)))
      if (AvailableFPRs > 0) {
        AvailableFPRs--;
        return false;
      }
3604 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3605 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3606 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3607 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
      if (AvailableVRs > 0) {
        AvailableVRs--;
        return false;
      }
  }

  return UseMemory;
}
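// For instance (illustrative): an f64 argument while AvailableFPRs > 0
// consumes an FPR and reports that no stack slot is used, even though
// ArgOffset still advances past the argument's save-area slot.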
3617 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3618 /// ensure minimum alignment required for target.
3619 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3620 unsigned NumBytes) {
  return alignTo(NumBytes, Lowering->getStackAlign());
}
3624 SDValue PPCTargetLowering::LowerFormalArguments(
3625 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3626 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3627 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3628 if (Subtarget.isAIXABI())
    return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                    InVals);
  if (Subtarget.is64BitELFABI())
    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                       InVals);
  if (Subtarget.is32BitELFABI())
    return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                       InVals);

  return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                     InVals);
}
3642 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3643 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3644 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3645 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3647 // 32-bit SVR4 ABI Stack Frame Layout:
3648 // +-----------------------------------+
3649 // +--> | Back chain |
3650 // | +-----------------------------------+
3651 // | | Floating-point register save area |
3652 // | +-----------------------------------+
3653 // | | General register save area |
3654 // | +-----------------------------------+
3655 // | | CR save word |
3656 // | +-----------------------------------+
3657 // | | VRSAVE save word |
3658 // | +-----------------------------------+
3659 // | | Alignment padding |
3660 // | +-----------------------------------+
3661 // | | Vector register save area |
3662 // | +-----------------------------------+
3663 // | | Local variable space |
3664 // | +-----------------------------------+
3665 // | | Parameter list area |
3666 // | +-----------------------------------+
3667 // | | LR save word |
3668 // | +-----------------------------------+
3669 // SP--> +--- | Back chain |
3670 // +-----------------------------------+
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual
3676 MachineFunction &MF = DAG.getMachineFunction();
3677 MachineFrameInfo &MFI = MF.getFrameInfo();
3678 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3680 EVT PtrVT = getPointerTy(MF.getDataLayout());
3681 // Potential tail calls could cause overwriting of argument stack slots.
3682 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3683 (CallConv == CallingConv::Fast));
3684 const Align PtrAlign(4);
3686 // Assign locations to all of the incoming arguments.
3687 SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());
3691 // Reserve space for the linkage area on the stack.
3692 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3693 CCInfo.AllocateStack(LinkageSize, PtrAlign);
3695 CCInfo.PreAnalyzeFormalArguments(Ins);
3697 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3698 CCInfo.clearWasPPCF128();
3700 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3701 CCValAssign &VA = ArgLocs[i];
3703 // Arguments stored in registers.
3704 if (VA.isRegLoc()) {
3705 const TargetRegisterClass *RC;
3706 EVT ValVT = VA.getValVT();
      switch (ValVT.getSimpleVT().SimpleTy) {
        default:
          llvm_unreachable("ValVT not supported by formal arguments Lowering");
        case MVT::i1:
        case MVT::i32:
          RC = &PPC::GPRCRegClass;
          break;
        case MVT::f32:
          if (Subtarget.hasP8Vector())
            RC = &PPC::VSSRCRegClass;
          else if (Subtarget.hasSPE())
            RC = &PPC::GPRCRegClass;
          else
            RC = &PPC::F4RCRegClass;
          break;
        case MVT::f64:
          if (Subtarget.hasVSX())
            RC = &PPC::VSFRCRegClass;
          else if (Subtarget.hasSPE())
            // SPE passes doubles in GPR pairs.
            RC = &PPC::GPRCRegClass;
          else
            RC = &PPC::F8RCRegClass;
          break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f32:
          RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
          break;
        case MVT::v2f64:
        case MVT::v2i64:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f64:
          RC = &PPC::QFRCRegClass;
          break;
        case MVT::v4i1:
          RC = &PPC::QBRCRegClass;
          break;
      }
      SDValue ArgValue;
      // Transform the arguments stored in physical registers into
      // virtual ones.
      if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3756 assert(i + 1 < e && "No second half of double precision argument");
3757 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3758 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3759 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3760 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3761 if (!Subtarget.isLittleEndian())
3762 std::swap (ArgValueLo, ArgValueHi);
        ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
                               ArgValueHi);
      } else {
3766 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3767 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3768 ValVT == MVT::i1 ? MVT::i32 : ValVT);
3769 if (ValVT == MVT::i1)
          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
      }

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
3776 assert(VA.isMemLoc());
      // Get the extended size of the argument type on the stack.
      unsigned ArgSize = VA.getLocVT().getStoreSize();
      // Get the actual size of the argument type.
3781 unsigned ObjSize = VA.getValVT().getStoreSize();
3782 unsigned ArgOffset = VA.getLocMemOffset();
3783 // Stack objects in PPC32 are right justified.
3784 ArgOffset += ArgSize - ObjSize;
3785 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3787 // Create load nodes to retrieve arguments from the stack.
3788 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(
          DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
    }
  }
3794 // Assign locations to all of the incoming aggregate by value arguments.
3795 // Aggregates passed by value are stored in the local variable space of the
3796 // caller's stack frame, right above the parameter list area.
3797 SmallVector<CCValAssign, 16> ByValArgLocs;
3798 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3799 ByValArgLocs, *DAG.getContext());
3801 // Reserve stack space for the allocations in CCInfo.
3802 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3804 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3806 // Area that is at least reserved in the caller of this function.
3807 unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3808 MinReservedArea = std::max(MinReservedArea, LinkageSize);
3810 // Set the size that is at least reserved in caller of this function. Tail
3811 // call optimized function's reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack frame.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3816 FuncInfo->setMinReservedArea(MinReservedArea);
3818 SmallVector<SDValue, 8> MemOps;
3820 // If the function takes variable number of arguments, make a frame index for
3821 // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (useSoftFloat() || hasSPE())
      NumFPArgRegs = 0;
3838 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3839 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3841 // Make room for NumGPArgRegs and NumFPArgRegs.
3842 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3843 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3845 FuncInfo->setVarArgsStackOffset(
3846 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3847 CCInfo.getNextStackOffset(), true));
3849 FuncInfo->setVarArgsFrameIndex(
3850 MFI.CreateStackObject(Depth, Align(8), false));
3851 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3853 // The fixed integer arguments of a variadic function are stored to the
3854 // VarArgsFrameIndex on the stack so that they may be loaded by
3855 // dereferencing the result of va_next.
3856 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3857 // Get an existing live-in vreg, or add a new one.
3858 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (VReg == 0)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3862 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3865 MemOps.push_back(Store);
3866 // Increment the address by four for the next argument to store
3867 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
    // is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
3875 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3876 // Get an existing live-in vreg, or add a new one.
3877 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (VReg == 0)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3881 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3884 MemOps.push_back(Store);
3885 // Increment the address by eight for the next argument to store
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }
3892 if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}
3898 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3899 // value to MVT::i64 and then truncate to the correct register size.
3900 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3901 EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3906 DAG.getValueType(ObjectVT));
3907 else if (Flags.isZExt())
3908 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3909 DAG.getValueType(ObjectVT));
  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}
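// For example (illustrative): an i32 argument arrives in a full 64-bit GPR;
// if the caller sign-extended it, AssertSext records that invariant so the
// final TRUNCATE back to i32 is known to be lossless.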
3914 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3915 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3916 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3917 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3918 // TODO: add description of PPC stack frame format, or at least some docs.
3920 bool isELFv2ABI = Subtarget.isELFv2ABI();
3921 bool isLittleEndian = Subtarget.isLittleEndian();
3922 MachineFunction &MF = DAG.getMachineFunction();
3923 MachineFrameInfo &MFI = MF.getFrameInfo();
3924 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3926 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3927 "fastcc not supported on varargs functions");
3929 EVT PtrVT = getPointerTy(MF.getDataLayout());
3930 // Potential tail calls could cause overwriting of argument stack slots.
3931 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3932 (CallConv == CallingConv::Fast));
3933 unsigned PtrByteSize = 8;
3934 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3936 static const MCPhysReg GPR[] = {
3937 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
3940 static const MCPhysReg VR[] = {
3941 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
3945 const unsigned Num_GPR_Regs = array_lengthof(GPR);
3946 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3947 const unsigned Num_VR_Regs = array_lengthof(VR);
3948 const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3950 // Do a first pass over the arguments to determine whether the ABI
3951 // guarantees that our caller has allocated the parameter save area
3952 // on its stack frame. In the ELFv1 ABI, this is always the case;
3953 // in the ELFv2 ABI, it is true if this is a vararg function or if
3954 // any parameter is located in a stack slot.
3956 bool HasParameterArea = !isELFv2ABI || isVarArg;
3957 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3958 unsigned NumBytes = LinkageSize;
3959 unsigned AvailableFPRs = Num_FPR_Regs;
3960 unsigned AvailableVRs = Num_VR_Regs;
3961 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

3965 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3966 PtrByteSize, LinkageSize, ParamAreaSize,
3967 NumBytes, AvailableFPRs, AvailableVRs,
3968 Subtarget.hasQPX()))
3969 HasParameterArea = true;
3972 // Add DAG nodes to load the arguments or copy them out of registers. On
3973 // entry to a function on PPC, the arguments start after the linkage area,
3974 // although the first ones are often in registers.
3976 unsigned ArgOffset = LinkageSize;
3977 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3978 unsigned &QFPR_idx = FPR_idx;
3979 SmallVector<SDValue, 8> MemOps;
3980 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3981 unsigned CurArgIdx = 0;
3982 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
3985 EVT ObjectVT = Ins[ArgNo].VT;
3986 EVT OrigVT = Ins[ArgNo].ArgVT;
3987 unsigned ObjSize = ObjectVT.getStoreSize();
3988 unsigned ArgSize = ObjSize;
3989 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3990 if (Ins[ArgNo].isOrigArg()) {
3991 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3992 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3994 // We re-align the argument offset for each argument, except when using the
3995 // fast calling convention, when we need to make sure we do that only when
3996 // we'll actually use a stack slot.
3997 unsigned CurArgOffset;
3999 auto ComputeArgOffset = [&]() {
4000 /* Respect alignment of argument on the stack. */
      Align Alignment =
          CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4003 ArgOffset = alignTo(ArgOffset, Alignment);
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset. */
4011 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }
4015 // FIXME the codegen can be much improved in some cases.
4016 // We do not have to keep everything in memory.
4017 if (Flags.isByVal()) {
4018 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();
4023 // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4024 ObjSize = Flags.getByValSize();
4025 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Empty aggregate parameters do not take up registers.  Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc.  However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }
4040 // Create a stack object covering all stack doublewords occupied
4041 // by the argument. If the argument is (fully or partially) on
4042 // the stack, or if the argument is fully in registers but the
4043 // caller has allocated the parameter save anyway, we can refer
4044 // directly to the caller's stack frame. Otherwise, create a
4045 // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4052 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4054 // Handle aggregates smaller than 8 bytes.
4055 if (ObjSize < PtrByteSize) {
4056 // The value of the object is its address, which differs from the
4057 // address of the enclosing doubleword on big-endian systems.
        SDValue Arg = FIN;
        if (!isLittleEndian) {
4060 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
4063 InVals.push_back(Arg);
4065 if (GPR_idx != Num_GPR_Regs) {
4066 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4067 FuncInfo->addLiveInAttr(VReg, Flags);
4068 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4072 EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4073 (ObjSize == 2 ? MVT::i16 : MVT::i32));
4074 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4075 MachinePointerInfo(&*FuncArg), ObjType);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(&*FuncArg));
          }
          MemOps.push_back(Store);
        }
4086 // Whether we copied from a register or not, advance the offset
4087 // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;

        continue;
      }
4092 // The value of the object is its address, which is the address of
4093 // its first stack doubleword.
4094 InVals.push_back(FIN);
4096 // Store whatever pieces of the object are in registers to memory.
4097 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

4101 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4102 FuncInfo->addLiveInAttr(VReg, Flags);
4103 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
4109 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4110 MachinePointerInfo(&*FuncArg, j));
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;

      continue;
    }
4118 switch (ObjectVT.getSimpleVT().SimpleTy) {
4119 default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
4124 // The 'nest' parameter, if any, is passed in R11.
4125 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4126 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4128 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }
4134 // These can be scalar arguments or elements of an integer array type
4135 // passed directly. Clang may use those instead of "byval" aggregate
4136 // types to avoid forcing arguments to memory unnecessarily.
4137 if (GPR_idx != Num_GPR_Regs) {
4138 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4139 FuncInfo->addLiveInAttr(VReg, Flags);
4140 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4142 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4143 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4144 // value to MVT::i64 and then truncate to the correct register size.
4145 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
4159 // These can be scalar arguments or elements of a float array type
4160 // passed directly. The latter are used to implement ELFv2 homogenous
4161 // float aggregates.
4162 if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
4166 VReg = MF.addLiveIn(FPR[FPR_idx],
4167 Subtarget.hasP8Vector()
4168 ? &PPC::VSSRCRegClass
4169 : &PPC::F4RCRegClass);
4171 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4172 ? &PPC::VSFRCRegClass
4173 : &PPC::F8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
4177 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4178 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4179 // once we support fp <-> gpr moves.
4181 // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
4184 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4185 FuncInfo->addLiveInAttr(VReg, Flags);
4186 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4188 if (ObjectVT == MVT::f32) {
4189 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4190 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4191 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }
4203 // When passing an array of floats, the array occupies consecutive
4204 // space in the argument area; only round up to the next doubleword
4205 // at the end of the array. Otherwise, each float takes 8 bytes.
4206 if (CallConv != CallingConv::Fast || needsLoad) {
4207 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4208 ArgOffset += ArgSize;
4209 if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;

    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      if (!Subtarget.hasQPX()) {
4222 // These can be scalar arguments or elements of a vector array type
4223 // passed directly. The latter are used to implement ELFv2 homogenous
4224 // vector aggregates.
4225 if (VR_idx != Num_VR_Regs) {
4226 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
          ++VR_idx;
        } else {
          if (CallConv == CallingConv::Fast)
            ComputeArgOffset();

          needsLoad = true;
        }
        if (CallConv != CallingConv::Fast || needsLoad)
          ArgOffset += 16;
        break;
      } // not QPX
4239 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
4240 "Invalid QPX parameter type");
4245 // QPX vectors are treated like their scalar floating-point subregisters
4246 // (except that they're larger).
4247 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
4248 if (QFPR_idx != Num_QFPR_Regs) {
4249 const TargetRegisterClass *RC;
4250 switch (ObjectVT.getSimpleVT().SimpleTy) {
4251 case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
4252 case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
4253 default: RC = &PPC::QBRCRegClass; break;
4256 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }
4269 // We need to load the argument to a virtual register if we determined
4270 // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
4273 CurArgOffset += ArgSize - ObjSize;
4274 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4275 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }
4282 // Area that is at least reserved in the caller of this function.
4283 unsigned MinReservedArea;
4284 if (HasParameterArea)
4285 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4287 MinReservedArea = LinkageSize;
4289 // Set the size that is at least reserved in caller of this function. Tail
4290 // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack frame.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4295 FuncInfo->setMinReservedArea(MinReservedArea);
4297 // If the function takes variable number of arguments, make a frame index for
4298 // the start of the first vararg value... for expansion of llvm.va_start.
  // The ELFv2 ABI spec says:
  //   C programs that are intended to be *portable* across different
  //   compilers and architectures must use the header file <stdarg.h> to
  //   deal with variable argument lists.
4303 if (isVarArg && MFI.hasVAStart()) {
4304 int Depth = ArgOffset;
4306 FuncInfo->setVarArgsFrameIndex(
4307 MFI.CreateFixedObject(PtrByteSize, Depth, true));
4308 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4310 // If this function is vararg, store any remaining integer argument regs
4311 // to their spots on the stack so that they may be loaded by dereferencing
4312 // the result of va_next.
4313 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4314 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4315 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4316 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4319 MemOps.push_back(Store);
      // Increment the address by PtrByteSize for the next argument to store.
4321 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}
4332 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4333 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4334 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4335 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4336 // TODO: add description of PPC stack frame format, or at least some docs.
4338 MachineFunction &MF = DAG.getMachineFunction();
4339 MachineFrameInfo &MFI = MF.getFrameInfo();
4340 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4342 EVT PtrVT = getPointerTy(MF.getDataLayout());
4343 bool isPPC64 = PtrVT == MVT::i64;
4344 // Potential tail calls could cause overwriting of argument stack slots.
4345 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4346 (CallConv == CallingConv::Fast));
4347 unsigned PtrByteSize = isPPC64 ? 8 : 4;
4348 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4349 unsigned ArgOffset = LinkageSize;
4350 // Area that is at least reserved in caller of this function.
4351 unsigned MinReservedArea = ArgOffset;
4353 static const MCPhysReg GPR_32[] = { // 32-bit registers.
4354 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
4357 static const MCPhysReg GPR_64[] = { // 64-bit registers.
4358 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
4361 static const MCPhysReg VR[] = {
4362 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
4366 const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4367 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);
4370 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4372 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4374 // In 32-bit non-varargs functions, the stack space for vectors is after the
4375 // stack space for non-vectors. We do not use this space unless we have
4376 // too many vectors to fit in registers, something that only occurs in
4377 // constructed examples:), but we have to walk the arglist to figure
4378 // that out...for the pathological case, compute VecArgOffset as the
4379 // start of the vector parameter area. Computing VecArgOffset is the
4380 // entire point of the following loop.
4381 unsigned VecArgOffset = ArgOffset;
4382 if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
4385 EVT ObjectVT = Ins[ArgNo].VT;
4386 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4388 if (Flags.isByVal()) {
4389 // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4390 unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
            ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }
4397 switch(ObjectVT.getSimpleVT().SimpleTy) {
4398 default: llvm_unreachable("Unhandled argument type!");
      case MVT::i1:
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at Nonvector args here.
        break;
      }
    }
4419 // We've found where the vector parameter area in memory is. Skip the
4420 // first 12 parameters; these don't use that memory.
4421 VecArgOffset = ((VecArgOffset+15)/16)*16;
4422 VecArgOffset += 12*16;
4424 // Add DAG nodes to load the arguments or copy them out of registers. On
4425 // entry to a function on PPC, the arguments start after the linkage area,
4426 // although the first ones are often in registers.
4428 SmallVector<SDValue, 8> MemOps;
4429 unsigned nAltivecParamsAtEnd = 0;
4430 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4431 unsigned CurArgIdx = 0;
4432 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
4435 EVT ObjectVT = Ins[ArgNo].VT;
4436 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4437 unsigned ArgSize = ObjSize;
4438 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4439 if (Ins[ArgNo].isOrigArg()) {
4440 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4441 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4443 unsigned CurArgOffset = ArgOffset;
4445 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4446 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4447 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4448 if (isVarArg || isPPC64) {
4449 MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
4453 } else nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);
4460 // FIXME the codegen can be much improved in some cases.
4461 // We do not have to keep everything in memory.
4462 if (Flags.isByVal()) {
4463 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4465 // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4466 ObjSize = Flags.getByValSize();
4467 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4468 // Objects of size 1 and 2 are right justified, everything else is
4469 // left justified. This means the memory address is adjusted forwards.
4470 if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
4473 // The value of the object is its address.
4474 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4475 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4476 InVals.push_back(FIN);
4477 if (ObjSize==1 || ObjSize==2) {
4478 if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4484 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4485 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store =
              DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                MachinePointerInfo(&*FuncArg), ObjType);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
4497 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4498 // Store whatever pieces of the object are in registers
4499 // to memory. ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4507 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4508 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4509 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4510 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4511 MachinePointerInfo(&*FuncArg, j));
4512 MemOps.push_back(Store);
4514 ArgOffset += PtrByteSize;
4516 ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4523 switch (ObjectVT.getSimpleVT().SimpleTy) {
4524 default: llvm_unreachable("Unhandled argument type!");
4528 if (GPR_idx != Num_GPR_Regs) {
4529 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4530 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4532 if (ObjectVT == MVT::i1)
4533 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4538 ArgSize = PtrByteSize;
4540 // All int arguments reserve stack space in the Darwin ABI.
4541 ArgOffset += PtrByteSize;
4545 case MVT::i64: // PPC64
4546 if (GPR_idx != Num_GPR_Regs) {
4547 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4548 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4550 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4551 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4552 // value to MVT::i64 and then truncate to the correct register size.
4553 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4558 ArgSize = PtrByteSize;
4560 // All int arguments reserve stack space in the Darwin ABI.
4566 // Every 4 bytes of argument space consumes one of the GPRs available for
4567 // argument passing.
4568 if (GPR_idx != Num_GPR_Regs) {
4570 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4573 if (FPR_idx != Num_FPR_Regs) {
4576 if (ObjectVT == MVT::f32)
4577 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4579 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4581 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4587 // All FP arguments reserve stack space in the Darwin ABI.
4588 ArgOffset += isPPC64 ? 8 : ObjSize;
4594 // Note that vector arguments in registers don't reserve stack space,
4595 // except in varargs functions.
4596 if (VR_idx != Num_VR_Regs) {
4597 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4598 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4600 while ((ArgOffset % 16) != 0) {
4601 ArgOffset += PtrByteSize;
4602 if (GPR_idx != Num_GPR_Regs)
4606 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4610 if (!isVarArg && !isPPC64) {
4611 // Vectors go after all the nonvectors.
4612 CurArgOffset = VecArgOffset;
4615 // Vectors are aligned.
4616 ArgOffset = ((ArgOffset+15)/16)*16;
4617 CurArgOffset = ArgOffset;
4625 // We need to load the argument to a virtual register if we determined above
4626 // that we ran out of physical registers of the appropriate type.
4628 int FI = MFI.CreateFixedObject(ObjSize,
4629 CurArgOffset + (ArgSize - ObjSize),
4631 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4632 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4635 InVals.push_back(ArgVal);
4638 // Allow for Altivec parameters at the end, if needed.
4639 if (nAltivecParamsAtEnd) {
4640 MinReservedArea = ((MinReservedArea+15)/16)*16;
4641 MinReservedArea += 16*nAltivecParamsAtEnd;
4644 // Area that is at least reserved in the caller of this function.
4645 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
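  // E.g., assuming a 32-bit configuration where LinkageSize == 24 and
  // PtrByteSize == 4 (hypothetical numbers), the floor is 24 + 8*4 == 56
  // bytes, even for a function whose arguments all arrived in registers.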
4647 // Set the size that is at least reserved in caller of this function. Tail
4648 // call optimized functions' reserved stack space needs to be aligned so that
4649   // taking the difference between two stack areas will result in an aligned stack size.
4652 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4653 FuncInfo->setMinReservedArea(MinReservedArea);
4655 // If the function takes variable number of arguments, make a frame index for
4656 // the start of the first vararg value... for expansion of llvm.va_start.
4658 int Depth = ArgOffset;
4660 FuncInfo->setVarArgsFrameIndex(
4661 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4663 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4665 // If this function is vararg, store any remaining integer argument regs
4666 // to their spots on the stack so that they may be loaded by dereferencing
4667 // the result of va_next.
4668 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4672 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4674 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4676 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4678 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4679 MemOps.push_back(Store);
4680       // Increment the address by the pointer size for the next argument to store
4681 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4682 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4686 if (!MemOps.empty())
4687 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4692 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4693 /// adjusted to accommodate the arguments for the tailcall.
4694 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4695 unsigned ParamSize) {
4697 if (!isTailCall) return 0;
4699 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4700 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4701 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4702 // Remember only if the new adjustment is bigger.
4703 if (SPDiff < FI->getTailCallSPDelta())
4704 FI->setTailCallSPDelta(SPDiff);
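  // Illustrative numbers (hypothetical sizes): a caller that reserved 112
  // bytes tail-calling a callee whose parameters need 176 bytes yields
  // SPDiff == 112 - 176 == -64, i.e. the stack must grow by 64 bytes; the
  // most negative delta seen so far is what gets recorded.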
4709 static bool isFunctionGlobalAddress(SDValue Callee);
4711 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4712 const TargetMachine &TM) {
4713 // It does not make sense to call callsShareTOCBase() with a caller that
4714 // is PC Relative since PC Relative callers do not have a TOC.
4716 const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4717 assert(!STICaller->isUsingPCRelativeCalls() &&
4718 "PC Relative callers do not have a TOC and cannot share a TOC Base");
4721 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4722 // don't have enough information to determine if the caller and callee share
4723   // the same TOC base, so we have to pessimistically assume they don't for correctness.
4725 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4729 const GlobalValue *GV = G->getGlobal();
4731   // If the callee is preemptible, then the static linker will use a PLT stub
4732   // which saves the TOC to the stack and needs a nop after the call
4733   // instruction to convert to a TOC restore.
4734 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4737 // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4738 // We may need a TOC restore in the situation where the caller requires a
4739 // valid TOC but the callee is PC Relative and does not.
4740 const Function *F = dyn_cast<Function>(GV);
4741 const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4743 // If we have an Alias we can try to get the function from there.
4745 const GlobalObject *GlobalObj = Alias->getBaseObject();
4746 F = dyn_cast<Function>(GlobalObj);
4749 // If we still have no valid function pointer we do not have enough
4750 // information to determine if the callee uses PC Relative calls so we must
4751 // assume that it does.
4755 // If the callee uses PC Relative we cannot guarantee that the callee won't
4756 // clobber the TOC of the caller and so we must assume that the two
4757 // functions do not share a TOC base.
4758 const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4759 if (STICallee->isUsingPCRelativeCalls())
4762 // The medium and large code models are expected to provide a sufficiently
4763   // large TOC to provide all data addressing needs of a module with a single TOC.
4765 if (CodeModel::Medium == TM.getCodeModel() ||
4766 CodeModel::Large == TM.getCodeModel())
4769 // Otherwise we need to ensure callee and caller are in the same section,
4770 // since the linker may allocate multiple TOCs, and we don't know which
4771 // sections will belong to the same TOC base.
4772 if (!GV->isStrongDefinitionForLinker())
4775 // Any explicitly-specified sections and section prefixes must also match.
4776 // Also, if we're using -ffunction-sections, then each function is always in
4777 // a different section (the same is true for COMDAT functions).
4778 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4779 GV->getSection() != Caller->getSection())
4781 if (const auto *F = dyn_cast<Function>(GV)) {
4782 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4790 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4791 const SmallVectorImpl<ISD::OutputArg> &Outs) {
4792 assert(Subtarget.is64BitELFABI());
4794 const unsigned PtrByteSize = 8;
4795 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4797 static const MCPhysReg GPR[] = {
4798 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4799 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4801 static const MCPhysReg VR[] = {
4802 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4803 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4806 const unsigned NumGPRs = array_lengthof(GPR);
4807 const unsigned NumFPRs = 13;
4808 const unsigned NumVRs = array_lengthof(VR);
4809 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4811 unsigned NumBytes = LinkageSize;
4812 unsigned AvailableFPRs = NumFPRs;
4813 unsigned AvailableVRs = NumVRs;
4815 for (const ISD::OutputArg& Param : Outs) {
4816 if (Param.Flags.isNest()) continue;
4818 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4819 PtrByteSize, LinkageSize, ParamAreaSize,
4820 NumBytes, AvailableFPRs, AvailableVRs,
4821 Subtarget.hasQPX()))
4827 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4828 if (CB.arg_size() != CallerFn->arg_size())
4831 auto CalleeArgIter = CB.arg_begin();
4832 auto CalleeArgEnd = CB.arg_end();
4833 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4835 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4836 const Value* CalleeArg = *CalleeArgIter;
4837 const Value* CallerArg = &(*CallerArgIter);
4838 if (CalleeArg == CallerArg)
4841 // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4842 // tail call @callee([4 x i64] undef, [4 x i64] %b)
4844     // 1st argument of callee is undef and has the same type as the caller's.
4845 if (CalleeArg->getType() == CallerArg->getType() &&
4846 isa<UndefValue>(CalleeArg))
4855 // Returns true if TCO is possible between the caller's and callee's
4856 // calling conventions.
4858 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4859 CallingConv::ID CalleeCC) {
4860 // Tail calls are possible with fastcc and ccc.
4861 auto isTailCallableCC = [] (CallingConv::ID CC){
4862 return CC == CallingConv::C || CC == CallingConv::Fast;
4864 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4867   // We can safely tail call both fastcc and ccc callees from a C calling
4868   // convention caller. If the caller is fastcc, we may have less stack space
4869   // than a non-fastcc caller with the same signature, so disable tail calls in that case.
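  // Illustratively, the rule below works out to: a ccc caller may tail call
  // ccc or fastcc callees, while a fastcc caller may only tail call fastcc
  // callees.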
4871 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4874 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4875 SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4876 const SmallVectorImpl<ISD::OutputArg> &Outs,
4877 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4878 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4880 if (DisableSCO && !TailCallOpt) return false;
4882 // Variadic argument functions are not supported.
4883 if (isVarArg) return false;
4885 auto &Caller = DAG.getMachineFunction().getFunction();
4886 // Check that the calling conventions are compatible for tco.
4887 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4890   // Callers with byval parameters are not supported.
4891 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4894   // Callees with byval parameters are not supported either.
4895   // Note: This is a quick workaround, because in some cases, e.g.
4896 // caller's stack size > callee's stack size, we are still able to apply
4897 // sibling call optimization. For example, gcc is able to do SCO for caller1
4898 // in the following example, but not for caller2.
4903 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4907 // void caller1(struct test a, struct test c, struct test *b) {
4908 // callee(gTest, b); }
4909 // void caller2(struct test *b) { callee(gTest, b); }
4910 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4913 // If callee and caller use different calling conventions, we cannot pass
4914 // parameters on stack since offsets for the parameter area may be different.
4915 if (Caller.getCallingConv() != CalleeCC &&
4916 needStackSlotPassParameters(Subtarget, Outs))
4919 // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4920 // the caller and callee share the same TOC for TCO/SCO. If the caller and
4921 // callee potentially have different TOC bases then we cannot tail call since
4922 // we need to restore the TOC pointer after the call.
4923 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4924 // We cannot guarantee this for indirect calls or calls to external functions.
4925 // When PC-Relative addressing is used, the concept of the TOC is no longer
4926 // applicable so this check is not required.
4927 // Check first for indirect calls.
4928 if (!Subtarget.isUsingPCRelativeCalls() &&
4929 !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4932 // Check if we share the TOC base.
4933 if (!Subtarget.isUsingPCRelativeCalls() &&
4934 !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4937 // TCO allows altering callee ABI, so we don't have to check further.
4938 if (CalleeCC == CallingConv::Fast && TailCallOpt)
4941 if (DisableSCO) return false;
4943   // If the callee uses the same argument list as the caller, we can apply SCO
4944   // in this case. If not, we need to check whether the callee needs
4945   // stack for passing arguments.
4946 // PC Relative tail calls may not have a CallBase.
4947 // If there is no CallBase we cannot verify if we have the same argument
4948 // list so assume that we don't have the same argument list.
4949 if (CB && !hasSameArgumentList(&Caller, *CB) &&
4950 needStackSlotPassParameters(Subtarget, Outs))
4952 else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4958 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4959 /// for tail call optimization. Targets which want to do tail call
4960 /// optimization should implement this function.
4962 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4963 CallingConv::ID CalleeCC,
4965 const SmallVectorImpl<ISD::InputArg> &Ins,
4966 SelectionDAG& DAG) const {
4967 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4970 // Variable argument functions are not supported.
4974 MachineFunction &MF = DAG.getMachineFunction();
4975 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4976 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4977 // Functions containing by val parameters are not supported.
4978 for (unsigned i = 0; i != Ins.size(); i++) {
4979 ISD::ArgFlagsTy Flags = Ins[i].Flags;
4980 if (Flags.isByVal()) return false;
4983 // Non-PIC/GOT tail calls are supported.
4984 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4987 // At the moment we can only do local tail calls (in same module, hidden
4988 // or protected) if we are generating PIC.
4989 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4990 return G->getGlobal()->hasHiddenVisibility()
4991 || G->getGlobal()->hasProtectedVisibility();
4997 /// isBLACompatibleAddress - Return the immediate to use if the specified
4998 /// 32-bit value is representable in the immediate field of a BxA instruction.
4999 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
5000 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
5001 if (!C) return nullptr;
5003 int Addr = C->getZExtValue();
5004 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
5005 SignExtend32<26>(Addr) != Addr)
5006 return nullptr; // Top 6 bits have to be sext of immediate.
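  // Worked example (hypothetical addresses): 0x01FFFFFC is word-aligned and
  // unchanged by SignExtend32<26>, so it is encodable; 0x02000000 sign-extends
  // to 0xFE000000 and is rejected, as is anything with the low two bits set.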
5010 (int)C->getZExtValue() >> 2, SDLoc(Op),
5011 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
5017 struct TailCallArgumentInfo {
5022 TailCallArgumentInfo() = default;
5025 } // end anonymous namespace
5027 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
5028 static void StoreTailCallArgumentsToStackSlot(
5029 SelectionDAG &DAG, SDValue Chain,
5030 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
5031 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
5032 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
5033 SDValue Arg = TailCallArgs[i].Arg;
5034 SDValue FIN = TailCallArgs[i].FrameIdxOp;
5035 int FI = TailCallArgs[i].FrameIdx;
5036     // Store relative to frame pointer.
5037 MemOpChains.push_back(DAG.getStore(
5038 Chain, dl, Arg, FIN,
5039 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
5043 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
5044 /// the appropriate stack slot for the tail call optimized function call.
5045 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
5046 SDValue OldRetAddr, SDValue OldFP,
5047 int SPDiff, const SDLoc &dl) {
5049 // Calculate the new stack slot for the return address.
5050 MachineFunction &MF = DAG.getMachineFunction();
5051 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
5052 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
5053 bool isPPC64 = Subtarget.isPPC64();
5054 int SlotSize = isPPC64 ? 8 : 4;
5055 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
5056 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
5057 NewRetAddrLoc, true);
5058 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5059 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
5060 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
5061 MachinePointerInfo::getFixedStack(MF, NewRetAddr));
5066 /// CalculateTailCallArgDest - Remember the argument for later processing. Calculate
5067 /// the position of the argument.
5069 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
5070 SDValue Arg, int SPDiff, unsigned ArgOffset,
5071 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
5072 int Offset = ArgOffset + SPDiff;
5073 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
5074 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
5075 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
5076 SDValue FIN = DAG.getFrameIndex(FI, VT);
5077 TailCallArgumentInfo Info;
5079 Info.FrameIdxOp = FIN;
5081 TailCallArguments.push_back(Info);
5084 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and
5085 /// return address stack slots. Returns the chain as result and the loaded
5086 /// values in LROpOut/FPOpOut. Used when tail calling.
5087 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
5088 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
5089 SDValue &FPOpOut, const SDLoc &dl) const {
5091 // Load the LR and FP stack slot for later adjusting.
5092 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5093 LROpOut = getReturnAddrFrameIndex(DAG);
5094 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
5095 Chain = SDValue(LROpOut.getNode(), 1);
5100 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
5101 /// by "Src" to address "Dst" of size "Size". Alignment information is
5102 /// specified by the specific parameter attribute. The copy will be passed as
5103 /// a byval function parameter.
5104 /// Sometimes what we are copying is the end of a larger object, the part that
5105 /// does not fit in registers.
5106 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
5107 SDValue Chain, ISD::ArgFlagsTy Flags,
5108 SelectionDAG &DAG, const SDLoc &dl) {
5109 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
5110 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
5111 Flags.getNonZeroByValAlign(), false, false, false,
5112 MachinePointerInfo(), MachinePointerInfo());
5115 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls.
5117 static void LowerMemOpCallTo(
5118 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
5119 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
5120 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
5121 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
5122 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5127 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5129 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5130 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5131 DAG.getConstant(ArgOffset, dl, PtrVT));
5133 MemOpChains.push_back(
5134 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5135 // Calculate and remember argument location.
5136 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
5141 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
5142 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
5144 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
5145 // Emit a sequence of copyto/copyfrom virtual registers for arguments that
5146 // might overwrite each other in case of tail call optimization.
5147 SmallVector<SDValue, 8> MemOpChains2;
5148 // Do not flag preceding copytoreg stuff together with the following stuff.
5150 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
5152 if (!MemOpChains2.empty())
5153 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
5155 // Store the return address to the appropriate stack slot.
5156 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5158 // Emit callseq_end just before tailcall node.
5159 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5160 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
5161 InFlag = Chain.getValue(1);
5164 // Is this global address that of a function that can be called by name? (as
5165 // opposed to something that must hold a descriptor for an indirect call).
5166 static bool isFunctionGlobalAddress(SDValue Callee) {
5167 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5168 if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5169 Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5172 return G->getGlobal()->getValueType()->isFunctionTy();
5178 SDValue PPCTargetLowering::LowerCallResult(
5179 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5180 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5181 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5182 SmallVector<CCValAssign, 16> RVLocs;
5183 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5186 CCRetInfo.AnalyzeCallResult(
5187 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5191 // Copy all of the result registers out of their specified physreg.
5192 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5193 CCValAssign &VA = RVLocs[i];
5194 assert(VA.isRegLoc() && "Can only return in registers!");
5198 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5199 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5201 Chain = Lo.getValue(1);
5202 InFlag = Lo.getValue(2);
5203 VA = RVLocs[++i]; // skip ahead to next loc
5204 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5206 Chain = Hi.getValue(1);
5207 InFlag = Hi.getValue(2);
5208 if (!Subtarget.isLittleEndian())
5210 Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5212 Val = DAG.getCopyFromReg(Chain, dl,
5213 VA.getLocReg(), VA.getLocVT(), InFlag);
5214 Chain = Val.getValue(1);
5215 InFlag = Val.getValue(2);
5218 switch (VA.getLocInfo()) {
5219 default: llvm_unreachable("Unknown loc info!");
5220 case CCValAssign::Full: break;
5221 case CCValAssign::AExt:
5222 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5224 case CCValAssign::ZExt:
5225 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5226 DAG.getValueType(VA.getValVT()));
5227 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5229 case CCValAssign::SExt:
5230 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5231 DAG.getValueType(VA.getValVT()));
5232 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5236 InVals.push_back(Val);
5242 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5243 const PPCSubtarget &Subtarget, bool isPatchPoint) {
5244 // PatchPoint calls are not indirect.
5248   if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
5251   // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot
5252   // because the immediate function pointer points to a descriptor instead of
5253 // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5254 // pointer immediate points to the global entry point, while the BLA would
5255 // need to jump to the local entry point (see rL211174).
5256 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5257 isBLACompatibleAddress(Callee, DAG))
5263 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5264 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5265 return Subtarget.isAIXABI() ||
5266 (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5269 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5270 const Function &Caller,
5271 const SDValue &Callee,
5272 const PPCSubtarget &Subtarget,
5273 const TargetMachine &TM) {
5274 if (CFlags.IsTailCall)
5275 return PPCISD::TC_RETURN;
5277 // This is a call through a function pointer.
5278 if (CFlags.IsIndirect) {
5279     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
5280 // indirect calls. The save of the caller's TOC pointer to the stack will be
5281 // inserted into the DAG as part of call lowering. The restore of the TOC
5282 // pointer is modeled by using a pseudo instruction for the call opcode that
5283 // represents the 2 instruction sequence of an indirect branch and link,
5284     // immediately followed by a load of the TOC pointer from the stack save
5285 // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
5286 // as it is not saved or used.
5287 return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5291 if (Subtarget.isUsingPCRelativeCalls()) {
5292 assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5293 return PPCISD::CALL_NOTOC;
5296   // The ABIs that maintain a TOC pointer across calls need to have a nop
5297 // immediately following the call instruction if the caller and callee may
5298 // have different TOC bases. At link time if the linker determines the calls
5299 // may not share a TOC base, the call is redirected to a trampoline inserted
5300   // by the linker. The trampoline will (among other things) save the caller's
5301 // TOC pointer at an ABI designated offset in the linkage area and the linker
5302   // will rewrite the nop to be a load of the TOC pointer from the linkage area into gpr2.
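  // A sketch of the emitted sequence (ELFv2 shown; the save-slot offset is
  // ABI-specific, see getTOCSaveOffset()):
  //   bl callee
  //   nop            # linker may rewrite to: ld 2, 24(1)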
5304 if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5305 return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5308 return PPCISD::CALL;
5311 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5312 const SDLoc &dl, const PPCSubtarget &Subtarget) {
5313 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5314 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5315 return SDValue(Dest, 0);
5317 // Returns true if the callee is local, and false otherwise.
5318 auto isLocalCallee = [&]() {
5319 const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5320 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5321 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5323 return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5324            !isa_and_nonnull<GlobalIFunc>(GV);
5327 // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
5328 // a static relocation model causes some versions of GNU LD (2.17.50, at
5329 // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5330 // built with secure-PLT.
5332 Subtarget.is32BitELFABI() && !isLocalCallee() &&
5333 Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5335 // On AIX, direct function calls reference the symbol for the function's
5336   // entry point, which is named by prepending a "." before the function's name.
5338 const auto getAIXFuncEntryPointSymbolSDNode =
5339 [&](StringRef FuncName, bool IsDeclaration,
5340 const XCOFF::StorageClass &SC) {
5341 auto &Context = DAG.getMachineFunction().getMMI().getContext();
5343 MCSymbolXCOFF *S = cast<MCSymbolXCOFF>(
5344 Context.getOrCreateSymbol(Twine(".") + Twine(FuncName)));
5346 if (IsDeclaration && !S->hasRepresentedCsectSet()) {
5347 // On AIX, an undefined symbol needs to be associated with a
5348 // MCSectionXCOFF to get the correct storage mapping class.
5349 // In this case, XCOFF::XMC_PR.
5350 MCSectionXCOFF *Sec = Context.getXCOFFSection(
5351 S->getSymbolTableName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
5352 SectionKind::getMetadata());
5353 S->setRepresentedCsect(Sec);
5357 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5358 return DAG.getMCSymbol(S, PtrVT);
5361 if (isFunctionGlobalAddress(Callee)) {
5362 const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
5363 const GlobalValue *GV = G->getGlobal();
5365 if (!Subtarget.isAIXABI())
5366 return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5367 UsePlt ? PPCII::MO_PLT : 0);
5369 assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5370 const GlobalObject *GO = cast<GlobalObject>(GV);
5371 const XCOFF::StorageClass SC =
5372 TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO);
5373 return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(),
5377 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5378 const char *SymName = S->getSymbol();
5379 if (!Subtarget.isAIXABI())
5380 return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5381 UsePlt ? PPCII::MO_PLT : 0);
5383 // If there exists a user-declared function whose name is the same as the
5384 // ExternalSymbol's, then we pick up the user-declared version.
5385 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5386 if (const Function *F =
5387 dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) {
5388 const XCOFF::StorageClass SC =
5389 TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F);
5390 return getAIXFuncEntryPointSymbolSDNode(F->getName(), F->isDeclaration(),
5394 return getAIXFuncEntryPointSymbolSDNode(SymName, true, XCOFF::C_EXT);
5397 // No transformation needed.
5398 assert(Callee.getNode() && "What no callee?");
5402 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5403 assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5404 "Expected a CALLSEQ_STARTSDNode.");
5406 // The last operand is the chain, except when the node has glue. If the node
5407   // has glue, then the last operand is the glue, and the chain is the second-to-last operand.
5409 SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5410 if (LastValue.getValueType() != MVT::Glue)
5413 return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5416 // Creates the node that moves a function's address into the count register
5417 // to prepare for an indirect call instruction.
5418 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5419 SDValue &Glue, SDValue &Chain,
5421 SDValue MTCTROps[] = {Chain, Callee, Glue};
5422 EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5423 Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5424 makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5425 // The glue is the second value produced.
5426 Glue = Chain.getValue(1);
5429 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5430 SDValue &Glue, SDValue &Chain,
5431 SDValue CallSeqStart,
5432 const CallBase *CB, const SDLoc &dl,
5434 const PPCSubtarget &Subtarget) {
5435 // Function pointers in the 64-bit SVR4 ABI do not point to the function
5436 // entry point, but to the function descriptor (the function entry point
5437 // address is part of the function descriptor though).
5438 // The function descriptor is a three doubleword structure with the
5439 // following fields: function entry point, TOC base address and
5440 // environment pointer.
5441   // Thus for a call through a function pointer, the following actions need to be taken:
5443 // 1. Save the TOC of the caller in the TOC save area of its stack
5444 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5445   // 2. Load the address of the function entry point from the function descriptor.
5447 // 3. Load the TOC of the callee from the function descriptor into r2.
5448   // 4. Load the environment pointer from the function descriptor into r11.
5450 // 5. Branch to the function entry point address.
5451 // 6. On return of the callee, the TOC of the caller needs to be
5452 // restored (this is done in FinishCall()).
5454 // The loads are scheduled at the beginning of the call sequence, and the
5455 // register copies are flagged together to ensure that no other
5456 // operations can be scheduled in between. E.g. without flagging the
5457 // copies together, a TOC access in the caller could be scheduled between
5458 // the assignment of the callee TOC and the branch to the callee, which leads
5459 // to incorrect code.
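  // For reference, a 64-bit ELFv1 function descriptor can be pictured as
  // this sketch (not a type used anywhere in this file):
  //   struct FunctionDescriptor {
  //     void *EntryPoint; // offset 0, loaded into LoadFuncPtr below
  //     void *TOCBase;    // at descriptorTOCAnchorOffset()
  //     void *EnvPtr;     // at descriptorEnvironmentPointerOffset()
  //   };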
5461 // Start by loading the function address from the descriptor.
5462 SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5463 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5464 ? (MachineMemOperand::MODereferenceable |
5465 MachineMemOperand::MOInvariant)
5466 : MachineMemOperand::MONone;
5468 MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5470 // Registers used in building the DAG.
5471 const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5472 const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5474 // Offsets of descriptor members.
5475 const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5476 const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5478 const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5479 const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5481   // One load for the function's entry point address.
5482 SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5483 Alignment, MMOFlags);
5485   // One for loading the TOC anchor for the module that contains the called function.
5487 SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5488 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5490   SDValue TOCPtr = DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5491 MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5493 // One for loading the environment pointer.
5494 SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5495 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5496 SDValue LoadEnvPtr =
5497 DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5498 MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5501 // Then copy the newly loaded TOC anchor to the TOC pointer.
5502 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5503 Chain = TOCVal.getValue(0);
5504 Glue = TOCVal.getValue(1);
5506 // If the function call has an explicit 'nest' parameter, it takes the
5507 // place of the environment pointer.
5508 assert((!hasNest || !Subtarget.isAIXABI()) &&
5509 "Nest parameter is not supported on AIX.");
5511 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5512 Chain = EnvVal.getValue(0);
5513 Glue = EnvVal.getValue(1);
5516   // The rest of the indirect call sequence is the same as the non-descriptor case.
5518 prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5522 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5523 PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5525 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5526 SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5527 const PPCSubtarget &Subtarget) {
5528 const bool IsPPC64 = Subtarget.isPPC64();
5529 // MVT for a general purpose register.
5530 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5532 // First operand is always the chain.
5533 Ops.push_back(Chain);
5535   // If it's a direct call, pass the callee as the second operand.
5536 if (!CFlags.IsIndirect)
5537 Ops.push_back(Callee);
5539 assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5541 // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5542 // on the stack (this would have been done in `LowerCall_64SVR4` or
5543 // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5544 // represents both the indirect branch and a load that restores the TOC
5545 // pointer from the linkage area. The operand for the TOC restore is an add
5546 // of the TOC save offset to the stack pointer. This must be the second
5547 // operand: after the chain input but before any other variadic arguments.
5548   // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not saved or used.
5550 if (isTOCSaveRestoreRequired(Subtarget)) {
5551 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5553 SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5554 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5555 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5556 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5557 Ops.push_back(AddTOC);
5560 // Add the register used for the environment pointer.
5561 if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5562 Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5566 // Add CTR register as callee so a bctr can be emitted later.
5567 if (CFlags.IsTailCall)
5568 Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5571   // If this is a tail call, add the stack pointer delta.
5572 if (CFlags.IsTailCall)
5573 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5575   // Add argument registers to the end of the list so that they are known live into the call.
5577 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5578 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5579 RegsToPass[i].second.getValueType()));
5581 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5582 // no way to mark dependencies as implicit here.
5583 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5584 if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5585 !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5586 Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5588 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5589 if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5590 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5592 // Add a register mask operand representing the call-preserved registers.
5593 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5594 const uint32_t *Mask =
5595 TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5596 assert(Mask && "Missing call preserved mask for calling convention");
5597 Ops.push_back(DAG.getRegisterMask(Mask));
5599 // If the glue is valid, it is the last operand.
5601 Ops.push_back(Glue);
5604 SDValue PPCTargetLowering::FinishCall(
5605 CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5606 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5607 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5608 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5609 SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5611 if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5612 Subtarget.isAIXABI())
5613 setUsesTOCBasePtr(DAG);
5616 getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5617 Subtarget, DAG.getTarget());
5619 if (!CFlags.IsIndirect)
5620 Callee = transformCallee(Callee, DAG, dl, Subtarget);
5621 else if (Subtarget.usesFunctionDescriptors())
5622 prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5623 dl, CFlags.HasNest, Subtarget);
5625 prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5627 // Build the operand list for the call instruction.
5628 SmallVector<SDValue, 8> Ops;
5629 buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5633 if (CFlags.IsTailCall) {
5634     // Indirect tail calls when using PC Relative calls do not have the same constraints.
5636 assert(((Callee.getOpcode() == ISD::Register &&
5637 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5638 Callee.getOpcode() == ISD::TargetExternalSymbol ||
5639 Callee.getOpcode() == ISD::TargetGlobalAddress ||
5640 isa<ConstantSDNode>(Callee) ||
5641 (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5642 "Expecting a global address, external symbol, absolute value, "
5643 "register or an indirect tail call when PC Relative calls are "
5645 // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5646 assert(CallOpc == PPCISD::TC_RETURN &&
5647 "Unexpected call opcode for a tail call.");
5648 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5649 return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5652 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5653 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5654 DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5655 Glue = Chain.getValue(1);
5657 // When performing tail call optimization the callee pops its arguments off
5658 // the stack. Account for this here so these bytes can be pushed back on in
5659 // PPCFrameLowering::eliminateCallFramePseudoInstr.
5660 int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5661 getTargetMachine().Options.GuaranteedTailCallOpt)
5665 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5666 DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5668 Glue = Chain.getValue(1);
5670 return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5675 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5676 SmallVectorImpl<SDValue> &InVals) const {
5677 SelectionDAG &DAG = CLI.DAG;
5679 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5680 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
5681 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
5682 SDValue Chain = CLI.Chain;
5683 SDValue Callee = CLI.Callee;
5684 bool &isTailCall = CLI.IsTailCall;
5685 CallingConv::ID CallConv = CLI.CallConv;
5686 bool isVarArg = CLI.IsVarArg;
5687 bool isPatchPoint = CLI.IsPatchPoint;
5688 const CallBase *CB = CLI.CB;
5691 if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5693 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5694 isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5695 Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5697 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5701 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5704 // PC Relative calls no longer guarantee that the callee is a Global
5705 // Address Node. The callee could be an indirect tail call in which
5706 // case the SDValue for the callee could be a load (to load the address
5707 // of a function pointer) or it may be a register copy (to move the
5708 // address of the callee from a function parameter into a virtual
5709     // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5710 assert((Subtarget.isUsingPCRelativeCalls() ||
5711 isa<GlobalAddressSDNode>(Callee)) &&
5712 "Callee should be an llvm::Function object.");
5714 LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5715 << "\nTCO callee: ");
5716 LLVM_DEBUG(Callee.dump());
5720 if (!isTailCall && CB && CB->isMustTailCall())
5721 report_fatal_error("failed to perform tail call elimination on a call "
5722 "site marked musttail");
5724 // When long calls (i.e. indirect calls) are always used, calls are always
5725   // made via function pointer. If we have a function name, first translate it into an address.
5727 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5729 Callee = LowerGlobalAddress(Callee, DAG);
5732 CallConv, isTailCall, isVarArg, isPatchPoint,
5733 isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5735 Subtarget.is64BitELFABI() &&
5736 any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5739 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5740 return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5743 if (Subtarget.isSVR4ABI())
5744 return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5747 if (Subtarget.isAIXABI())
5748 return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5751 return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5755 SDValue PPCTargetLowering::LowerCall_32SVR4(
5756 SDValue Chain, SDValue Callee, CallFlags CFlags,
5757 const SmallVectorImpl<ISD::OutputArg> &Outs,
5758 const SmallVectorImpl<SDValue> &OutVals,
5759 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5760 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5761 const CallBase *CB) const {
5762 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5763 // of the 32-bit SVR4 ABI stack frame layout.
5765 const CallingConv::ID CallConv = CFlags.CallConv;
5766 const bool IsVarArg = CFlags.IsVarArg;
5767 const bool IsTailCall = CFlags.IsTailCall;
5769 assert((CallConv == CallingConv::C ||
5770 CallConv == CallingConv::Cold ||
5771 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5773 const Align PtrAlign(4);
5775 MachineFunction &MF = DAG.getMachineFunction();
5777   // Mark this function as potentially containing a function that contains a
5778   // tail call. As a consequence, the frame pointer will be used for dynamic
5779   // stack allocation and for restoring the caller's stack pointer in this
5780   // function's epilogue. This is done because the tail-called function might
5781   // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5782 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5783 CallConv == CallingConv::Fast)
5784 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5786 // Count how many bytes are to be pushed on the stack, including the linkage
5787 // area, parameter list area and the part of the local variable space which
5788 // contains copies of aggregates which are passed by value.
5790 // Assign locations to all of the outgoing arguments.
5791 SmallVector<CCValAssign, 16> ArgLocs;
5792 PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5794 // Reserve space for the linkage area on the stack.
5795 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5798 CCInfo.PreAnalyzeCallOperands(Outs);
5801 // Handle fixed and variable vector arguments differently.
5802 // Fixed vector arguments go into registers as long as registers are
5803 // available. Variable vector arguments always go into memory.
5804 unsigned NumArgs = Outs.size();
5806 for (unsigned i = 0; i != NumArgs; ++i) {
5807 MVT ArgVT = Outs[i].VT;
5808 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5811 if (Outs[i].IsFixed) {
5812 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5815 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5821 errs() << "Call operand #" << i << " has unhandled type "
5822 << EVT(ArgVT).getEVTString() << "\n";
5824 llvm_unreachable(nullptr);
5828 // All arguments are treated the same.
5829 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5831 CCInfo.clearWasPPCF128();
5833   // Assign locations to all of the outgoing aggregate by-value arguments.
5834 SmallVector<CCValAssign, 16> ByValArgLocs;
5835 CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5837 // Reserve stack space for the allocations in CCInfo.
5838 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5840 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5842   // Size of the linkage area, parameter list area, and the part of the local
5843   // variable space where copies of aggregates passed by value are stored.
5845 unsigned NumBytes = CCByValInfo.getNextStackOffset();
5847 // Calculate by how many bytes the stack has to be adjusted in case of tail
5848 // call optimization.
5849 int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5851 // Adjust the stack pointer for the new arguments...
5852 // These operations are automatically eliminated by the prolog/epilog pass
5853 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5854 SDValue CallSeqStart = Chain;
5856   // Load the return address and frame pointer so they can be moved somewhere else later.
5859 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5861 // Set up a copy of the stack pointer for use loading and storing any
5862   // arguments that may not fit in the registers available for argument passing.
5864 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5866 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5867 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5868 SmallVector<SDValue, 8> MemOpChains;
5870 bool seenFloatArg = false;
5871 // Walk the register/memloc assignments, inserting copies/loads.
5872 // i - Tracks the index into the list of registers allocated for the call
5873 // RealArgIdx - Tracks the index into the list of actual function arguments
5874 // j - Tracks the index into the list of byval arguments
5875 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5877 ++i, ++RealArgIdx) {
5878 CCValAssign &VA = ArgLocs[i];
5879 SDValue Arg = OutVals[RealArgIdx];
5880 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5882 if (Flags.isByVal()) {
5883 // Argument is an aggregate which is passed by value, thus we need to
5884 // create a copy of it in the local variable space of the current stack
5885 // frame (which is the stack frame of the caller) and pass the address of
5886 // this copy to the callee.
5887 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5888 CCValAssign &ByValVA = ByValArgLocs[j++];
5889 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5891       // Memory reserved in the local variable space of the caller's stack frame.
5892 unsigned LocMemOffset = ByValVA.getLocMemOffset();
5894 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5895 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5898       // Create a copy of the argument in the local area of the current stack frame.
5900 SDValue MemcpyCall =
5901 CreateCopyOfByValArgument(Arg, PtrOff,
5902 CallSeqStart.getNode()->getOperand(0),
5905 // This must go outside the CALLSEQ_START..END.
5906 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5908 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5909 NewCallSeqStart.getNode());
5910 Chain = CallSeqStart = NewCallSeqStart;
5912 // Pass the address of the aggregate copy on the stack either in a
5913 // physical register or in the parameter list area of the current stack
5914 // frame to the callee.
5918 // When useCRBits() is true, there can be i1 arguments.
5919       // This is because getRegisterType(MVT::i1) => MVT::i1,
5920       // while for other integer types getRegisterType() => MVT::i32.
5921       // Extend i1 so the callee will get an i32.
5922 if (Arg.getValueType() == MVT::i1)
5923 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5926 if (VA.isRegLoc()) {
5927 seenFloatArg |= VA.getLocVT().isFloatingPoint();
5928 // Put argument in a physical register.
5929 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5930 bool IsLE = Subtarget.isLittleEndian();
5931 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5932 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5933 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5934 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5935 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5936 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5939 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5941 // Put argument in the parameter list area of the current stack frame.
5942 assert(VA.isMemLoc());
5943 unsigned LocMemOffset = VA.getLocMemOffset();
5946 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5947 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5950 MemOpChains.push_back(
5951 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5953 // Calculate and remember argument location.
5954 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5960 if (!MemOpChains.empty())
5961 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5963 // Build a sequence of copy-to-reg nodes chained together with token chain
5964 // and flag operands which copy the outgoing args into the appropriate regs.
5966 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5967 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5968 RegsToPass[i].second, InFlag);
5969 InFlag = Chain.getValue(1);
5972   // Set CR bit 6 to true if this is a vararg call with floating args passed in registers.
5975 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5976 SDValue Ops[] = { Chain, InFlag };
5978 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5979 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5981 InFlag = Chain.getValue(1);
5985 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5988 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5989 Callee, SPDiff, NumBytes, Ins, InVals, CB);
5992 // Copy an argument into memory, being careful to do this outside the
5993 // call sequence for the call to which the argument belongs.
5994 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5995 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5996 SelectionDAG &DAG, const SDLoc &dl) const {
5997 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5998 CallSeqStart.getNode()->getOperand(0),
6000 // The MEMCPY must go outside the CALLSEQ_START..END.
6001 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
6002 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
6004 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
6005 NewCallSeqStart.getNode());
6006 return NewCallSeqStart;
6009 SDValue PPCTargetLowering::LowerCall_64SVR4(
6010 SDValue Chain, SDValue Callee, CallFlags CFlags,
6011 const SmallVectorImpl<ISD::OutputArg> &Outs,
6012 const SmallVectorImpl<SDValue> &OutVals,
6013 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6014 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6015 const CallBase *CB) const {
6016 bool isELFv2ABI = Subtarget.isELFv2ABI();
6017 bool isLittleEndian = Subtarget.isLittleEndian();
6018 unsigned NumOps = Outs.size();
6019 bool IsSibCall = false;
6020 bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
6022 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6023 unsigned PtrByteSize = 8;
6025 MachineFunction &MF = DAG.getMachineFunction();
6027 if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
6030   // Mark this function as potentially containing a function that contains a
6031   // tail call. As a consequence, the frame pointer will be used for dynamic
6032   // stack allocation and for restoring the caller's stack pointer in this
6033   // function's epilogue. This is done because the tail-called function might
6034   // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
6035 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6036 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6038 assert(!(IsFastCall && CFlags.IsVarArg) &&
6039 "fastcc not supported on varargs functions");
6041 // Count how many bytes are to be pushed on the stack, including the linkage
6042 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
6043 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
6044 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
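  // (8 bytes per slot: ELFv1 has 6 doubleword slots, 6 * 8 == 48 bytes;
  // ELFv2 has 4 doubleword slots, 4 * 8 == 32 bytes.)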
6045 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6046 unsigned NumBytes = LinkageSize;
6047 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6048 unsigned &QFPR_idx = FPR_idx;
6050 static const MCPhysReg GPR[] = {
6051 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
6054 static const MCPhysReg VR[] = {
6055 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
6059 const unsigned NumGPRs = array_lengthof(GPR);
6060 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
6061 const unsigned NumVRs = array_lengthof(VR);
6062 const unsigned NumQFPRs = NumFPRs;
6064 // On ELFv2, we can avoid allocating the parameter area if all the arguments
6065 // can be passed to the callee in registers.
6066 // For the fast calling convention, there is another check below.
6067 // Note: We should keep consistent with LowerFormalArguments_64SVR4()
6068 bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
6069 if (!HasParameterArea) {
6070 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
6071 unsigned AvailableFPRs = NumFPRs;
6072 unsigned AvailableVRs = NumVRs;
6073 unsigned NumBytesTmp = NumBytes;
6074 for (unsigned i = 0; i != NumOps; ++i) {
6075 if (Outs[i].Flags.isNest()) continue;
6076 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
6077 PtrByteSize, LinkageSize, ParamAreaSize,
6078 NumBytesTmp, AvailableFPRs, AvailableVRs,
6079 Subtarget.hasQPX()))
6080 HasParameterArea = true;
6084 // When using the fast calling convention, we don't provide backing for
6085 // arguments that will be in registers.
6086 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
6088 // Avoid allocating parameter area for fastcc functions if all the arguments
6089 // can be passed in the registers.
if (IsFastCall)
  HasParameterArea = false;
6093 // Add up all the space actually used.
6094 for (unsigned i = 0; i != NumOps; ++i) {
6095 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6096 EVT ArgVT = Outs[i].VT;
6097 EVT OrigVT = Outs[i].ArgVT;
6103 if (Flags.isByVal()) {
6104 NumGPRsUsed += (Flags.getByValSize()+7)/8;
6105 if (NumGPRsUsed > NumGPRs)
6106 HasParameterArea = true;
} else {
  switch (ArgVT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected ValueType for argument!");
  case MVT::i1:
  case MVT::i32:
  case MVT::i64:
    if (++NumGPRsUsed <= NumGPRs)
      continue;
    break;
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
  case MVT::v2f64:
  case MVT::v2i64:
  case MVT::v1i128:
  case MVT::f128:
    if (++NumVRsUsed <= NumVRs)
      continue;
    break;
  case MVT::v4f32:
    // When using QPX, this is handled like a FP register, otherwise, it
    // is an Altivec register.
    if (Subtarget.hasQPX()) {
      if (++NumFPRsUsed <= NumFPRs)
        continue;
    } else {
      if (++NumVRsUsed <= NumVRs)
        continue;
    }
    break;
  case MVT::f32:
  case MVT::f64:
  case MVT::v4f64: // QPX
  case MVT::v4i1:  // QPX
    if (++NumFPRsUsed <= NumFPRs)
      continue;
    break;
  }
  HasParameterArea = true;
}
/* Respect alignment of argument on the stack.  */
auto Alignment =
    CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
NumBytes = alignTo(NumBytes, Alignment);
6154 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6155 if (Flags.isInConsecutiveRegsLast())
6156 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6159 unsigned NumBytesActuallyUsed = NumBytes;
// In the old ELFv1 ABI, the prolog code of the callee may store up to 8 GPR
// argument registers to the stack, allowing va_start to index over them in
// memory if the callee is varargs.
6164 // Because we cannot tell if this is needed on the caller side, we have to
6165 // conservatively assume that it is needed. As such, make sure we have at
6166 // least enough stack space for the caller to store the 8 GPRs.
6167 // In the ELFv2 ABI, we allocate the parameter area iff a callee
6168 // really requires memory operands, e.g. a vararg function.
6169 if (HasParameterArea)
6170 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
else
  NumBytes = LinkageSize;
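// Worked example: under ELFv1 a call passing two i64 arguments uses only
// r3/r4, but because the callee may dump all 8 GPRs for va_start we still
// reserve 48 + 8*8 = 112 bytes; under ELFv2 the same call can get by with
// just the 32-byte linkage area when no parameter area is required.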
6174 // Tail call needs the stack to be aligned.
6175 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6176 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6180 // Calculate by how many bytes the stack has to be adjusted in case of tail
6181 // call optimization.
int SPDiff = 0;
if (!IsSibCall)
  SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6185 // To protect arguments on the stack from being clobbered in a tail call,
6186 // force all the loads to happen before doing any other lowering.
6187 if (CFlags.IsTailCall)
6188 Chain = DAG.getStackArgumentTokenFactor(Chain);
6190 // Adjust the stack pointer for the new arguments...
6191 // These operations are automatically eliminated by the prolog/epilog pass
if (!IsSibCall)
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6194 SDValue CallSeqStart = Chain;
// Load the return address and frame pointer so they can be moved somewhere
// else later.
SDValue LROp, FPOp;
6199 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
// Set up a copy of the stack pointer for use loading and storing any
// arguments that may not fit in the registers available for argument
// passing.
6204 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6206 // Figure out which arguments are going to go in registers, and which in
6207 // memory. Also, if this is a vararg function, floating point operations
6208 // must be stored to our stack, and loaded into integer regs as well, if
6209 // any integer regs are available for argument passing.
6210 unsigned ArgOffset = LinkageSize;
6212 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6213 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6215 SmallVector<SDValue, 8> MemOpChains;
6216 for (unsigned i = 0; i != NumOps; ++i) {
6217 SDValue Arg = OutVals[i];
6218 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6219 EVT ArgVT = Outs[i].VT;
6220 EVT OrigVT = Outs[i].ArgVT;
6222 // PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it.
SDValue PtrOff;
6226 // We re-align the argument offset for each argument, except when using the
6227 // fast calling convention, when we need to make sure we do that only when
6228 // we'll actually use a stack slot.
6229 auto ComputePtrOff = [&]() {
6230 /* Respect alignment of argument on the stack. */
auto Alignment =
    CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6233 ArgOffset = alignTo(ArgOffset, Alignment);
6235 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6237 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
};

if (!IsFastCall) {
  ComputePtrOff();

  /* Compute GPR index associated with argument offset.  */
6244 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
  GPR_idx = std::min(GPR_idx, NumGPRs);
}
6248 // Promote integers to 64-bit values.
6249 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6250 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6251 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6252 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6255 // FIXME memcpy is used way more than necessary. Correctness first.
// Note: "by value" is code for passing a structure by value, not
// basic types.
6258 if (Flags.isByVal()) {
6259 // Note: Size includes alignment padding, so
6260 // struct x { short a; char b; }
6261 // will have Size = 4. With #pragma pack(1), it will have Size = 3.
6262 // These are the proper values we need for right-justifying the
6263 // aggregate in a parameter register.
6264 unsigned Size = Flags.getByValSize();
// An empty aggregate parameter takes up no storage and no
// registers.
if (Size == 0)
  continue;
6274 // All aggregates smaller than 8 bytes must be passed right-justified.
6275 if (Size==1 || Size==2 || Size==4) {
6276 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6277 if (GPR_idx != NumGPRs) {
6278 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6279 MachinePointerInfo(), VT);
6280 MemOpChains.push_back(Load.getValue(1));
6281 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6283 ArgOffset += PtrByteSize;
6288 if (GPR_idx == NumGPRs && Size < 8) {
6289 SDValue AddPtr = PtrOff;
6290 if (!isLittleEndian) {
6291 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6292 PtrOff.getValueType());
6293 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                  CallSeqStart, Flags, DAG, dl);
  ArgOffset += PtrByteSize;
  continue;
}
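// Worked example: for a hypothetical big-endian callee receiving
//   struct S { char a, b, c; };   // Size == 3
// with no free GPRs, AddPtr = PtrOff + (8 - 3), so the three bytes land
// in the low-order (rightmost) bytes of the argument doubleword, exactly
// where a GPR-passed copy would have placed them.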
6301 // Copy entire object into memory. There are cases where gcc-generated
6302 // code assumes it is there, even if it could be put entirely into
6303 // registers. (This is not what the doc says.)
6305 // FIXME: The above statement is likely due to a misunderstanding of the
6306 // documents. All arguments must be copied into the parameter area BY
6307 // THE CALLEE in the event that the callee takes the address of any
6308 // formal argument. That has not yet been implemented. However, it is
6309 // reasonable to use the stack area as a staging area for the register
6312 // Skip this for small aggregates, as we will use the same slot for a
6313 // right-justified copy, below.
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                  CallSeqStart, Flags, DAG, dl);
6319 // When a register is available, pass a small aggregate right-justified.
6320 if (Size < 8 && GPR_idx != NumGPRs) {
6321 // The easiest way to get this right-justified in a register
6322 // is to copy the structure into the rightmost portion of a
6323 // local variable slot, then load the whole slot into the
6325 // FIXME: The memcpy seems to produce pretty awful code for
6326 // small aggregates, particularly for packed ones.
6327 // FIXME: It would be preferable to use the slot in the
6328 // parameter save area instead of a new local variable.
6329 SDValue AddPtr = PtrOff;
6330 if (!isLittleEndian) {
6331 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6332 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                  CallSeqStart, Flags, DAG, dl);
6338 // Load the slot into the register.
SDValue Load =
    DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6341 MemOpChains.push_back(Load.getValue(1));
6342 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6344 // Done with this argument.
6345 ArgOffset += PtrByteSize;
6349 // For aggregates larger than PtrByteSize, copy the pieces of the
6350 // object that fit into registers from the parameter save area.
6351 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6352 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6353 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6354 if (GPR_idx != NumGPRs) {
SDValue Load =
    DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6357 MemOpChains.push_back(Load.getValue(1));
6358 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6359 ArgOffset += PtrByteSize;
6361 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6368 switch (Arg.getSimpleValueType().SimpleTy) {
default: llvm_unreachable("Unexpected ValueType for argument!");
case MVT::i1:
case MVT::i32:
case MVT::i64:
6373 if (Flags.isNest()) {
6374 // The 'nest' parameter, if any, is passed in R11.
  RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
  break;
}
6379 // These can be scalar arguments or elements of an integer array type
6380 // passed directly. Clang may use those instead of "byval" aggregate
6381 // types to avoid forcing arguments to memory unnecessarily.
6382 if (GPR_idx != NumGPRs) {
  RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
} else {
  if (IsFastCall)
    ComputePtrOff();

6388 assert(HasParameterArea &&
6389 "Parameter area must exist to pass an argument in memory.");
6390 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6391 true, CFlags.IsTailCall, false, MemOpChains,
6392 TailCallArguments, dl);
  if (IsFastCall)
    ArgOffset += PtrByteSize;
}
if (!IsFastCall)
  ArgOffset += PtrByteSize;
break;
case MVT::f32:
case MVT::f64: {
  // These can be scalar arguments or elements of a float array type
6402 // passed directly. The latter are used to implement ELFv2 homogenous
6403 // float aggregates.
6405 // Named arguments go into FPRs first, and once they overflow, the
6406 // remaining arguments go into GPRs and then the parameter save area.
6407 // Unnamed arguments for vararg functions always go to GPRs and
6408 // then the parameter save area. For now, put all arguments to vararg
6409 // routines always in both locations (FPR *and* GPR or stack slot).
6410 bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6411 bool NeededLoad = false;
6413 // First load the argument into the next available FPR.
6414 if (FPR_idx != NumFPRs)
6415 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6417 // Next, load the argument into GPR or stack slot if needed.
6418 if (!NeedGPROrStack)
6420 else if (GPR_idx != NumGPRs && !IsFastCall) {
6421 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6422 // once we support fp <-> gpr moves.
6424 // In the non-vararg case, this can only ever happen in the
6425 // presence of f32 array types, since otherwise we never run
// out of FPRs before running out of GPRs.
SDValue ArgVal;

6429 // Double values are always passed in a single GPR.
6430 if (Arg.getValueType() != MVT::f32) {
6431 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6433 // Non-array float values are extended and passed in a GPR.
6434 } else if (!Flags.isInConsecutiveRegs()) {
6435 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6436 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6438 // If we have an array of floats, we collect every odd element
6439 // together with its predecessor into one GPR.
} else if (ArgOffset % PtrByteSize != 0) {
  SDValue Lo, Hi;
  Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
  Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
  if (!isLittleEndian)
    std::swap(Lo, Hi);
  ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6448 // The final element, if even, goes into the first half of a GPR.
6449 } else if (Flags.isInConsecutiveRegsLast()) {
6450 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6451 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6452 if (!isLittleEndian)
6453 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6454 DAG.getConstant(32, dl, MVT::i32));
// Non-final even elements are skipped; they will be handled
// together with the subsequent argument on the next go-around.
} else
  ArgVal = SDValue();
6461 if (ArgVal.getNode())
6462 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
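// Illustrative sketch: for an ELFv2 homogeneous float aggregate such as a
// hypothetical struct { float f[8]; } arriving after the FPRs are
// exhausted, consecutive elements are bitcast to i32 and fused by the
// BUILD_PAIR above into one i64 GPR per pair, with the swap ensuring the
// earlier element occupies the high word on big-endian targets.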
6467 // Single-precision floating-point values are mapped to the
6468 // second (rightmost) word of the stack doubleword.
6469 if (Arg.getValueType() == MVT::f32 &&
6470 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6471 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6472 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6475 assert(HasParameterArea &&
6476 "Parameter area must exist to pass an argument in memory.");
6477 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6478 true, CFlags.IsTailCall, false, MemOpChains,
6479 TailCallArguments, dl);
6483 // When passing an array of floats, the array occupies consecutive
6484 // space in the argument area; only round up to the next doubleword
6485 // at the end of the array. Otherwise, each float takes 8 bytes.
6486 if (!IsFastCall || NeededLoad) {
6487 ArgOffset += (Arg.getValueType() == MVT::f32 &&
6488 Flags.isInConsecutiveRegs()) ? 4 : 8;
6489 if (Flags.isInConsecutiveRegsLast())
6490 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
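// For example, three consecutive f32 array elements starting at ArgOffset
// 112 advance it by 4 each to 124, and the isInConsecutiveRegsLast element
// rounds it up to 128; a standalone f32 or f64 would instead advance
// ArgOffset by a full 8 bytes.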
  }
  break;
}
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
case MVT::v2f64:
case MVT::v2i64:
case MVT::v1i128:
case MVT::f128:
  if (!Subtarget.hasQPX()) {
6503 // These can be scalar arguments or elements of a vector array type
6504 // passed directly. The latter are used to implement ELFv2 homogenous
6505 // vector aggregates.
6507 // For a varargs call, named arguments go into VRs or on the stack as
6508 // usual; unnamed arguments always go to the stack or the corresponding
6509 // GPRs when within range. For now, we always put the value in both
6510 // locations (or even all three).
6511 if (CFlags.IsVarArg) {
6512 assert(HasParameterArea &&
6513 "Parameter area must exist if we have a varargs call.");
6514 // We could elide this store in the case where the object fits
6515 // entirely in R registers. Maybe later.
SDValue Store =
    DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6518 MemOpChains.push_back(Store);
6519 if (VR_idx != NumVRs) {
SDValue Load =
    DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6522 MemOpChains.push_back(Load.getValue(1));
6523 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6526 for (unsigned i=0; i<16; i+=PtrByteSize) {
if (GPR_idx == NumGPRs)
  break;
6529 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6530 DAG.getConstant(i, dl, PtrVT));
SDValue Load =
    DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6533 MemOpChains.push_back(Load.getValue(1));
6534 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
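// Sketch of the vararg flow above for a hypothetical call
//   void f(int, ...);  f(1, (vector int){0, 1, 2, 3});
// the vector is stored to its 16-byte parameter slot, reloaded into the
// next free VR, and the same bytes are reloaded into up to two GPRs so the
// callee can fetch the value from either location.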
6539 // Non-varargs Altivec params go into VRs or on the stack.
6540 if (VR_idx != NumVRs) {
6541 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6546 assert(HasParameterArea &&
6547 "Parameter area must exist to pass an argument in memory.");
6548 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6549 true, CFlags.IsTailCall, true, MemOpChains,
6550 TailCallArguments, dl);
6560 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6561 "Invalid QPX parameter type");
6566 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6567 if (CFlags.IsVarArg) {
6568 assert(HasParameterArea &&
6569 "Parameter area must exist if we have a varargs call.");
6570 // We could elide this store in the case where the object fits
6571 // entirely in R registers. Maybe later.
SDValue Store =
    DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6574 MemOpChains.push_back(Store);
6575 if (QFPR_idx != NumQFPRs) {
6576 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6577 PtrOff, MachinePointerInfo());
6578 MemOpChains.push_back(Load.getValue(1));
6579 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6581 ArgOffset += (IsF32 ? 16 : 32);
6582 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
if (GPR_idx == NumGPRs)
  break;
6585 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6586 DAG.getConstant(i, dl, PtrVT));
SDValue Load =
    DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6589 MemOpChains.push_back(Load.getValue(1));
6590 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6595 // Non-varargs QPX params go into registers or on the stack.
6596 if (QFPR_idx != NumQFPRs) {
6597 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6602 assert(HasParameterArea &&
6603 "Parameter area must exist to pass an argument in memory.");
6604 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6605 true, CFlags.IsTailCall, true, MemOpChains,
6606 TailCallArguments, dl);
6608 ArgOffset += (IsF32 ? 16 : 32);
6612 ArgOffset += (IsF32 ? 16 : 32);
6618 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6619 "mismatch in size of parameter area");
6620 (void)NumBytesActuallyUsed;
6622 if (!MemOpChains.empty())
6623 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6625 // Check if this is an indirect call (MTCTR/BCTRL).
6626 // See prepareDescriptorIndirectCall and buildCallOperands for more
6627 // information about calls through function pointers in the 64-bit SVR4 ABI.
6628 if (CFlags.IsIndirect) {
6629 // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6630 // caller in the TOC save area.
6631 if (isTOCSaveRestoreRequired(Subtarget)) {
assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6633 // Load r2 into a virtual register and store it to the TOC save area.
6634 setUsesTOCBasePtr(DAG);
6635 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6636 // TOC save area offset.
6637 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6638 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6639 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6640 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6641 MachinePointerInfo::getStack(
                       DAG.getMachineFunction(), TOCSaveOffset));
}
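// For example, with the ELFv1 linkage area this stores r2 into the TOC
// save slot at 40(r1) (24(r1) under ELFv2); the caller reloads r2 from
// there after the indirect callee returns.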
6644 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6645 // This does not mean the MTCTR instruction must use R12; it's easier
6646 // to model this as an extra parameter, so do that.
6647 if (isELFv2ABI && !CFlags.IsPatchPoint)
6648 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6651 // Build a sequence of copy-to-reg nodes chained together with token chain
6652 // and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6655 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6656 RegsToPass[i].second, InFlag);
6657 InFlag = Chain.getValue(1);
}

if (CFlags.IsTailCall && !IsSibCall)
  PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                  TailCallArguments);
6664 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6665 Callee, SPDiff, NumBytes, Ins, InVals, CB);
6668 SDValue PPCTargetLowering::LowerCall_Darwin(
6669 SDValue Chain, SDValue Callee, CallFlags CFlags,
6670 const SmallVectorImpl<ISD::OutputArg> &Outs,
6671 const SmallVectorImpl<SDValue> &OutVals,
6672 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6673 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6674 const CallBase *CB) const {
6675 unsigned NumOps = Outs.size();
6677 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6678 bool isPPC64 = PtrVT == MVT::i64;
6679 unsigned PtrByteSize = isPPC64 ? 8 : 4;
6681 MachineFunction &MF = DAG.getMachineFunction();
// Mark this function as potentially containing a function that contains a
// tail call. As a consequence, the frame pointer will be used for dynamic
// stack allocation and for restoring the caller's stack pointer in this
// function's epilogue. This is done because a tail-called function might
// overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
6688 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6689 CFlags.CallConv == CallingConv::Fast)
6690 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6692 // Count how many bytes are to be pushed on the stack, including the linkage
6693 // area, and parameter passing area. We start with 24/48 bytes, which is
6694 // prereserved space for [SP][CR][LR][3 x unused].
6695 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6696 unsigned NumBytes = LinkageSize;
6698 // Add up all the space actually used.
6699 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6700 // they all go in registers, but we must reserve stack space for them for
6701 // possible use by the caller. In varargs or 64-bit calls, parameters are
// assigned stack space in order, with padding so Altivec parameters are
// 16-byte aligned.
6704 unsigned nAltivecParamsAtEnd = 0;
6705 for (unsigned i = 0; i != NumOps; ++i) {
6706 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6707 EVT ArgVT = Outs[i].VT;
6708 // Varargs Altivec parameters are padded to a 16 byte boundary.
6709 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6710 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6711 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6712 if (!CFlags.IsVarArg && !isPPC64) {
6713 // Non-varargs Altivec parameters go after all the non-Altivec
6714 // parameters; handle those later so we know how much padding we need.
  nAltivecParamsAtEnd++;
  continue;
}
6718 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6719 NumBytes = ((NumBytes+15)/16)*16;
6721 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6724 // Allow for Altivec parameters at the end, if needed.
6725 if (nAltivecParamsAtEnd) {
6726 NumBytes = ((NumBytes+15)/16)*16;
6727 NumBytes += 16*nAltivecParamsAtEnd;
6730 // The prolog code of the callee may store up to 8 GPR argument registers to
// the stack, allowing va_start to index over them in memory if the callee
// is varargs.
6732 // Because we cannot tell if this is needed on the caller side, we have to
6733 // conservatively assume that it is needed. As such, make sure we have at
6734 // least enough stack space for the caller to store the 8 GPRs.
6735 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
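// Worked example (32-bit Darwin): even for a hypothetical void g(int), the
// callee may dump r3-r10 for va_start, so NumBytes is raised to at least
// 24 + 8*4 = 56 bytes; the 64-bit equivalent reserves 48 + 8*8 = 112.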
6737 // Tail call needs the stack to be aligned.
6738 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6739 CFlags.CallConv == CallingConv::Fast)
6740 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6742 // Calculate by how many bytes the stack has to be adjusted in case of tail
6743 // call optimization.
6744 int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6746 // To protect arguments on the stack from being clobbered in a tail call,
6747 // force all the loads to happen before doing any other lowering.
6748 if (CFlags.IsTailCall)
6749 Chain = DAG.getStackArgumentTokenFactor(Chain);
6751 // Adjust the stack pointer for the new arguments...
6752 // These operations are automatically eliminated by the prolog/epilog pass
6753 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6754 SDValue CallSeqStart = Chain;
// Load the return address and frame pointer so they can be moved somewhere
// else later.
SDValue LROp, FPOp;
6759 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
// Set up a copy of the stack pointer for use loading and storing any
// arguments that may not fit in the registers available for argument
// passing.
SDValue StackPtr;
if (isPPC64)
  StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
else
  StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6770 // Figure out which arguments are going to go in registers, and which in
6771 // memory. Also, if this is a vararg function, floating point operations
6772 // must be stored to our stack, and loaded into integer regs as well, if
6773 // any integer regs are available for argument passing.
6774 unsigned ArgOffset = LinkageSize;
6775 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6777 static const MCPhysReg GPR_32[] = { // 32-bit registers.
6778 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
6781 static const MCPhysReg GPR_64[] = { // 64-bit registers.
6782 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
6785 static const MCPhysReg VR[] = {
6786 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
6789 const unsigned NumGPRs = array_lengthof(GPR_32);
6790 const unsigned NumFPRs = 13;
6791 const unsigned NumVRs = array_lengthof(VR);
6793 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6795 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6796 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6798 SmallVector<SDValue, 8> MemOpChains;
6799 for (unsigned i = 0; i != NumOps; ++i) {
6800 SDValue Arg = OutVals[i];
6801 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6803 // PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it.
SDValue PtrOff;

6807 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6809 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6811 // On PPC64, promote integers to 64-bit values.
6812 if (isPPC64 && Arg.getValueType() == MVT::i32) {
6813 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6814 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6815 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6818 // FIXME memcpy is used way more than necessary. Correctness first.
// Note: "by value" is code for passing a structure by value, not
// basic types.
6821 if (Flags.isByVal()) {
6822 unsigned Size = Flags.getByValSize();
6823 // Very small objects are passed right-justified. Everything else is
6824 // passed left-justified.
6825 if (Size==1 || Size==2) {
6826 EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6827 if (GPR_idx != NumGPRs) {
6828 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6829 MachinePointerInfo(), VT);
6830 MemOpChains.push_back(Load.getValue(1));
6831 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6833 ArgOffset += PtrByteSize;
6835 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6836 PtrOff.getValueType());
6837 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                                  CallSeqStart, Flags, DAG, dl);
6841 ArgOffset += PtrByteSize;
6845 // Copy entire object into memory. There are cases where gcc-generated
6846 // code assumes it is there, even if it could be put entirely into
6847 // registers. (This is not what the doc says.)
Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                                  CallSeqStart, Flags, DAG, dl);
6852 // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6853 // copy the pieces of the object that fit into registers from the
6854 // parameter save area.
6855 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6856 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6857 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6858 if (GPR_idx != NumGPRs) {
SDValue Load =
    DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6861 MemOpChains.push_back(Load.getValue(1));
6862 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6863 ArgOffset += PtrByteSize;
6865 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6872 switch (Arg.getSimpleValueType().SimpleTy) {
6873 default: llvm_unreachable("Unexpected ValueType for argument!");
case MVT::i1:
case MVT::i32:
case MVT::i64:
  if (GPR_idx != NumGPRs) {
6878 if (Arg.getValueType() == MVT::i1)
6879 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
  RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
} else {
6883 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6884 isPPC64, CFlags.IsTailCall, false, MemOpChains,
6885 TailCallArguments, dl);
}
ArgOffset += PtrByteSize;
break;
case MVT::f32:
case MVT::f64:
  if (FPR_idx != NumFPRs) {
6892 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6894 if (CFlags.IsVarArg) {
SDValue Store =
    DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6897 MemOpChains.push_back(Store);
6899 // Float varargs are always shadowed in available integer registers
6900 if (GPR_idx != NumGPRs) {
SDValue Load =
    DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6903 MemOpChains.push_back(Load.getValue(1));
6904 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64) {
6907 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6908 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
SDValue Load =
    DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6911 MemOpChains.push_back(Load.getValue(1));
6912 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6915 // If we have any FPRs remaining, we may also have GPRs remaining.
// Args passed in FPRs consume either 1 (f32) or 2 (f64) available
// GPRs.
if (GPR_idx != NumGPRs)
  ++GPR_idx;
if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
    !isPPC64)  // PPC64 has 64-bit GPRs, obviously :)
  ++GPR_idx;
6925 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6926 isPPC64, CFlags.IsTailCall, false, MemOpChains,
6927 TailCallArguments, dl);
ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
break;
case MVT::v4f32:
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
  if (CFlags.IsVarArg) {
6938 // These go aligned on the stack, or in the corresponding R registers
6939 // when within range. The Darwin PPC ABI doc claims they also go in
6940 // V registers; in fact gcc does this only for arguments that are
6941 // prototyped, not for those that match the ... We do it for all
6942 // arguments, seems to work.
while (ArgOffset % 16 != 0) {
  ArgOffset += PtrByteSize;
  if (GPR_idx != NumGPRs)
    GPR_idx++;
}
6948 // We could elide this store in the case where the object fits
6949 // entirely in R registers. Maybe later.
6950 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6951 DAG.getConstant(ArgOffset, dl, PtrVT));
SDValue Store =
    DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6954 MemOpChains.push_back(Store);
6955 if (VR_idx != NumVRs) {
SDValue Load =
    DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6958 MemOpChains.push_back(Load.getValue(1));
6959 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6962 for (unsigned i=0; i<16; i+=PtrByteSize) {
if (GPR_idx == NumGPRs)
  break;
6965 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6966 DAG.getConstant(i, dl, PtrVT));
SDValue Load =
    DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6969 MemOpChains.push_back(Load.getValue(1));
6970 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6975 // Non-varargs Altivec params generally go in registers, but have
6976 // stack space allocated at the end.
6977 if (VR_idx != NumVRs) {
6978 // Doesn't have GPR space allocated.
6979 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6980 } else if (nAltivecParamsAtEnd==0) {
6981 // We are emitting Altivec params in order.
6982 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6983 isPPC64, CFlags.IsTailCall, true, MemOpChains,
6984 TailCallArguments, dl);
6990 // If all Altivec parameters fit in registers, as they usually do,
6991 // they get stack space following the non-Altivec parameters. We
6992 // don't track this here because nobody below needs it.
// If there are more Altivec parameters than fit in registers, emit
// the stores here.
6995 if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
unsigned j = 0;
// Offset is aligned; skip 1st 12 params which go in V registers.
ArgOffset = ((ArgOffset+15)/16)*16;
ArgOffset += 12*16;
for (unsigned i = 0; i != NumOps; ++i) {
7001 SDValue Arg = OutVals[i];
7002 EVT ArgType = Outs[i].VT;
7003 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
7004 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
if (++j > NumVRs) {
  SDValue PtrOff;
  // We are emitting Altivec params in order.
7008 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
7009 isPPC64, CFlags.IsTailCall, true, MemOpChains,
                   TailCallArguments, dl);
  ArgOffset += 16;
}
}
}
}
7017 if (!MemOpChains.empty())
7018 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7020 // On Darwin, R12 must contain the address of an indirect callee. This does
7021 // not mean the MTCTR instruction must use R12; it's easier to model this as
7022 // an extra parameter, so do that.
7023 if (CFlags.IsIndirect) {
7024 assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7025 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
                                                   PPC::R12), Callee));
}
7029 // Build a sequence of copy-to-reg nodes chained together with token chain
7030 // and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
7033 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
7034 RegsToPass[i].second, InFlag);
7035 InFlag = Chain.getValue(1);
}

if (CFlags.IsTailCall)
  PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                  TailCallArguments);
7042 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7043 Callee, SPDiff, NumBytes, Ins, InVals, CB);
7046 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   CCState &State) {
7050 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
7051 State.getMachineFunction().getSubtarget());
7052 const bool IsPPC64 = Subtarget.isPPC64();
7053 const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
7054 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
7056 assert((!ValVT.isInteger() ||
7057 (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
7058 "Integer argument exceeds register size: should have been legalized");
7060 if (ValVT == MVT::f128)
7061 report_fatal_error("f128 is unimplemented on AIX.");
7063 if (ArgFlags.isNest())
7064 report_fatal_error("Nest arguments are unimplemented.");
7066 if (ValVT.isVector() || LocVT.isVector())
7067 report_fatal_error("Vector arguments are unimplemented on AIX.");
7069 static const MCPhysReg GPR_32[] = {// 32-bit registers.
7070 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7071 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7072 static const MCPhysReg GPR_64[] = {// 64-bit registers.
7073 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7074 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7076 if (ArgFlags.isByVal()) {
7077 if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
7078 report_fatal_error("Pass-by-value arguments with alignment greater than "
7079 "register width are not supported.");
7081 const unsigned ByValSize = ArgFlags.getByValSize();
7083 // An empty aggregate parameter takes up no storage and no registers,
7084 // but needs a MemLoc for a stack slot for the formal arguments side.
7085 if (ByValSize == 0) {
7086 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                     State.getNextStackOffset(), RegVT,
                                     LocInfo));
  return false;
}
7092 const unsigned StackSize = alignTo(ByValSize, PtrAlign);
7093 unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
7094 for (const unsigned E = Offset + StackSize; Offset < E;
7095 Offset += PtrAlign.value()) {
7096 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7097 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
    else {
      State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                       Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
                                       LocInfo));
      break;
    }
  }
  return false;
}
7108 // Arguments always reserve parameter save area.
7109 switch (ValVT.SimpleTy) {
7111 report_fatal_error("Unhandled value type for argument.");
7113 // i64 arguments should have been split to i32 for PPC32.
7114 assert(IsPPC64 && "PPC32 should have split i64 values.");
  LLVM_FALLTHROUGH;
case MVT::i1:
case MVT::i32: {
  const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
7119 // AIX integer arguments are always passed in register width.
7120 if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
7121 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
7122 : CCValAssign::LocInfo::ZExt;
7123 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7124 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
  else
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
  return false;
}
case MVT::f32:
case MVT::f64: {
7132 // Parameter save area (PSA) is reserved even if the float passes in fpr.
7133 const unsigned StoreSize = LocVT.getStoreSize();
7134 // Floats are always 4-byte aligned in the PSA on AIX.
7135 // This includes f64 in 64-bit mode for ABI compatibility.
7136 const unsigned Offset =
7137 State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
7138 unsigned FReg = State.AllocateReg(FPR);
if (FReg)
  State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
7142 // Reserve and initialize GPRs or initialize the PSA as required.
7143 for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
7144 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
7145 assert(FReg && "An FPR should be available when a GPR is reserved.");
7146 if (State.isVarArg()) {
7147 // Successfully reserved GPRs are only initialized for vararg calls.
7148 // Custom handling is required for:
7149 // f64 in PPC32 needs to be split into 2 GPRs.
7150 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
        State.addLoc(
            CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
      }
    } else {
7155 // If there are insufficient GPRs, the PSA needs to be initialized.
7156 // Initialization occurs even if an FPR was initialized for
7157 // compatibility with the AIX XL compiler. The full memory for the
7158 // argument will be initialized even if a prior word is saved in GPR.
7159 // A custom memLoc is used when the argument also passes in FPR so
7160 // that the callee handling can skip over it easily.
      State.addLoc(
          FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
                                           LocInfo)
               : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      break;
    }
  }
  return false;
}
}
return true;
}
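// Illustrative allocation under this routine (hypothetical 32-bit AIX
// vararg call): for
//   void f(int a, ...);  f(1, 2.0);
// 'a' takes r3 plus 4 bytes of parameter save area, and the f64 takes f1
// plus two custom GPR locs (r4 for the high word, r5 for the low word),
// with 8 more PSA bytes reserved on its behalf.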
7175 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
                                                    bool IsPPC64) {
assert((IsPPC64 || SVT != MVT::i64) &&
7178 "i64 should have been split for 32-bit codegen.");
7182 report_fatal_error("Unexpected value type for formal argument");
7186 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7188 return &PPC::F4RCRegClass;
7190 return &PPC::F8RCRegClass;
7194 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
7195 SelectionDAG &DAG, SDValue ArgValue,
7196 MVT LocVT, const SDLoc &dl) {
7197 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
7198 assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
if (Flags.isSExt())
  ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
7202 DAG.getValueType(ValVT));
7203 else if (Flags.isZExt())
7204 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
7205 DAG.getValueType(ValVT));
7207 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
7210 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
7211 const unsigned LASize = FL->getLinkageSize();
7213 if (PPC::GPRCRegClass.contains(Reg)) {
7214 assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
7215 "Reg must be a valid argument register!");
7216 return LASize + 4 * (Reg - PPC::R3);
7219 if (PPC::G8RCRegClass.contains(Reg)) {
7220 assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
7221 "Reg must be a valid argument register!");
7222 return LASize + 8 * (Reg - PPC::X3);
7225 llvm_unreachable("Only general purpose registers expected.");
7228 // AIX ABI Stack Frame Layout:
7230 // Low Memory +--------------------------------------------+
7231 // SP +---> | Back chain | ---+
7232 // | +--------------------------------------------+ |
7233 // | | Saved Condition Register | |
7234 // | +--------------------------------------------+ |
7235 // | | Saved Linkage Register | |
7236 // | +--------------------------------------------+ | Linkage Area
7237 // | | Reserved for compilers | |
7238 // | +--------------------------------------------+ |
7239 // | | Reserved for binders | |
7240 // | +--------------------------------------------+ |
7241 // | | Saved TOC pointer | ---+
7242 // | +--------------------------------------------+
7243 // | | Parameter save area |
7244 // | +--------------------------------------------+
7245 // | | Alloca space |
7246 // | +--------------------------------------------+
7247 // | | Local variable space |
7248 // | +--------------------------------------------+
7249 // | | Float/int conversion temporary |
7250 // | +--------------------------------------------+
7251 // | | Save area for AltiVec registers |
7252 // | +--------------------------------------------+
7253 // | | AltiVec alignment padding |
7254 // | +--------------------------------------------+
7255 // | | Save area for VRSAVE register |
7256 // | +--------------------------------------------+
7257 // | | Save area for General Purpose registers |
7258 // | +--------------------------------------------+
7259 // | | Save area for Floating Point registers |
7260 // | +--------------------------------------------+
7261 // +---- | Back chain |
7262 // High Memory +--------------------------------------------+
7265 // AIX 7.2 Assembler Language Reference
7266 // Subroutine linkage convention
7268 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7269 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7270 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7271 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7273 assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7274 CallConv == CallingConv::Fast) &&
7275 "Unexpected calling convention!");
7277 if (getTargetMachine().Options.GuaranteedTailCallOpt)
7278 report_fatal_error("Tail call support is unimplemented on AIX.");
7281 report_fatal_error("Soft float support is unimplemented on AIX.");
7283 const PPCSubtarget &Subtarget =
7284 static_cast<const PPCSubtarget &>(DAG.getSubtarget());
if (Subtarget.hasQPX())
  report_fatal_error("QPX is not supported on AIX.");
7288 const bool IsPPC64 = Subtarget.isPPC64();
7289 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7291 // Assign locations to all of the incoming arguments.
7292 SmallVector<CCValAssign, 16> ArgLocs;
7293 MachineFunction &MF = DAG.getMachineFunction();
7294 MachineFrameInfo &MFI = MF.getFrameInfo();
7295 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7297 const EVT PtrVT = getPointerTy(MF.getDataLayout());
7298 // Reserve space for the linkage area on the stack.
7299 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7300 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7301 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7303 SmallVector<SDValue, 8> MemOps;
7305 for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
7306 CCValAssign &VA = ArgLocs[I++];
7307 MVT LocVT = VA.getLocVT();
7308 ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7310 // For compatibility with the AIX XL compiler, the float args in the
7311 // parameter save area are initialized even if the argument is available
7312 // in register. The caller is required to initialize both the register
7313 // and memory, however, the callee can choose to expect it in either.
7314 // The memloc is dismissed here because the argument is retrieved from
// the register.
if (VA.isMemLoc() && VA.needsCustom())
  continue;
7319 if (Flags.isByVal() && VA.isMemLoc()) {
7320 const unsigned Size =
    alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
            PtrByteSize);
7323 const int FI = MF.getFrameInfo().CreateFixedObject(
7324 Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7325 /* IsAliased */ true);
7326 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  InVals.push_back(FIN);
  continue;
}
7332 if (Flags.isByVal()) {
7333 assert(VA.isRegLoc() && "MemLocs should already be handled.");
7335 const MCPhysReg ArgReg = VA.getLocReg();
7336 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7338 if (Flags.getNonZeroByValAlign() > PtrByteSize)
7339 report_fatal_error("Over aligned byvals not supported yet.");
7341 const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7342 const int FI = MF.getFrameInfo().CreateFixedObject(
7343 StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7344 /* IsAliased */ true);
7345 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7346 InVals.push_back(FIN);
7348 // Add live ins for all the RegLocs for the same ByVal.
7349 const TargetRegisterClass *RegClass =
7350 IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7352 auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
                                         const unsigned Offset) {
  const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
7355 // Since the callers side has left justified the aggregate in the
7356 // register, we can simply store the entire register into the stack
// slot.
SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7359 // The store to the fixedstack object is needed becuase accessing a
7360 // field of the ByVal will use a gep and load. Ideally we will optimize
7361 // to extracting the value from the register directly, and elide the
// stores when the argument's address is not taken, but that will need to
// be future work.
SDValue Store =
    DAG.getStore(CopyFrom.getValue(1), dl, CopyFrom,
7366 DAG.getObjectPtrOffset(dl, FIN, Offset),
7367 MachinePointerInfo::getFixedStack(MF, FI, Offset));
  MemOps.push_back(Store);
};

7372 unsigned Offset = 0;
7373 HandleRegLoc(VA.getLocReg(), Offset);
7374 Offset += PtrByteSize;
7375 for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7376 Offset += PtrByteSize) {
7377 assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7378 "RegLocs should be for ByVal argument.");
7380 const CCValAssign RL = ArgLocs[I++];
  HandleRegLoc(RL.getLocReg(), Offset);
}
7384 if (Offset != StackSize) {
7385 assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7386 "Expected MemLoc for remaining bytes.");
7387 assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
  // Consume the MemLoc. The InVal has already been emitted, so nothing
  // more needs to be done.
  I++;
}

continue;
}
7396 EVT ValVT = VA.getValVT();
7397 if (VA.isRegLoc() && !VA.needsCustom()) {
7398 MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
const unsigned VReg =
    MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7401 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7402 if (ValVT.isScalarInteger() &&
7403 (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
  ArgValue =
      truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
}
InVals.push_back(ArgValue);
continue;
}
7410 if (VA.isMemLoc()) {
7411 const unsigned LocSize = LocVT.getStoreSize();
7412 const unsigned ValSize = ValVT.getStoreSize();
7413 assert((ValSize <= LocSize) &&
7414 "Object size is larger than size of MemLoc");
7415 int CurArgOffset = VA.getLocMemOffset();
7416 // Objects are right-justified because AIX is big-endian.
7417 if (LocSize > ValSize)
7418 CurArgOffset += LocSize - ValSize;
7419 // Potential tail calls could cause overwriting of argument stack slots.
7420 const bool IsImmutable =
7421 !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7422 (CallConv == CallingConv::Fast));
7423 int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7424 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue ArgValue =
    DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
  InVals.push_back(ArgValue);
}
}
7432 // On AIX a minimum of 8 words is saved to the parameter save area.
7433 const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7434 // Area that is at least reserved in the caller of this function.
7435 unsigned CallerReservedArea =
7436 std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7438 // Set the size that is at least reserved in caller of this function. Tail
7439 // call optimized function's reserved stack space needs to be aligned so
// that taking the difference between two stack areas will result in an
// aligned stack frame.
CallerReservedArea =
7443 EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7444 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7445 FuncInfo->setMinReservedArea(CallerReservedArea);
if (isVarArg) {
  FuncInfo->setVarArgsFrameIndex(
7449 MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7450 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7452 static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7453 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7455 static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7456 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7457 const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7459 // The fixed integer arguments of a variadic function are stored to the
7460 // VarArgsFrameIndex on the stack so that they may be loaded by
7461 // dereferencing the result of va_next.
7462 for (unsigned GPRIndex =
7463 (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7464 GPRIndex < NumGPArgRegs; ++GPRIndex) {
7466 const unsigned VReg =
7467 IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7468 : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7470 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7472 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7473 MemOps.push_back(Store);
7474 // Increment the address for the next argument to store.
7475 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
    FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
  }
}
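// Sketch for a hypothetical variadic callee on 64-bit AIX:
//   int sum(int n, ...);
// 'n' occupies r3, so GPRIndex starts at 1 and the loop above spills
// r4-r10 into consecutive doublewords starting at the VarArgsFrameIndex,
// letting va_arg walk the anonymous arguments in memory.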
7480 if (!MemOps.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

return Chain;
}
7486 SDValue PPCTargetLowering::LowerCall_AIX(
7487 SDValue Chain, SDValue Callee, CallFlags CFlags,
7488 const SmallVectorImpl<ISD::OutputArg> &Outs,
7489 const SmallVectorImpl<SDValue> &OutVals,
7490 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7491 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7492 const CallBase *CB) const {
7493 // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7494 // AIX ABI stack frame layout.
7496 assert((CFlags.CallConv == CallingConv::C ||
7497 CFlags.CallConv == CallingConv::Cold ||
7498 CFlags.CallConv == CallingConv::Fast) &&
7499 "Unexpected calling convention!");
7501 if (CFlags.IsPatchPoint)
7502 report_fatal_error("This call type is unimplemented on AIX.");
7504 const PPCSubtarget& Subtarget =
7505 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7506 if (Subtarget.hasQPX())
7507 report_fatal_error("QPX is not supported on AIX.");
7508 if (Subtarget.hasAltivec())
7509 report_fatal_error("Altivec support is unimplemented on AIX.");
7511 MachineFunction &MF = DAG.getMachineFunction();
7512 SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
               *DAG.getContext());
7516 // Reserve space for the linkage save area (LSA) on the stack.
7517 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7518 // [SP][CR][LR][2 x reserved][TOC].
7519 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7520 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7521 const bool IsPPC64 = Subtarget.isPPC64();
7522 const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7523 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7524 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7525 CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7527 // The prolog code of the callee may store up to 8 GPR argument registers to
// the stack, allowing va_start to index over them in memory if the callee
// is variadic.
7530 // Because we cannot tell if this is needed on the caller side, we have to
7531 // conservatively assume that it is needed. As such, make sure we have at
7532 // least enough stack space for the caller to store the 8 GPRs.
7533 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7534 const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7535 CCInfo.getNextStackOffset());
7537 // Adjust the stack pointer for the new arguments...
7538 // These operations are automatically eliminated by the prolog/epilog pass.
7539 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7540 SDValue CallSeqStart = Chain;
7542 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7543 SmallVector<SDValue, 8> MemOpChains;
7545 // Set up a copy of the stack pointer for loading and storing any
7546 // arguments that may not fit in the registers available for argument
7548 const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7549 : DAG.getRegister(PPC::R1, MVT::i32);
7551 for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7552 const unsigned ValNo = ArgLocs[I].getValNo();
7553 SDValue Arg = OutVals[ValNo];
7554 ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7556 if (Flags.isByVal()) {
7557 const unsigned ByValSize = Flags.getByValSize();
// Nothing to do for zero-sized ByVals on the caller side.
if (ByValSize == 0) {
  I++;
  continue;
}
7565 auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7566 return DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain,
                        (LoadOffset != 0)
                            ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
                            : Arg,
                        MachinePointerInfo(), VT);
};
7573 unsigned LoadOffset = 0;
7575 // Initialize registers, which are fully occupied by the by-val argument.
7576 while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7577 SDValue Load = GetLoad(PtrVT, LoadOffset);
7578 MemOpChains.push_back(Load.getValue(1));
7579 LoadOffset += PtrByteSize;
7580 const CCValAssign &ByValVA = ArgLocs[I++];
7581 assert(ByValVA.getValNo() == ValNo &&
7582 "Unexpected location for pass-by-value argument.");
7583 RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
if (LoadOffset == ByValSize)
  continue;
7589 // There must be one more loc to handle the remainder.
7590 assert(ArgLocs[I].getValNo() == ValNo &&
7591 "Expected additional location for by-value argument.");
7593 if (ArgLocs[I].isMemLoc()) {
7594 assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7595 const CCValAssign &ByValVA = ArgLocs[I++];
7596 ISD::ArgFlagsTy MemcpyFlags = Flags;
7597 // Only memcpy the bytes that don't pass in register.
7598 MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7599 Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7600 (LoadOffset != 0) ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7602 DAG.getObjectPtrOffset(dl, StackPtr, ByValVA.getLocMemOffset()),
      CallSeqStart, MemcpyFlags, DAG, dl);
  continue;
}
7607 // Initialize the final register residue.
7608 // Any residue that occupies the final by-val arg register must be
7609 // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7610 // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7611 // 2 and 1 byte loads.
7612 const unsigned ResidueBytes = ByValSize % PtrByteSize;
7613 assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7614 "Unexpected register residue for by-value argument.");
SDValue ResidueVal;
for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7617 const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
const MVT VT =
    N == 1 ? MVT::i8
           : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7621 SDValue Load = GetLoad(VT, LoadOffset);
MemOpChains.push_back(Load.getValue(1));
LoadOffset += N;
Bytes += N;

// By-val arguments are passed left-justified in register.
7627 // Every load here needs to be shifted, otherwise a full register load
7628 // should have been used.
7629 assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7630 "Unexpected load emitted during handling of pass-by-value "
       "argument.");
unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7634 getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7635 SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7636 SDValue ShiftedLoad =
7637 DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7638 ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7643 const CCValAssign &ByValVA = ArgLocs[I++];
7644 RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
    CCValAssign &VA = ArgLocs[I++];
    const MVT LocVT = VA.getLocVT();
    const MVT ValVT = VA.getValVT();

    switch (VA.getLocInfo()) {
    default:
      report_fatal_error("Unexpected argument extension type.");
    case CCValAssign::Full:
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc() && !VA.needsCustom()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }
    if (VA.isMemLoc()) {
      SDValue PtrOff =
          DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      continue;
    }
    // Custom handling is used for GPR initializations for vararg float
    // arguments.
    assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
           ValVT.isFloatingPoint() && LocVT.isInteger() &&
           "Unexpected register handling for calling convention.");

    SDValue ArgAsInt =
        DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);

    if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
      // f32 in 32-bit GPR
      // f64 in 64-bit GPR
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
    else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
      // f32 in 64-bit GPR.
      RegsToPass.push_back(std::make_pair(
          VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
    else {
      // f64 in two 32-bit GPRs
      // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
      assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
             "Unexpected custom register for argument!");
      CCValAssign &GPR1 = VA;
      SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
                                     DAG.getConstant(32, dl, MVT::i8));
      RegsToPass.push_back(std::make_pair(
          GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));

      if (I != E) {
        // If only 1 GPR was available, there will only be one custom GPR and
        // the argument will also pass in memory.
        CCValAssign &PeekArg = ArgLocs[I];
        if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
          assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
          CCValAssign &GPR2 = ArgLocs[I++];
          RegsToPass.push_back(std::make_pair(
              GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
        }
      }
    }
  }
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // For indirect calls, we need to save the TOC base to the stack for
  // restoration after the call.
  if (CFlags.IsIndirect) {
    assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
    const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
    const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
    const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    const unsigned TOCSaveOffset =
        Subtarget.getFrameLowering()->getTOCSaveOffset();

    setUsesTOCBasePtr(DAG);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
    SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
    SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
    SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
    Chain = DAG.getStore(
        Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
  }
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  const int SPDiff = 0;
  return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(
      Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                ? RetCC_PPC_Cold
                : RetCC_PPC);
}
SDValue
PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs,
                       (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                           ? RetCC_PPC_Cold
                           : RetCC_PPC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Copy the result values into the output registers.
  for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[RealResIdx];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }
    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      bool isLittleEndian = Subtarget.isLittleEndian();
      // Legalize ret f64 -> ret 2 x i32.
      SDValue SVal =
          DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                      DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                         DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }
  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}
SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNAREAOFFSET node.
  SDValue Ops[2] = {Chain, FPSIdx};
  SDVTList VTs = DAG.getVTList(IntVT);
  return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}
SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
                                             SelectionDAG &DAG) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP =
      DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}
SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current return address save index.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}
SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get current frame pointer save index. The users of this index will be
  // primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}
SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, dl, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
  if (hasInlineStackProbe(MF))
    return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}
SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
  return DAG.getFrameIndex(FI, PtrVT);
}
SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}
SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType().isVector())
    return LowerVectorLoad(Op, DAG);

  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 loads");

  // First, load 8 bits into 32 bits, then truncate to 1 bit.

  SDLoc dl(Op);
  LoadSDNode *LD = cast<LoadSDNode>(Op);

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  MachineMemOperand *MMO = LD->getMemOperand();

  SDValue NewLD =
      DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
                     BasePtr, MVT::i8, MMO);
  SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);

  SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
  return DAG.getMergeValues(Ops, dl);
}
SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(1).getValueType().isVector())
    return LowerVectorStore(Op, DAG);

  assert(Op.getOperand(1).getValueType() == MVT::i1 &&
         "Custom lowering only for i1 stores");

  // First, zero extend to 32 bits, then use a truncating store to 8 bits.

  SDLoc dl(Op);
  StoreSDNode *ST = cast<StoreSDNode>(Op);

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  MachineMemOperand *MMO = ST->getMemOperand();

  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
                      Value);
  return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
}
// FIXME: Remove this once the ANDI glue bug is fixed:
SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 results");

  SDLoc DL(Op);
  return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
}
SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
                                               SelectionDAG &DAG) const {

  // Implements a vector truncate that fits in a vector register as a shuffle.
  // We want to legalize vector truncates down to where the source fits in
  // a vector register (and target is therefore smaller than vector register
  // size). At that point legalization will try to custom lower the sub-legal
  // result and get here - where we can contain the truncate as a single target
  // shuffle.
  //
  // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
  // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
  //
  // We will implement it for big-endian ordering as this (where x denotes
  // undefined):
  // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
  // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
  //
  // The same operation in little-endian ordering will be:
  // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
  // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
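  //
  // As a concrete illustration (not compiled): truncating <4 x i16> to
  // <4 x i8> gives SizeMult == 2, so the mask built below keeps byte indices
  // {0, 2, 4, 6} on little-endian and {1, 3, 5, 7} on big-endian, i.e. the
  // low byte of each halfword in the source's memory order, and fills the
  // remaining mask slots with "don't care" elements.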
  assert(Op.getValueType().isVector() && "Vector type expected.");

  SDLoc DL(Op);
  SDValue N1 = Op.getOperand(0);
  unsigned SrcSize = N1.getValueType().getSizeInBits();
  assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
  SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);

  EVT TrgVT = Op.getValueType();
  unsigned TrgNumElts = TrgVT.getVectorNumElements();
  EVT EltVT = TrgVT.getVectorElementType();
  unsigned WideNumElts = 128 / EltVT.getSizeInBits();
  EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);

  // First list the elements we want to keep.
  unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
  SmallVector<int, 16> ShuffV;
  if (Subtarget.isLittleEndian())
    for (unsigned i = 0; i < TrgNumElts; ++i)
      ShuffV.push_back(i * SizeMult);
  else
    for (unsigned i = 1; i <= TrgNumElts; ++i)
      ShuffV.push_back(i * SizeMult - 1);

  // Populate the remaining elements with undefs; any mask index that selects
  // from the second (undef) shuffle operand yields an undefined element.
  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    ShuffV.push_back(WideNumElts + 1);

  SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
  return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
}
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
/// possible.
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  // Not FP? Not a fsel.
  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
      !Op.getOperand(2).getValueType().isFloatingPoint())
    return Op;

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc dl(Op);
  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);

  SDNodeFlags Flags = Op.getNode()->getFlags();

  // We have xsmaxcdp/xsmincdp which are OK to emit even in the
  // presence of infinities.
  if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
    switch (CC) {
    default:
      break;
    case ISD::SETOGT:
    case ISD::SETGT:
      return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
    case ISD::SETOLT:
    case ISD::SETLT:
      return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
    }
  }
  // We might be able to do better than this under some circumstances, but in
  // general, fsel-based lowering of select is a finite-math-only optimization.
  // For more information, see section F.3 of the 2.06 ISA specification.

  if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
      (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
    return Op;
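  // Illustrative mapping (not compiled): under the finite-math conditions
  // checked above, a node such as
  //   select_cc f64 %a, 0.0, %t, %f, setge
  // lowers to a single
  //   fsel %a, %t, %f
  // because fsel natively selects its second operand when the first is
  // greater than or equal to zero.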
  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break; // SETUO etc aren't handled by fsel.
    case ISD::SETNE:
      std::swap(TV, FV);
      LLVM_FALLTHROUGH;
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
      LLVM_FALLTHROUGH;
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
      LLVM_FALLTHROUGH;
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }
  SDValue Cmp;
  switch (CC) {
  default: break; // SETUO etc aren't handled by fsel.
  case ISD::SETNE:
    std::swap(TV, FV);
    LLVM_FALLTHROUGH;
  case ISD::SETEQ:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
    if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
      Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                       DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOLE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  }
  return Op;
}
void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                               SelectionDAG &DAG,
                                               const SDLoc &dl) const {
  assert(Op.getOperand(0).getValueType().isFloatingPoint());
  SDValue Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);

  SDValue Tmp;
  switch (Op.getSimpleValueType().SimpleTy) {
  default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(
        Op.getOpcode() == ISD::FP_TO_SINT
            ? PPCISD::FCTIWZ
            : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
        dl, MVT::f64, Src);
    break;
  case MVT::i64:
    assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
           "i64 FP_TO_UINT is supported only with FPCVT");
    Tmp = DAG.getNode(Op.getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ
                                                        : PPCISD::FCTIDUZ,
                      dl, MVT::f64, Src);
    break;
  }

  // Convert the FP value to an int value through memory.
  bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
                  (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
  SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
  int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);

  // Emit a store to the stack slot.
  SDValue Chain;
  Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
  if (i32Stack) {
    MachineFunction &MF = DAG.getMachineFunction();
    Alignment = Align(4);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
    SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
    Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
              DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
  } else
    Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);

  // Result is a load from the stack slot. If loading 4 bytes, make sure to
  // add in a bias on big endian.
  if (Op.getValueType() == MVT::i32 && !i32Stack) {
    FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
                        DAG.getConstant(4, dl, FIPtr.getValueType()));
    MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
  }

  RLI.Chain = Chain;
  RLI.Ptr = FIPtr;
  RLI.MPI = MPI;
  RLI.Alignment = Alignment;
}
/// Custom lowers floating point to integer conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
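/// As an illustration (not compiled): with direct moves, an f64 -> i32
/// FP_TO_SINT becomes FCTIWZ into a VSR followed by MFVSR into a GPR,
/// instead of a round trip through a stack temporary as in
/// LowerFP_TO_INTForReuse above.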
SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert(Op.getOperand(0).getValueType().isFloatingPoint());
  SDValue Src = Op.getOperand(0);

  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);

  SDValue Tmp;
  switch (Op.getSimpleValueType().SimpleTy) {
  default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(
        Op.getOpcode() == ISD::FP_TO_SINT
            ? PPCISD::FCTIWZ
            : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
        dl, MVT::f64, Src);
    Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
    break;
  case MVT::i64:
    assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
           "i64 FP_TO_UINT is supported only with FPCVT");
    Tmp = DAG.getNode(Op.getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ
                                                        : PPCISD::FCTIDUZ,
                      dl, MVT::f64, Src);
    Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
    break;
  }
  return Tmp;
}
SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                          const SDLoc &dl) const {

  // FP to INT conversions are legal for f128.
  if (Op->getOperand(0).getValueType() == MVT::f128)
    return Op;

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
    if (Op.getValueType() == MVT::i32) {
      if (Op.getOpcode() == ISD::FP_TO_SINT) {
        SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                                 MVT::f64, Op.getOperand(0),
                                 DAG.getIntPtrConstant(0, dl));
        SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                                 MVT::f64, Op.getOperand(0),
                                 DAG.getIntPtrConstant(1, dl));

        // Add the two halves of the long double in round-to-zero mode.
        SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);

        // Now use a smaller FP_TO_SINT.
        return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
      }
      if (Op.getOpcode() == ISD::FP_TO_UINT) {
        const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
        APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
        SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
        // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
        // FIXME: generated code sucks.
        // TODO: Are there fast-math-flags to propagate to this FSUB?
        SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
                                   Op.getOperand(0), Tmp);
        True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
        True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
                           DAG.getConstant(0x80000000, dl, MVT::i32));
        SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
                                    Op.getOperand(0));
        return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
                               ISD::SETGE);
      }
    }

    return SDValue();
  }

  if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
    return LowerFP_TO_INTDirectMove(Op, DAG, dl);

  ReuseLoadInfo RLI;
  LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);

  return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                     RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
}
// We're trying to insert a regular store, S, and then a load, L. If the
// incoming value, O, is a load, we might just be able to have our load use the
// address used by O. However, we don't know if anything else will store to
// that address before we can load from it. To prevent this situation, we need
// to insert our load, L, into the chain as a peer of O. To do this, we give L
// the same chain operand as O, we create a token factor from the chain results
// of O and L, and we replace all uses of O's chain result with that token
// factor (see spliceIntoChain below for this last part).
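//
// A sketch of the splice (illustrative only): if the original chains were
//   O ->(chain)-> users-of-O
// then after inserting L with O's chain operand and splicing, we have
//   O ->(chain)-> TF(O, L) ->(chain)-> users-of-O
// so everything that was ordered after O is now also ordered after L.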
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
                                            ReuseLoadInfo &RLI,
                                            SelectionDAG &DAG,
                                            ISD::LoadExtType ET) const {
  SDLoc dl(Op);
  bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
                       (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
  if (ET == ISD::NON_EXTLOAD &&
      (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
      isOperationLegalOrCustom(Op.getOpcode(),
                               Op.getOperand(0).getValueType())) {

    LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
    return true;
  }

  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
  if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
      LD->isNonTemporal())
    return false;
  if (LD->getMemoryVT() != MemVT)
    return false;

  RLI.Ptr = LD->getBasePtr();
  if (LD->isIndexed() && !LD->getOffset().isUndef()) {
    assert(LD->getAddressingMode() == ISD::PRE_INC &&
           "Non-pre-inc AM on PPC?");
    RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
                          LD->getOffset());
  }

  RLI.Chain = LD->getChain();
  RLI.MPI = LD->getPointerInfo();
  RLI.IsDereferenceable = LD->isDereferenceable();
  RLI.IsInvariant = LD->isInvariant();
  RLI.Alignment = LD->getAlign();
  RLI.AAInfo = LD->getAAInfo();
  RLI.Ranges = LD->getRanges();

  RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
  return true;
}
// Given the head of the old chain, ResChain, insert a token factor containing
// it and NewResChain, and make users of ResChain now be users of that token
// factor.
// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
                                        SDValue NewResChain,
                                        SelectionDAG &DAG) const {
  if (!ResChain)
    return;

  SDLoc dl(NewResChain);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                           NewResChain, DAG.getUNDEF(MVT::Other));
  assert(TF.getNode() != NewResChain.getNode() &&
         "A new TF really is required here");

  DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
  DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}
/// Analyze profitability of direct move.
/// Prefer a float load over an int load plus direct move
/// when there is no integer use of the int load.
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
  SDNode *Origin = Op.getOperand(0).getNode();
  if (Origin->getOpcode() != ISD::LOAD)
    return true;

  // If there is no LXSIBZX/LXSIHZX, like Power8,
  // prefer direct move if the memory size is 1 or 2 bytes.
  MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
  if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
    return true;

  for (SDNode::use_iterator UI = Origin->use_begin(),
                            UE = Origin->use_end();
       UI != UE; ++UI) {

    // Only look at the users of the loaded value.
    if (UI.getUse().get().getResNo() != 0)
      continue;

    if (UI->getOpcode() != ISD::SINT_TO_FP &&
        UI->getOpcode() != ISD::UINT_TO_FP)
      return true;
  }

  return false;
}
/// Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert((Op.getValueType() == MVT::f32 ||
          Op.getValueType() == MVT::f64) &&
         "Invalid floating point type as target of conversion");
  assert(Subtarget.hasFPCVT() &&
         "Int to FP conversions with direct moves require FPCVT");
  SDValue FP;
  SDValue Src = Op.getOperand(0);
  bool SinglePrec = Op.getValueType() == MVT::f32;
  bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
  bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
  unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
                             (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);

  if (WordInt) {
    FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
                     dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  } else {
    FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  }

  return FP;
}
static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {

  EVT VecVT = Vec.getValueType();
  assert(VecVT.isVector() && "Expected a vector type.");
  assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");

  EVT EltVT = VecVT.getVectorElementType();
  unsigned WideNumElts = 128 / EltVT.getSizeInBits();
  EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);

  unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
  SmallVector<SDValue, 16> Ops(NumConcat);
  Ops[0] = Vec;
  SDValue UndefVec = DAG.getUNDEF(VecVT);
  for (unsigned i = 1; i < NumConcat; ++i)
    Ops[i] = UndefVec;

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
}
SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                                const SDLoc &dl) const {

  unsigned Opc = Op.getOpcode();
  assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
         "Unexpected conversion type");
  assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
         "Supports conversions to v2f64/v4f32 only.");

  bool SignedConv = Opc == ISD::SINT_TO_FP;
  bool FourEltRes = Op.getValueType() == MVT::v4f32;

  SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
  EVT WideVT = Wide.getValueType();
  unsigned WideNumElts = WideVT.getVectorNumElements();
  MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;

  SmallVector<int, 16> ShuffV;
  for (unsigned i = 0; i < WideNumElts; ++i)
    ShuffV.push_back(i + WideNumElts);

  int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
  int SaveElts = FourEltRes ? 4 : 2;
  if (Subtarget.isLittleEndian())
    for (int i = 0; i < SaveElts; i++)
      ShuffV[i * Stride] = i;
  else
    for (int i = 1; i <= SaveElts; i++)
      ShuffV[i * Stride - 1] = i - 1;
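  // Illustrative example (not compiled): for a v4i8 -> v4f32 conversion the
  // source is widened to v16i8, so WideNumElts == 16 and Stride == 4. On
  // little-endian the loop above places source bytes 0..3 at mask slots
  // 0, 4, 8 and 12; every other slot selects from ShuffleSrc2 below, which
  // supplies zeros for an unsigned conversion (for a signed one the bytes
  // are don't-cares, fixed up by the sign extension that follows).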
  SDValue ShuffleSrc2 =
      SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
  SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);

  SDValue Extend;
  if (SignedConv) {
    Arrange = DAG.getBitcast(IntermediateVT, Arrange);
    EVT ExtVT = Op.getOperand(0).getValueType();
    if (Subtarget.hasP9Altivec())
      ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
                               IntermediateVT.getVectorNumElements());

    Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
                         DAG.getValueType(ExtVT));
  } else
    Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);

  return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
}
SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);

  EVT InVT = Op.getOperand(0).getValueType();
  EVT OutVT = Op.getValueType();
  if (OutVT.isVector() && OutVT.isFloatingPoint() &&
      isOperationCustom(Op.getOpcode(), InVT))
    return LowerINT_TO_FPVector(Op, DAG, dl);

  // Conversions to f128 are legal.
  if (Op.getValueType() == MVT::f128)
    return Op;

  if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
    if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
      return SDValue();

    SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
    // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do all the conversion and skip the
  // store/load. However, without FPCVT we can't do most conversions.
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;
  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDValue SINT = Op.getOperand(0);
    // When converting to single-precision, we actually need to convert
    // to double-precision first and then round to single-precision.
    // To avoid double-rounding effects during that operation, we have
    // to prepare the input operand.  Bits that might be truncated when
    // converting to double-precision are replaced by a bit that won't
    // be lost at this stage, but is below the single-precision rounding
    // position.
    //
    // However, if -enable-unsafe-fp-math is in effect, accept double
    // rounding to avoid the extra overhead.
    if (Op.getValueType() == MVT::f32 &&
        !Subtarget.hasFPCVT() &&
        !DAG.getTarget().Options.UnsafeFPMath) {

      // Twiddle input to make sure the low 11 bits are zero.  (If this
      // is the case, we are guaranteed the value will fit into the 53 bit
      // mantissa of an IEEE double-precision value without rounding.)
      // If any of those low 11 bits were not zero originally, make sure
      // bit 12 (value 2048) is set instead, so that the final rounding
      // to single-precision gets the correct result.
      SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                                  SINT, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
                          Round, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
      Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                          Round, DAG.getConstant(-2048, dl, MVT::i64));
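      // Worked example (illustrative only): for SINT = 0x0020000000000001,
      //   SINT & 2047          -> 0x0000000000000001
      //   + 2047               -> 0x0000000000000800 (carries into bit 11)
      //   | SINT, & ~2047      -> 0x0020000000000800
      // The low 11 bits are cleared and bit 11 is set because some of them
      // were nonzero, preserving a "sticky" bit below the single-precision
      // rounding position.
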
      // However, we cannot use that value unconditionally: if the magnitude
      // of the input value is small, the bit-twiddling we did above might
      // end up visibly changing the output.  Fortunately, in that case, we
      // don't need to twiddle bits since the original input will convert
      // exactly to double-precision floating-point already.  Therefore,
      // construct a conditional to use the original value if the top 11
      // bits are all sign-bit copies, and use the rounded value computed
      // above otherwise.
      SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
                                 SINT, DAG.getConstant(53, dl, MVT::i32));
      Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
                         Cond, DAG.getConstant(1, dl, MVT::i64));
      Cond = DAG.getSetCC(
          dl,
          getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
          Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);

      SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
    }
    ReuseLoadInfo RLI;
    SDValue Bits;

    MachineFunction &MF = DAG.getMachineFunction();
    if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
      Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                         RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasLFIWAX() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasFPCVT() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (((Subtarget.hasLFIWAX() &&
                 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
                (Subtarget.hasFPCVT() &&
                 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
               SINT.getOperand(0).getValueType() == MVT::i32) {
      MachineFrameInfo &MFI = MF.getFrameInfo();
      EVT PtrVT = getPointerTy(DAG.getDataLayout());

      int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = Align(4);

      MachineMemOperand *MMO =
          MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                  RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
                                         PPCISD::LFIWZX : PPCISD::LFIWAX,
                                     dl, DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
    } else
      Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);

    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
    return FP;
  }
  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled INT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // then lfd it and fcfid it.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDValue Ld;
  if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
    ReuseLoadInfo RLI;
    bool ReusingLoad;
    if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
                                            DAG))) {
      int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = Align(4);
    }

    MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
    SDValue Ops[] = { RLI.Chain, RLI.Ptr };
    Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
                                     PPCISD::LFIWZX : PPCISD::LFIWAX,
                                 dl, DAG.getVTList(MVT::f64, MVT::Other),
                                 Ops, MVT::i32, MMO);
    if (ReusingLoad)
      spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
  } else {
    assert(Subtarget.isPPC64() &&
           "i32->FP without LFIWAX supported only on PPC64");

    int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
                                Op.getOperand(0));

    // STD the extended value into the stack slot.
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}
SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */
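  // Spot-check of the conversion expression above (illustrative only):
  //   FPSCR bits 00 -> 0 ^ (3 >> 1) = 1  (round to nearest)
  //   FPSCR bits 01 -> 1 ^ (2 >> 1) = 0  (round to zero)
  //   FPSCR bits 10 -> 2 ^ (1 >> 1) = 2  (round to +inf)
  //   FPSCR bits 11 -> 3 ^ (0 >> 1) = 3  (round to -inf)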
  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  SDValue Chain = Op.getOperand(0);
  SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
  Chain = MFFS.getValue(1);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
  Chain = CWD.getValue(1);

  // Transform as necessary
  SDValue CWD1 =
      DAG.getNode(ISD::AND, dl, MVT::i32,
                  CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
      DAG.getNode(ISD::SRL, dl, MVT::i32,
                  DAG.getNode(ISD::AND, dl, MVT::i32,
                              DAG.getNode(ISD::XOR, dl, MVT::i32,
                                          CWD, DAG.getConstant(3, dl, MVT::i32)),
                              DAG.getConstant(3, dl, MVT::i32)),
                  DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
      DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  RetVal =
      DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
                  dl, VT, RetVal);

  return DAG.getMergeValues({RetVal, Chain}, dl);
}
SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
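  //
  // Illustrative identity (not compiled): for an i64 shift built from two
  // i32 halves, where a PPC 32-bit shift by any amount in 32..63 yields 0:
  //   OutHi = (Hi << Amt) | (Lo >> (32 - Amt)) | (Lo << (Amt - 32))
  //   OutLo = Lo << Amt
  // For Amt in 1..31 only the (Lo >> (32 - Amt)) term contributes the carried
  // bits; for Amt in 33..63 only the (Lo << (Amt - 32)) term does, so no
  // compare-and-branch on the shift amount is needed.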
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
  SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}
SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
  SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}
SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRA!");

  // Expand into a bunch of logical ops, followed by a select_cc.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
  SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
  SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
                                  Tmp4, Tmp6, ISD::SETLE);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}
//===----------------------------------------------------------------------===//
// Vector related lowering.
//
/// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
/// element size of SplatSize. Cast the result to VT.
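/// For instance (illustrative only): getCanonicalConstSplat(1, 2, MVT::v8i16,
/// DAG, dl) produces a v8i16 splat of 1, which selects to a single
/// "vspltish 1"; an all-ones value of any element size is canonicalized to a
/// v16i8 splat of 0xFF so that only one pattern is needed for it.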
static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
                                      SelectionDAG &DAG, const SDLoc &dl) {
  static const MVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };

  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];

  // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
  if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
    SplatSize = 1;
    Val = 0xFF;
  }

  EVT CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}
/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
                                const SDLoc &dl, EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op);
}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount. The result has the specified value type.
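/// Example use (illustrative only): BuildVSLDOI(A, B, 4, MVT::v4i32, DAG, dl)
/// builds the byte-shuffle mask {4, 5, ..., 19} on v16i8, i.e. the result of
/// shifting the concatenation A||B left by 4 bytes, which matches the
/// semantics of the vsldoi instruction.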
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);

  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {
  EVT VecVT = V->getValueType(0);
  bool RightType = VecVT == MVT::v2f64 ||
    (HasP8Vector && VecVT == MVT::v4f32) ||
    (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
  if (!RightType)
    return false;

  bool IsSplat = true;
  bool IsLoad = false;
  SDValue Op0 = V->getOperand(0);

  // This function is called in a block that confirms the node is not a
  // constant splat. So a constant BUILD_VECTOR here means the vector is built
  // out of different constants.
  if (V->isConstant())
    return false;
  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
    if (V->getOperand(i).isUndef())
      return false;
    // We want to expand nodes that represent load-and-splat even if the
    // loaded value is a floating point truncation or conversion to int.
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}
// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {

  SDLoc dl(Op);
  SDValue Op0 = Op->getOperand(0);

  if ((Op.getValueType() != MVT::f128) ||
      (Op0.getOpcode() != ISD::BUILD_PAIR) ||
      (Op0.getOperand(0).getValueType() != MVT::i64) ||
      (Op0.getOperand(1).getValueType() != MVT::i64))
    return SDValue();

  return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
                     Op0.getOperand(1));
}
static const SDValue *getNormalLoadInput(const SDValue &Op) {
  const SDValue *InputLoad = &Op;
  if (InputLoad->getOpcode() == ISD::BITCAST)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
      InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() != ISD::LOAD)
    return nullptr;
  LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
  return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
}
// Convert the argument APFloat to a single precision APFloat if there is no
// loss in information during the conversion to single precision APFloat and
// the resulting number is not a denormal number. Return true if successful.
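// Illustrative behavior (not compiled): an APFloat holding 1.0 or 0.5
// converts to single precision losslessly, so the argument is rewritten in
// place and the function returns true; a double like 1.0e-45 would only be
// representable as a single-precision denormal, so the argument is left
// untouched and the function returns false.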
bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
  APFloat APFloatToConvert = ArgAPFloat;
  bool LosesInfo = true;
  APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
                           &LosesInfo);
  bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
  if (Success)
    ArgAPFloat = APFloatToConvert;
  return Success;
}
// Bitcast the argument APInt to a double and convert it to a single precision
// APFloat, bitcast the APFloat to an APInt and assign it to the original
// argument if there is no loss in information during the conversion from
// double to single precision APFloat and the resulting number is not a denormal
// number. Return true if successful.
bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
  double DpValue = ArgAPInt.bitsToDouble();
  APFloat APFloatDp(DpValue);
  bool Success = convertToNonDenormSingle(APFloatDp);
  if (Success)
    ArgAPInt = APFloatDp.bitcastToAPInt();
  return Success;
}
// If this is a case we can't handle, return null and let the default
// expansion code take care of it. If we CAN select this case, and if it
// selects to a single instruction, return Op. Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc dl(Op);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
    // We first build an i32 vector, load it into a QPX register,
    // then convert it to a floating-point vector and compare it
    // to a zero vector to get the boolean result.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
    MachinePointerInfo PtrInfo =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    assert(BVN->getNumOperands() == 4 &&
           "BUILD_VECTOR for v4i1 does not have 4 operands");

    bool IsConst = true;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        IsConst = false;
        break;
      }
    }

    if (IsConst) {
      Constant *One =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
      Constant *NegOne =
          ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);

      Constant *CV[4];
      for (unsigned i = 0; i < 4; ++i) {
        if (BVN->getOperand(i).isUndef())
          CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
        else if (isNullConstant(BVN->getOperand(i)))
          CV[i] = NegOne;
        else
          CV[i] = One;
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx =
          DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), Align(16));

      SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
      SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
      return DAG.getMemIntrinsicNode(
          PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
9216 SmallVector<SDValue, 4> Stores;
9217 for (unsigned i = 0; i < 4; ++i) {
9218 if (BVN->getOperand(i).isUndef()) continue;
9220 unsigned Offset = 4*i;
9221 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
9222 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
9224 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
9225 if (StoreSize > 4) {
9226 Stores.push_back(
9227 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
9228 PtrInfo.getWithOffset(Offset), MVT::i32));
9229 } else {
9230 SDValue StoreValue = BVN->getOperand(i);
9231 if (StoreSize < 4)
9232 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
9234 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
9235 PtrInfo.getWithOffset(Offset)));
9236 }
9237 }
9239 SDValue StoreChain;
9240 if (!Stores.empty())
9241 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9242 else
9243 StoreChain = DAG.getEntryNode();
9245 // Now load from v4i32 into the QPX register; this will extend it to
9246 // v4i64 but not yet convert it to a floating point. Nevertheless, this
9247 // is typed as v4f64 because the QPX register integer states are not
9248 // explicitly represented.
9250 SDValue Ops[] = {StoreChain,
9251 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
9252 FIdx};
9253 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
9255 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
9256 dl, VTs, Ops, MVT::v4i32, PtrInfo);
9257 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
9258 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
9259 LoadedVect);
9261 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
9263 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
9264 }
9266 // All other QPX vectors are handled by generic code.
9267 if (Subtarget.hasQPX())
9268 return SDValue();
9270 // Check if this is a splat of a constant value.
9271 APInt APSplatBits, APSplatUndef;
9272 unsigned SplatBitSize;
9273 bool HasAnyUndefs;
9274 bool BVNIsConstantSplat =
9275 BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9276 HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9278 // If it is a splat of a double, check if we can shrink it to a 32 bit
9279 // non-denormal float which when converted back to double gives us the same
9280 // double. This is to exploit the XXSPLTIDP instruction.
9281 if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
9282 (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
9283 convertToNonDenormSingle(APSplatBits)) {
9284 SDValue SplatNode = DAG.getNode(
9285 PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9286 DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9287 return DAG.getBitcast(Op.getValueType(), SplatNode);
9288 }
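// For example, a v2f64 splat of 1.0 shrinks losslessly to the single
// precision pattern 0x3F800000, which becomes the 32-bit immediate of the
// XXSPLTI_SP_TO_DP node.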
9290 if (!BVNIsConstantSplat || SplatBitSize > 32) {
9292 const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
9293 // Handle load-and-splat patterns as we have instructions that will do this
9294 // operation.
9295 if (InputLoad && DAG.isSplatValue(Op, true)) {
9296 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9298 // We have handling for 4 and 8 byte elements.
9299 unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9301 // Checking for a single use of this load, we have to check for vector
9302 // width (128 bits) / ElementSize uses (since each operand of the
9303 // BUILD_VECTOR is a separate use of the value).
9304 if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
9305 ((Subtarget.hasVSX() && ElementSize == 64) ||
9306 (Subtarget.hasP9Vector() && ElementSize == 32))) {
9307 SDValue Ops[] = {
9308 LD->getChain(), // Chain
9309 LD->getBasePtr(), // Ptr
9310 DAG.getValueType(Op.getValueType()) // VT
9311 };
9312 return
9313 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
9314 DAG.getVTList(Op.getValueType(), MVT::Other),
9315 Ops, LD->getMemoryVT(), LD->getMemOperand());
9316 }
9317 }
9319 // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
9320 // lowered to VSX instructions under certain conditions.
9321 // Without VSX, there is no pattern more efficient than expanding the node.
9322 if (Subtarget.hasVSX() &&
9323 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9324 Subtarget.hasP8Vector()))
9325 return Op;
9326 else
9327 return SDValue();
9328 }
9329 uint64_t SplatBits = APSplatBits.getZExtValue();
9330 uint64_t SplatUndef = APSplatUndef.getZExtValue();
9331 unsigned SplatSize = SplatBitSize / 8;
9333 // First, handle single instruction cases.
9335 // All zeros?
9336 if (SplatBits == 0) {
9337 // Canonicalize all zero vectors to be v4i32.
9338 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9339 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9340 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9341 }
9342 return Op;
9343 }
9345 // We have XXSPLTIW for constant splats four bytes wide.
9346 // Given vector length is a multiple of 4, 2-byte splats can be replaced
9347 // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9348 // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9349 // turned into a 4-byte splat of 0xABABABAB.
9350 if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9351 return getCanonicalConstSplat((SplatBits |= SplatBits << 16), SplatSize * 2,
9352 Op.getValueType(), DAG, dl);
9354 if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9355 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9356 dl);
9358 // We have XXSPLTIB for constant splats one byte wide.
9359 if (Subtarget.hasP9Vector() && SplatSize == 1)
9360 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9361 dl);
9363 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9364 int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
9365 (32-SplatBitSize));
9366 if (SextVal >= -16 && SextVal <= 15)
9367 return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9368 dl);
9370 // Two instruction sequences.
9372 // If this value is in the range [-32,30] and is even, use:
9373 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9374 // If this value is in the range [17,31] and is odd, use:
9375 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9376 // If this value is in the range [-31,-17] and is odd, use:
9377 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9378 // Note the last two are three-instruction sequences.
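// For example, a splat of 30 becomes vspltisw(15) followed by a self-add
// (15 + 15 == 30), and a splat of 27 becomes vspltisw(11) minus
// vspltisw(-16), since 11 - (-16) == 27.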
9379 if (SextVal >= -32 && SextVal <= 31) {
9380 // To avoid having these optimizations undone by constant folding,
9381 // we convert to a pseudo that will be expanded later into one of
9382 // the above forms.
9383 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9384 EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9385 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9386 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9387 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9388 if (VT == Op.getValueType())
9389 return RetVal;
9390 else
9391 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9392 }
9394 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
9395 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
9396 // for fneg/fabs.
9397 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9398 // Make -1 and vspltisw -1:
9399 SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9401 // Make the VSLW intrinsic, computing 0x8000_0000.
9402 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9403 OnesV, DAG, dl);
9405 // xor by OnesV to invert it.
9406 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9407 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9408 }
9410 // Check to see if this is a wide variety of vsplti*, binop self cases.
9411 static const signed char SplatCsts[] = {
9412 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9413 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9414 };
9416 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9417 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9418 // cases which are ambiguous (e.g. formation of 0x8000_0000).
9419 int i = SplatCsts[idx];
9421 // Figure out what shift amount will be used by altivec if shifted by i in
9422 // this splat size.
9423 unsigned TypeShiftAmt = i & (SplatBitSize-1);
9425 // vsplti + shl self.
9426 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9427 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9428 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9429 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9430 Intrinsic::ppc_altivec_vslw
9431 };
9432 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9433 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9434 }
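// The case above fires, for example, for a v8i16 splat of 64 with i == 4:
// 64 == 4 << (4 & 15), so vspltish(4) followed by vslh(t,t) produces it.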
9436 // vsplti + srl self.
9437 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9438 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9439 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9440 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9441 Intrinsic::ppc_altivec_vsrw
9442 };
9443 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9444 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9445 }
9447 // vsplti + sra self.
9448 if (SextVal == (int)(i >> TypeShiftAmt)) {
9449 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9450 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9451 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
9452 Intrinsic::ppc_altivec_vsraw
9453 };
9454 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9455 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9456 }
9458 // vsplti + rol self.
9459 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9460 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9461 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9462 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9463 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9464 Intrinsic::ppc_altivec_vrlw
9465 };
9466 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9467 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9468 }
9470 // t = vsplti c, result = vsldoi t, t, 1
9471 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9472 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9473 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9474 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9475 }
9476 // t = vsplti c, result = vsldoi t, t, 2
9477 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9478 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9479 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9480 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9481 }
9482 // t = vsplti c, result = vsldoi t, t, 3
9483 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9484 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9485 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9486 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9487 }
9488 }
9490 return SDValue();
9491 }
9493 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9494 /// the specified operations to build the shuffle.
9495 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9496 SDValue RHS, SelectionDAG &DAG,
9497 const SDLoc &dl) {
9498 unsigned OpNum = (PFEntry >> 26) & 0x0F;
9499 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9500 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
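// Each PFEntry packs a cost in bits 31:30, an opcode in bits 29:26 and two
// 13-bit operand IDs. An ID encodes four indices in [0,8] as base-9 digits
// (8 meaning undef), e.g. <0,1,2,3> is ((0*9+1)*9+2)*9+3 == 102.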
9502 enum {
9503 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9504 OP_VMRGHW,
9505 OP_VMRGLW,
9506 OP_VSPLTISW0,
9507 OP_VSPLTISW1,
9508 OP_VSPLTISW2,
9509 OP_VSPLTISW3,
9510 OP_VSLDOI4,
9511 OP_VSLDOI8,
9512 OP_VSLDOI12
9513 };
9515 if (OpNum == OP_COPY) {
9516 if (LHSID == (1*9+2)*9+3) return LHS;
9517 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9518 return RHS;
9519 }
9521 SDValue OpLHS, OpRHS;
9522 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9523 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9525 int ShufIdxs[16];
9526 switch (OpNum) {
9527 default: llvm_unreachable("Unknown i32 permute!");
9528 case OP_VMRGHW:
9529 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
9530 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9531 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
9532 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9533 break;
9534 case OP_VMRGLW:
9535 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9536 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9537 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9538 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9539 break;
9540 case OP_VSPLTISW0:
9541 for (unsigned i = 0; i != 16; ++i)
9542 ShufIdxs[i] = (i&3)+0;
9543 break;
9544 case OP_VSPLTISW1:
9545 for (unsigned i = 0; i != 16; ++i)
9546 ShufIdxs[i] = (i&3)+4;
9547 break;
9548 case OP_VSPLTISW2:
9549 for (unsigned i = 0; i != 16; ++i)
9550 ShufIdxs[i] = (i&3)+8;
9551 break;
9552 case OP_VSPLTISW3:
9553 for (unsigned i = 0; i != 16; ++i)
9554 ShufIdxs[i] = (i&3)+12;
9555 break;
9556 case OP_VSLDOI4:
9557 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9558 case OP_VSLDOI8:
9559 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9560 case OP_VSLDOI12:
9561 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9562 }
9563 EVT VT = OpLHS.getValueType();
9564 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9565 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9566 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9567 return DAG.getNode(ISD::BITCAST, dl, VT, T);
9568 }
9570 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9571 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9572 /// SDValue.
9573 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9574 SelectionDAG &DAG) const {
9575 const unsigned BytesInVector = 16;
9576 bool IsLE = Subtarget.isLittleEndian();
9577 SDLoc dl(N);
9578 SDValue V1 = N->getOperand(0);
9579 SDValue V2 = N->getOperand(1);
9580 unsigned ShiftElts = 0, InsertAtByte = 0;
9581 bool Swap = false;
9583 // Shifts required to get the byte we want at element 7.
9584 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
9585 0, 15, 14, 13, 12, 11, 10, 9};
9586 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9587 1, 2, 3, 4, 5, 6, 7, 8};
9589 ArrayRef<int> Mask = N->getMask();
9590 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9592 // For each mask element, find out if we're just inserting something
9593 // from V2 into V1 or vice versa.
9594 // Possible permutations inserting an element from V2 into V1:
9595 // X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9596 // 0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9597 // ...
9598 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9599 // Inserting from V1 into V2 will be similar, except mask range will be
9600 // [16,31].
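// For example, the mask <0,1,...,12,20,14,15> moves byte 4 of V2 (element
// 20 - 16) into byte 13 while all other bytes keep their original order,
// so it is a VINSERTB candidate.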
9602 bool FoundCandidate = false;
9603 // If both vector operands for the shuffle are the same vector, the mask
9604 // will contain only elements from the first one and the second one will be
9605 // undef.
9606 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9607 // Go through the mask of bytes to find an element that's being moved
9608 // from one vector to the other.
9609 for (unsigned i = 0; i < BytesInVector; ++i) {
9610 unsigned CurrentElement = Mask[i];
9611 // If 2nd operand is undefined, we should only look for element 7 in the
9613 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9614 continue;
9616 bool OtherElementsInOrder = true;
9617 // Examine the other elements in the Mask to see if they're in original
9618 // order.
9619 for (unsigned j = 0; j < BytesInVector; ++j) {
9620 if (j == i)
9621 continue;
9622 // If CurrentElement is from V1 [0,15], we expect the rest of the Mask to
9623 // be from V2 [16,31] and vice versa. Unless the 2nd operand is undefined,
9624 // in which case we always assume we're picking from the 1st operand.
9625 int MaskOffset =
9626 (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9627 if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9628 OtherElementsInOrder = false;
9629 break;
9630 }
9631 }
9632 // If other elements are in original order, we record the number of shifts
9633 // we need to get the element we want into element 7. Also record which byte
9634 // in the vector we should insert into.
9635 if (OtherElementsInOrder) {
9636 // If 2nd operand is undefined, we assume no shifts and no swapping.
9637 if (V2.isUndef()) {
9638 ShiftElts = 0;
9639 Swap = false;
9640 } else {
9641 // Only need the last 4-bits for shifts; operands are swapped if CurrentElement is >= 2^4.
9642 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9643 : BigEndianShifts[CurrentElement & 0xF];
9644 Swap = CurrentElement < BytesInVector;
9645 }
9646 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9647 FoundCandidate = true;
9648 break;
9649 }
9650 }
9652 if (!FoundCandidate)
9653 return SDValue();
9655 // Candidate found, construct the proper SDAG sequence with VINSERTB,
9656 // optionally with VECSHL if shift is required.
9657 if (Swap)
9658 std::swap(V1, V2);
9659 if (V2.isUndef())
9660 V2 = V1;
9661 if (ShiftElts) {
9662 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9663 DAG.getConstant(ShiftElts, dl, MVT::i32));
9664 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9665 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9666 }
9667 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9668 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9669 }
9671 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9672 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9673 /// SDValue.
9674 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9675 SelectionDAG &DAG) const {
9676 const unsigned NumHalfWords = 8;
9677 const unsigned BytesInVector = NumHalfWords * 2;
9678 // Check that the shuffle is on half-words.
9679 if (!isNByteElemShuffleMask(N, 2, 1))
9680 return SDValue();
9682 bool IsLE = Subtarget.isLittleEndian();
9683 SDLoc dl(N);
9684 SDValue V1 = N->getOperand(0);
9685 SDValue V2 = N->getOperand(1);
9686 unsigned ShiftElts = 0, InsertAtByte = 0;
9687 bool Swap = false;
9689 // Shifts required to get the half-word we want at element 3.
9690 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9691 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9693 uint32_t Mask = 0;
9694 uint32_t OriginalOrderLow = 0x1234567;
9695 uint32_t OriginalOrderHigh = 0x89ABCDEF;
9696 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
9697 // 32-bit space, only need 4-bit nibbles per element.
9698 for (unsigned i = 0; i < NumHalfWords; ++i) {
9699 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9700 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9701 }
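// For example, the identity half-word mask {0,1,2,3,4,5,6,7} packs into
// 0x01234567, which is exactly OriginalOrderLow.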
9703 // For each mask element, find out if we're just inserting something
9704 // from V2 into V1 or vice versa. Possible permutations inserting an element
9705 // from V2 into V1:
9706 // X, 1, 2, 3, 4, 5, 6, 7
9707 // 0, X, 2, 3, 4, 5, 6, 7
9708 // 0, 1, X, 3, 4, 5, 6, 7
9709 // 0, 1, 2, X, 4, 5, 6, 7
9710 // 0, 1, 2, 3, X, 5, 6, 7
9711 // 0, 1, 2, 3, 4, X, 6, 7
9712 // 0, 1, 2, 3, 4, 5, X, 7
9713 // 0, 1, 2, 3, 4, 5, 6, X
9714 // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9716 bool FoundCandidate = false;
9717 // Go through the mask of half-words to find an element that's being moved
9718 // from one vector to the other.
9719 for (unsigned i = 0; i < NumHalfWords; ++i) {
9720 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9721 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9722 uint32_t MaskOtherElts = ~(0xF << MaskShift);
9723 uint32_t TargetOrder = 0x0;
9725 // If both vector operands for the shuffle are the same vector, the mask
9726 // will contain only elements from the first one and the second one will be
9727 // undef.
9728 if (V2.isUndef()) {
9729 ShiftElts = 0;
9730 unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9731 TargetOrder = OriginalOrderLow;
9733 // Skip if this is not the correct element or the mask of the other
9734 // elements doesn't match our expected order.
9735 if (MaskOneElt == VINSERTHSrcElem &&
9736 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9737 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9738 FoundCandidate = true;
9739 break;
9740 }
9741 } else { // If both operands are defined.
9742 // Target order is [8,15] if the current mask is between [0,7].
9743 TargetOrder =
9744 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9745 // Skip if the mask of the other elements doesn't match our expected order.
9746 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9747 // We only need the last 3 bits for the number of shifts.
9748 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9749 : BigEndianShifts[MaskOneElt & 0x7];
9750 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9751 Swap = MaskOneElt < NumHalfWords;
9752 FoundCandidate = true;
9753 break;
9754 }
9755 }
9756 }
9758 if (!FoundCandidate)
9759 return SDValue();
9761 // Candidate found, construct the proper SDAG sequence with VINSERTH,
9762 // optionally with VECSHL if shift is required.
9763 if (Swap)
9764 std::swap(V1, V2);
9765 if (V2.isUndef())
9766 V2 = V1;
9767 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9768 if (ShiftElts) {
9769 // Double ShiftElts because we're left shifting on v16i8 type.
9770 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9771 DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9772 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9773 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9774 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9775 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9776 }
9777 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9778 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9779 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9780 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9781 }
9783 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9784 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9785 /// return the default SDValue.
9786 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9787 SelectionDAG &DAG) const {
9788 // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9789 // to v16i8. Peek through the bitcasts to get the actual operands.
9790 SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9791 SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9793 auto ShuffleMask = SVN->getMask();
9794 SDValue VecShuffle(SVN, 0);
9795 SDLoc DL(SVN);
9797 // Check that we have a four byte shuffle.
9798 if (!isNByteElemShuffleMask(SVN, 4, 1))
9799 return SDValue();
9801 // Canonicalize the RHS being a BUILD_VECTOR when lowering to xxsplti32dx.
9802 if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9803 std::swap(LHS, RHS);
9804 VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9805 ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9806 }
9808 // Ensure that the RHS is a vector of constants.
9809 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9810 if (!BVN)
9811 return SDValue();
9813 // Check if RHS is a splat of 4-bytes (or smaller).
9814 APInt APSplatValue, APSplatUndef;
9815 unsigned SplatBitSize;
9816 bool HasAnyUndefs;
9817 if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9818 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9819 SplatBitSize > 32)
9820 return SDValue();
9822 // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9823 // The instruction splats a constant C into two words of the source vector
9824 // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9825 // Thus we check that the shuffle mask is the equivalent of
9826 // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9827 // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9828 // within each word are consecutive, so we only need to check the first byte.
9829 SDValue Index;
9830 bool IsLE = Subtarget.isLittleEndian();
9831 if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9832 (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9833 ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9834 Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9835 else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9836 (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9837 ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9838 Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9839 else
9840 return SDValue();
9842 // If the splat is narrower than 32-bits, we need to get the 32-bit value
9843 // by duplicating the splat value to fill a full word.
9844 unsigned SplatVal = APSplatValue.getZExtValue();
9845 for (; SplatBitSize < 32; SplatBitSize <<= 1)
9846 SplatVal |= (SplatVal << SplatBitSize);
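// For example, an 8-bit splat of 0xAB widens to 0xABAB and then to
// 0xABABABAB before being used as the 32-bit immediate.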
9848 SDValue SplatNode = DAG.getNode(
9849 PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9850 Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9851 return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9852 }
9854 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9855 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9856 /// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
9857 /// i.e (or (shl x, C1), (srl x, 128-C1)).
9858 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9859 assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9860 assert(Op.getValueType() == MVT::v1i128 &&
9861 "Only set v1i128 as custom, other type shouldn't reach here!");
9863 SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9864 SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9865 unsigned SHLAmt = N1.getConstantOperandVal(0);
9866 if (SHLAmt % 8 == 0) {
9867 SmallVector<int, 16> Mask(16, 0);
9868 std::iota(Mask.begin(), Mask.end(), 0);
9869 std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
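// For example, SHLAmt == 16 rotates the identity mask to
// <2,3,...,15,0,1>, a rotation of the vector by two bytes.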
9870 if (SDValue Shuffle =
9871 DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9872 DAG.getUNDEF(MVT::v16i8), Mask))
9873 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9874 }
9875 SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9876 SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9877 DAG.getConstant(SHLAmt, dl, MVT::i32));
9878 SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9879 DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9880 SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9881 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9882 }
9884 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
9885 /// is a shuffle we can handle in a single instruction, return it. Otherwise,
9886 /// return the code it can be lowered into. Worst case, it can always be
9887 /// lowered into a vperm.
9888 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9889 SelectionDAG &DAG) const {
9890 SDLoc dl(Op);
9891 SDValue V1 = Op.getOperand(0);
9892 SDValue V2 = Op.getOperand(1);
9893 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9895 // Any nodes that were combined in the target-independent combiner prior
9896 // to vector legalization will not be sent to the target combine. Try to
9897 // combine it here.
9898 if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9899 if (!isa<ShuffleVectorSDNode>(NewShuffle))
9900 return NewShuffle;
9901 Op = NewShuffle;
9902 SVOp = cast<ShuffleVectorSDNode>(Op);
9903 V1 = Op.getOperand(0);
9904 V2 = Op.getOperand(1);
9905 }
9906 EVT VT = Op.getValueType();
9907 bool isLittleEndian = Subtarget.isLittleEndian();
9909 unsigned ShiftElts, InsertAtByte;
9910 bool Swap = false;
9912 // If this is a load-and-splat, we can do that with a single instruction
9913 // in some cases. However if the load has multiple uses, we don't want to
9914 // combine it because that will just produce multiple loads.
9915 const SDValue *InputLoad = getNormalLoadInput(V1);
9916 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9917 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9918 InputLoad->hasOneUse()) {
9919 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9920 int SplatIdx =
9921 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9923 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9924 // For 4-byte load-and-splat, we need Power9.
9925 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9926 uint64_t Offset = 0;
9927 if (IsFourByte)
9928 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9929 else
9930 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
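// For example, splatting element 1 of a v2i64 on little-endian gives
// Offset == (1 - 1) * 8 == 0, i.e. the doubleword at the base address.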
9932 // If we are loading a partial vector, it does not make sense to adjust
9933 // the base pointer. This happens with (splat (s_to_v_permuted (ld))).
9934 if (LD->getMemoryVT().getSizeInBits() == (IsFourByte ? 32 : 64))
9935 Offset = 0;
9936 SDValue BasePtr = LD->getBasePtr();
9937 if (Offset != 0)
9938 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9939 BasePtr, DAG.getIntPtrConstant(Offset, dl));
9940 SDValue Ops[] = {
9941 LD->getChain(), // Chain
9942 BasePtr, // BasePtr
9943 DAG.getValueType(Op.getValueType()) // VT
9944 };
9945 SDVTList VTL =
9946 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9947 SDValue LdSplt =
9948 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9949 Ops, LD->getMemoryVT(), LD->getMemOperand());
9950 if (LdSplt.getValueType() != SVOp->getValueType(0))
9951 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9952 return LdSplt;
9953 }
9954 }
9955 if (Subtarget.hasP9Vector() &&
9956 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9957 isLittleEndian)) {
9958 if (Swap)
9959 std::swap(V1, V2);
9960 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9961 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9962 if (ShiftElts) {
9963 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9964 DAG.getConstant(ShiftElts, dl, MVT::i32));
9965 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9966 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9967 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9968 }
9969 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9970 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9971 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9972 }
9974 if (Subtarget.hasPrefixInstrs()) {
9975 SDValue SplatInsertNode;
9976 if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9977 return SplatInsertNode;
9978 }
9980 if (Subtarget.hasP9Altivec()) {
9981 SDValue NewISDNode;
9982 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9983 return NewISDNode;
9985 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9986 return NewISDNode;
9987 }
9989 if (Subtarget.hasVSX() &&
9990 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9991 if (Swap)
9992 std::swap(V1, V2);
9993 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9994 SDValue Conv2 =
9995 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9997 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9998 DAG.getConstant(ShiftElts, dl, MVT::i32));
9999 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
10000 }
10002 if (Subtarget.hasVSX() &&
10003 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
10004 if (Swap)
10005 std::swap(V1, V2);
10006 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10007 SDValue Conv2 =
10008 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
10010 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
10011 DAG.getConstant(ShiftElts, dl, MVT::i32));
10012 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
10013 }
10015 if (Subtarget.hasP9Vector()) {
10016 if (PPC::isXXBRHShuffleMask(SVOp)) {
10017 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
10018 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
10019 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
10020 } else if (PPC::isXXBRWShuffleMask(SVOp)) {
10021 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10022 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
10023 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
10024 } else if (PPC::isXXBRDShuffleMask(SVOp)) {
10025 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10026 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
10027 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
10028 } else if (PPC::isXXBRQShuffleMask(SVOp)) {
10029 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
10030 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
10031 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
10032 }
10033 }
10035 if (Subtarget.hasVSX()) {
10036 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
10037 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
10039 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10040 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
10041 DAG.getConstant(SplatIdx, dl, MVT::i32));
10042 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
10043 }
10045 // Left shifts of 8 bytes are actually swaps. Convert accordingly.
10046 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
10047 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
10048 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
10049 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
10050 }
10051 }
10053 if (Subtarget.hasQPX()) {
10054 if (VT.getVectorNumElements() != 4)
10055 return SDValue();
10057 if (V2.isUndef()) V2 = V1;
10059 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
10060 if (AlignIdx != -1) {
10061 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
10062 DAG.getConstant(AlignIdx, dl, MVT::i32));
10063 } else if (SVOp->isSplat()) {
10064 int SplatIdx = SVOp->getSplatIndex();
10065 if (SplatIdx >= 4) {
10066 std::swap(V1, V2);
10067 SplatIdx -= 4;
10068 }
10070 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
10071 DAG.getConstant(SplatIdx, dl, MVT::i32));
10072 }
10074 // Lower this into a qvgpci/qvfperm pair.
10076 // Compute the qvgpci literal
10077 unsigned idx = 0;
10078 for (unsigned i = 0; i < 4; ++i) {
10079 int m = SVOp->getMaskElt(i);
10080 unsigned mm = m >= 0 ? (unsigned) m : i;
10081 idx |= mm << (3-i)*3;
10082 }
10084 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
10085 DAG.getConstant(idx, dl, MVT::i32));
10086 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
10087 }
10089 // Cases that are handled by instructions that take permute immediates
10090 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
10091 // selected by the instruction selector.
10092 if (V2.isUndef()) {
10093 if (PPC::isSplatShuffleMask(SVOp, 1) ||
10094 PPC::isSplatShuffleMask(SVOp, 2) ||
10095 PPC::isSplatShuffleMask(SVOp, 4) ||
10096 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
10097 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
10098 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
10099 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
10100 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
10101 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
10102 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
10103 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
10104 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
10105 (Subtarget.hasP8Altivec() && (
10106 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
10107 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
10108 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
10109 return Op;
10110 }
10111 }
10113 // Altivec has a variety of "shuffle immediates" that take two vector inputs
10114 // and produce a fixed permutation. If any of these match, do not lower to
10115 // VPERM.
10116 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
10117 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10118 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10119 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
10120 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10121 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10122 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10123 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10124 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10125 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10126 (Subtarget.hasP8Altivec() && (
10127 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10128 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
10129 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
10130 return Op;
10132 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
10133 // perfect shuffle table to emit an optimal matching sequence.
10134 ArrayRef<int> PermMask = SVOp->getMask();
10136 unsigned PFIndexes[4];
10137 bool isFourElementShuffle = true;
10138 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
10139 unsigned EltNo = 8; // Start out undef.
10140 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
10141 if (PermMask[i*4+j] < 0)
10142 continue; // Undef, ignore it.
10144 unsigned ByteSource = PermMask[i*4+j];
10145 if ((ByteSource & 3) != j) {
10146 isFourElementShuffle = false;
10147 break;
10148 }
10150 if (EltNo == 8) {
10151 EltNo = ByteSource/4;
10152 } else if (EltNo != ByteSource/4) {
10153 isFourElementShuffle = false;
10154 break;
10155 }
10156 }
10157 PFIndexes[i] = EltNo;
10158 }
10160 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
10161 // perfect shuffle vector to determine if it is cost effective to do this as
10162 // discrete instructions, or whether we should use a vperm.
10163 // For now, we skip this for little endian until such time as we have a
10164 // little-endian perfect shuffle table.
10165 if (isFourElementShuffle && !isLittleEndian) {
10166 // Compute the index in the perfect shuffle table.
10167 unsigned PFTableIndex =
10168 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
10170 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10171 unsigned Cost = (PFEntry >> 30);
10173 // Determining when to avoid vperm is tricky. Many things affect the cost
10174 // of vperm, particularly how many times the perm mask needs to be computed.
10175 // For example, if the perm mask can be hoisted out of a loop or is already
10176 // used (perhaps because there are multiple permutes with the same shuffle
10177 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
10178 // the loop requires an extra register.
10180 // As a compromise, we only emit discrete instructions if the shuffle can be
10181 // generated in 3 or fewer operations. When we have loop information
10182 // available, if this block is within a loop, we should avoid using vperm
10183 // for 3-operation perms and use a constant pool load instead.
10184 if (Cost < 3)
10185 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
10186 }
10188 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
10189 // vector that will get spilled to the constant pool.
10190 if (V2.isUndef()) V2 = V1;
10192 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
10193 // that it is in input element units, not in bytes. Convert now.
10195 // For little endian, the order of the input vectors is reversed, and
10196 // the permutation mask is complemented with respect to 31. This is
10197 // necessary to produce proper semantics with the big-endian-biased vperm
10198 // instruction.
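// For example, on little-endian a request for source byte 0 becomes
// permute-control entry 31 - 0 == 31, source byte 1 becomes 30, and so on.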
10199 EVT EltVT = V1.getValueType().getVectorElementType();
10200 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
10202 SmallVector<SDValue, 16> ResultMask;
10203 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
10204 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10206 for (unsigned j = 0; j != BytesPerElement; ++j)
10207 if (isLittleEndian)
10208 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
10209 dl, MVT::i32));
10210 else
10211 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
10212 MVT::i32));
10213 }
10215 ShufflesHandledWithVPERM++;
10216 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
10217 LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
10218 LLVM_DEBUG(SVOp->dump());
10219 LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
10220 LLVM_DEBUG(VPermMask.dump());
10222 if (isLittleEndian)
10223 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10224 V2, V1, VPermMask);
10225 else
10226 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10227 V1, V2, VPermMask);
10228 }
10230 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
10231 /// vector comparison. If it is, return true and fill in Opc/isDot with
10232 /// information about the intrinsic.
10233 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10234 bool &isDot, const PPCSubtarget &Subtarget) {
10235 unsigned IntrinsicID =
10236 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
10237 CompareOpc = -1;
10238 isDot = false;
10239 switch (IntrinsicID) {
10240 default:
10241 return false;
10242 // Comparison predicates.
10243 case Intrinsic::ppc_altivec_vcmpbfp_p:
10247 case Intrinsic::ppc_altivec_vcmpeqfp_p:
10251 case Intrinsic::ppc_altivec_vcmpequb_p:
10255 case Intrinsic::ppc_altivec_vcmpequh_p:
10259 case Intrinsic::ppc_altivec_vcmpequw_p:
10263 case Intrinsic::ppc_altivec_vcmpequd_p:
10264 if (Subtarget.hasP8Altivec()) {
10270 case Intrinsic::ppc_altivec_vcmpneb_p:
10271 case Intrinsic::ppc_altivec_vcmpneh_p:
10272 case Intrinsic::ppc_altivec_vcmpnew_p:
10273 case Intrinsic::ppc_altivec_vcmpnezb_p:
10274 case Intrinsic::ppc_altivec_vcmpnezh_p:
10275 case Intrinsic::ppc_altivec_vcmpnezw_p:
10276 if (Subtarget.hasP9Altivec()) {
10277 switch (IntrinsicID) {
10279 llvm_unreachable("Unknown comparison intrinsic.");
10280 case Intrinsic::ppc_altivec_vcmpneb_p:
10283 case Intrinsic::ppc_altivec_vcmpneh_p:
10286 case Intrinsic::ppc_altivec_vcmpnew_p:
10289 case Intrinsic::ppc_altivec_vcmpnezb_p:
10292 case Intrinsic::ppc_altivec_vcmpnezh_p:
10295 case Intrinsic::ppc_altivec_vcmpnezw_p:
10303 case Intrinsic::ppc_altivec_vcmpgefp_p:
10307 case Intrinsic::ppc_altivec_vcmpgtfp_p:
10311 case Intrinsic::ppc_altivec_vcmpgtsb_p:
10315 case Intrinsic::ppc_altivec_vcmpgtsh_p:
10319 case Intrinsic::ppc_altivec_vcmpgtsw_p:
10323 case Intrinsic::ppc_altivec_vcmpgtsd_p:
10324 if (Subtarget.hasP8Altivec()) {
10330 case Intrinsic::ppc_altivec_vcmpgtub_p:
10334 case Intrinsic::ppc_altivec_vcmpgtuh_p:
10338 case Intrinsic::ppc_altivec_vcmpgtuw_p:
10342 case Intrinsic::ppc_altivec_vcmpgtud_p:
10343 if (Subtarget.hasP8Altivec()) {
10350 // VSX predicate comparisons use the same infrastructure
10351 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10352 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10353 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10354 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10355 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10356 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10357 if (Subtarget.hasVSX()) {
10358 switch (IntrinsicID) {
10359 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10362 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10365 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10368 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10371 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10374 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10383 // Normal Comparisons.
10384 case Intrinsic::ppc_altivec_vcmpbfp:
10387 case Intrinsic::ppc_altivec_vcmpeqfp:
10390 case Intrinsic::ppc_altivec_vcmpequb:
10393 case Intrinsic::ppc_altivec_vcmpequh:
10396 case Intrinsic::ppc_altivec_vcmpequw:
10399 case Intrinsic::ppc_altivec_vcmpequd:
10400 if (Subtarget.hasP8Altivec())
10405 case Intrinsic::ppc_altivec_vcmpneb:
10406 case Intrinsic::ppc_altivec_vcmpneh:
10407 case Intrinsic::ppc_altivec_vcmpnew:
10408 case Intrinsic::ppc_altivec_vcmpnezb:
10409 case Intrinsic::ppc_altivec_vcmpnezh:
10410 case Intrinsic::ppc_altivec_vcmpnezw:
10411 if (Subtarget.hasP9Altivec())
10412 switch (IntrinsicID) {
10414 llvm_unreachable("Unknown comparison intrinsic.");
10415 case Intrinsic::ppc_altivec_vcmpneb:
10418 case Intrinsic::ppc_altivec_vcmpneh:
10421 case Intrinsic::ppc_altivec_vcmpnew:
10424 case Intrinsic::ppc_altivec_vcmpnezb:
10427 case Intrinsic::ppc_altivec_vcmpnezh:
10430 case Intrinsic::ppc_altivec_vcmpnezw:
10437 case Intrinsic::ppc_altivec_vcmpgefp:
10440 case Intrinsic::ppc_altivec_vcmpgtfp:
10443 case Intrinsic::ppc_altivec_vcmpgtsb:
10446 case Intrinsic::ppc_altivec_vcmpgtsh:
10449 case Intrinsic::ppc_altivec_vcmpgtsw:
10452 case Intrinsic::ppc_altivec_vcmpgtsd:
10453 if (Subtarget.hasP8Altivec())
10458 case Intrinsic::ppc_altivec_vcmpgtub:
10461 case Intrinsic::ppc_altivec_vcmpgtuh:
10464 case Intrinsic::ppc_altivec_vcmpgtuw:
10467 case Intrinsic::ppc_altivec_vcmpgtud:
10468 if (Subtarget.hasP8Altivec())
10469 CompareOpc = 711;
10470 else
10471 return false;
10472 break;
10473 }
10474 return true;
10475 }
10477 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10478 /// lower, do it, otherwise return null.
10479 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10480 SelectionDAG &DAG) const {
10481 unsigned IntrinsicID =
10482 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10484 SDLoc dl(Op);
10486 if (IntrinsicID == Intrinsic::thread_pointer) {
10487 // Reads the thread pointer register, used for __builtin_thread_pointer.
10488 if (Subtarget.isPPC64())
10489 return DAG.getRegister(PPC::X13, MVT::i64);
10490 return DAG.getRegister(PPC::R2, MVT::i32);
10491 }
10493 // If this is a lowered altivec predicate compare, CompareOpc is set to the
10494 // opcode number of the comparison.
10495 int CompareOpc;
10496 bool isDot;
10497 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10498 return SDValue(); // Don't custom lower most intrinsics.
10500 // If this is a non-dot comparison, make the VCMP node and we are done.
10501 if (!isDot) {
10502 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10503 Op.getOperand(1), Op.getOperand(2),
10504 DAG.getConstant(CompareOpc, dl, MVT::i32));
10505 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10506 }
10508 // Create the PPCISD altivec 'dot' comparison node.
10509 SDValue Ops[] = {
10510 Op.getOperand(2), // LHS
10511 Op.getOperand(3), // RHS
10512 DAG.getConstant(CompareOpc, dl, MVT::i32)
10513 };
10514 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10515 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
10517 // Now that we have the comparison, emit a copy from the CR to a GPR.
10518 // This is flagged to the above dot comparison.
10519 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10520 DAG.getRegister(PPC::CR6, MVT::i32),
10521 CompNode.getValue(1));
10523 // Unpack the result based on how the target uses it.
10524 unsigned BitNo; // Bit # of CR6.
10525 bool InvertBit; // Invert result?
10526 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10527 default: // Can't happen, don't crash on invalid number though.
10528 case 0: // Return the value of the EQ bit of CR6.
10529 BitNo = 0; InvertBit = false;
10530 break;
10531 case 1: // Return the inverted value of the EQ bit of CR6.
10532 BitNo = 0; InvertBit = true;
10533 break;
10534 case 2: // Return the value of the LT bit of CR6.
10535 BitNo = 2; InvertBit = false;
10536 break;
10537 case 3: // Return the inverted value of the LT bit of CR6.
10538 BitNo = 2; InvertBit = true;
10539 break;
10540 }
10542 // Shift the bit into the low position.
10543 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10544 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
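// CR6 sits in bits 7..4 of the MFOCRF result (LT=7, GT=6, EQ=5, SO=4), so
// the EQ bit (BitNo 0) needs a shift of 5 and the LT bit (BitNo 2) a shift
// of 7, which is what 8 - (3 - BitNo) computes.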
10545 // Isolate the bit.
10546 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10547 DAG.getConstant(1, dl, MVT::i32));
10549 // If we are supposed to, toggle the bit.
10550 if (InvertBit)
10551 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10552 DAG.getConstant(1, dl, MVT::i32));
10553 return Flags;
10554 }
10556 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10557 SelectionDAG &DAG) const {
10558 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10559 // the beginning of the argument list.
10560 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10561 SDLoc DL(Op);
10562 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10563 case Intrinsic::ppc_cfence: {
10564 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10565 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10566 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10567 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10568 Op.getOperand(ArgStart + 1)),
10569 Op.getOperand(0)),
10570 0);
10571 }
10572 default:
10573 break;
10574 }
10575 return SDValue();
10576 }
10578 // Lower scalar BSWAP64 to xxbrd.
10579 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10580 SDLoc dl(Op);
10581 // MTVSRDD
10582 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10583 Op.getOperand(0));
10584 // XXBRD
10585 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10586 // MFVSRD
10587 int VectorIndex = 0;
10588 if (Subtarget.isLittleEndian())
10589 VectorIndex = 1;
10590 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10591 DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10592 return Op;
10593 }
10595 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10596 // compared to a value that is atomically loaded (atomic loads zero-extend).
10597 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10598 SelectionDAG &DAG) const {
10599 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10600 "Expecting an atomic compare-and-swap here.");
10602 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10603 EVT MemVT = AtomicNode->getMemoryVT();
10604 if (MemVT.getSizeInBits() >= 32)
10605 return Op;
10607 SDValue CmpOp = Op.getOperand(2);
10608 // If this is already correctly zero-extended, leave it alone.
10609 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10610 if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10611 return Op;
10613 // Clear the high bits of the compare operand.
10614 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10615 SDValue NewCmpOp =
10616 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10617 DAG.getConstant(MaskVal, dl, MVT::i32));
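// For example, an i8 compare operand is masked with (1 << 8) - 1 == 0xFF
// and an i16 operand with 0xFFFF, matching the zero-extension performed by
// the atomic load it will be compared against.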
10619 // Replace the existing compare operand with the properly zero-extended one.
10620 SmallVector<SDValue, 4> Ops;
10621 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10622 Ops.push_back(AtomicNode->getOperand(i));
10623 Ops[2] = NewCmpOp;
10624 MachineMemOperand *MMO = AtomicNode->getMemOperand();
10625 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10626 unsigned NodeTy =
10627 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10628 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10629 }
10631 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10632 SelectionDAG &DAG) const {
10633 SDLoc dl(Op);
10634 // Create a stack slot that is 16-byte aligned.
10635 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10636 int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10637 EVT PtrVT = getPointerTy(DAG.getDataLayout());
10638 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10640 // Store the input value into Value#0 of the stack slot.
10641 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10642 MachinePointerInfo());
10644 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10645 }
10647 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10648 SelectionDAG &DAG) const {
10649 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10650 "Should only be called for ISD::INSERT_VECTOR_ELT");
10652 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10653 // We have legal lowering for constant indices but not for variable ones.
10654 if (!C)
10655 return SDValue();
10657 EVT VT = Op.getValueType();
10658 SDLoc dl(Op);
10659 SDValue V1 = Op.getOperand(0);
10660 SDValue V2 = Op.getOperand(1);
10661 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10662 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10663 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10664 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10665 unsigned InsertAtElement = C->getZExtValue();
10666 unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10667 if (Subtarget.isLittleEndian()) {
10668 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10669 }
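// For example, inserting element 0 of a v8i16 on little-endian becomes a
// byte insert at (16 - 2) - 0 == 14.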
10670 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10671 DAG.getConstant(InsertAtByte, dl, MVT::i32));
10672 }
10674 return SDValue();
10675 }
10676 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
10677 SelectionDAG &DAG) const {
10678 SDLoc dl(Op);
10679 SDNode *N = Op.getNode();
10681 assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
10682 "Unknown extract_vector_elt type");
10684 SDValue Value = N->getOperand(0);
10686 // The first part of this is like the store lowering except that we don't
10687 // need to track the chain.
10689 // The values are now known to be -1 (false) or 1 (true). To convert this
10690 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10691 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
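// Check the arithmetic: V == -1 gives (-1 + 1.0) * 0.5 == 0.0 and V == 1
// gives (1 + 1.0) * 0.5 == 1.0, exactly the 0/1 values needed.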
10692 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10694 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10695 // understand how to form the extending load.
10696 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10698 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10700 // Now convert to an integer and store.
10701 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10702 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10703 Value);
10705 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10706 int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10707 MachinePointerInfo PtrInfo =
10708 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10709 EVT PtrVT = getPointerTy(DAG.getDataLayout());
10710 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10712 SDValue StoreChain = DAG.getEntryNode();
10713 SDValue Ops[] = {StoreChain,
10714 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10715 FIdx};
10716 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10718 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10719 dl, VTs, Ops, MVT::v4i32, PtrInfo);
10721 // Extract the value requested.
10722 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10723 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10724 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10726 SDValue IntVal =
10727 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
10729 if (!Subtarget.useCRBits())
10730 return IntVal;
10732 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
10733 }
10735 /// Lowering for QPX v4i1 loads
10736 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10737 SelectionDAG &DAG) const {
10738 SDLoc dl(Op);
10739 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10740 SDValue LoadChain = LN->getChain();
10741 SDValue BasePtr = LN->getBasePtr();
10743 if (Op.getValueType() == MVT::v4f64 ||
10744 Op.getValueType() == MVT::v4f32) {
10745 EVT MemVT = LN->getMemoryVT();
10746 unsigned Alignment = LN->getAlignment();
10748 // If this load is properly aligned, then it is legal.
10749 if (Alignment >= MemVT.getStoreSize())
10750 return Op;
10752 EVT ScalarVT = Op.getValueType().getScalarType(),
10753 ScalarMemVT = MemVT.getScalarType();
10754 unsigned Stride = ScalarMemVT.getStoreSize();
10756 SDValue Vals[4], LoadChains[4];
10757 for (unsigned Idx = 0; Idx < 4; ++Idx) {
10758 SDValue Load;
10759 if (ScalarVT != ScalarMemVT)
10760 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
10761 BasePtr,
10762 LN->getPointerInfo().getWithOffset(Idx * Stride),
10763 ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10764 LN->getMemOperand()->getFlags(), LN->getAAInfo());
10765 else
10766 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
10767 LN->getPointerInfo().getWithOffset(Idx * Stride),
10768 MinAlign(Alignment, Idx * Stride),
10769 LN->getMemOperand()->getFlags(), LN->getAAInfo());
10771 if (Idx == 0 && LN->isIndexed()) {
10772 assert(LN->getAddressingMode() == ISD::PRE_INC &&
10773 "Unknown addressing mode on vector load");
10774 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
10775 LN->getAddressingMode());
10776 }
10778 Vals[Idx] = Load;
10779 LoadChains[Idx] = Load.getValue(1);
10781 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10782 DAG.getConstant(Stride, dl,
10783 BasePtr.getValueType()));
10784 }
10786 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10787 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
10789 if (LN->isIndexed()) {
10790 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
10791 return DAG.getMergeValues(RetOps, dl);
10792 }
10794 SDValue RetOps[] = { Value, TF };
10795 return DAG.getMergeValues(RetOps, dl);
10796 }
10798 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
10799 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
10801 // To lower v4i1 from a byte array, we load the byte elements of the
10802 // vector and then reuse the BUILD_VECTOR logic.
10804 SDValue VectElmts[4], VectElmtChains[4];
10805 for (unsigned i = 0; i < 4; ++i) {
10806 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10807 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10809 VectElmts[i] = DAG.getExtLoad(
10810 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
10811 LN->getPointerInfo().getWithOffset(i), MVT::i8,
10812 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
10813 VectElmtChains[i] = VectElmts[i].getValue(1);
10814 }
10816 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
10817 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
10819 SDValue RVals[] = { Value, LoadChain };
10820 return DAG.getMergeValues(RVals, dl);
10821 }
10823 /// Lowering for QPX v4i1 stores
10824 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10825 SelectionDAG &DAG) const {
10826 SDLoc dl(Op);
10827 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10828 SDValue StoreChain = SN->getChain();
10829 SDValue BasePtr = SN->getBasePtr();
10830 SDValue Value = SN->getValue();
10832 if (Value.getValueType() == MVT::v4f64 ||
10833 Value.getValueType() == MVT::v4f32) {
10834 EVT MemVT = SN->getMemoryVT();
10835 unsigned Alignment = SN->getAlignment();
10837 // If this store is properly aligned, then it is legal.
10838 if (Alignment >= MemVT.getStoreSize())
10839 return Op;
10841 EVT ScalarVT = Value.getValueType().getScalarType(),
10842 ScalarMemVT = MemVT.getScalarType();
10843 unsigned Stride = ScalarMemVT.getStoreSize();
10845 SDValue Stores[4];
10846 for (unsigned Idx = 0; Idx < 4; ++Idx) {
10847 SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
10848 DAG.getVectorIdxConstant(Idx, dl));
10849 SDValue Store;
10850 if (ScalarVT != ScalarMemVT)
10851 Store =
10852 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
10853 SN->getPointerInfo().getWithOffset(Idx * Stride),
10854 ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10855 SN->getMemOperand()->getFlags(), SN->getAAInfo());
10856 else
10857 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
10858 SN->getPointerInfo().getWithOffset(Idx * Stride),
10859 MinAlign(Alignment, Idx * Stride),
10860 SN->getMemOperand()->getFlags(), SN->getAAInfo());
10862 if (Idx == 0 && SN->isIndexed()) {
10863 assert(SN->getAddressingMode() == ISD::PRE_INC &&
10864 "Unknown addressing mode on vector store");
10865 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
10866 SN->getAddressingMode());
10867 }
10869 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10870 DAG.getConstant(Stride, dl,
10871 BasePtr.getValueType()));
10872 Stores[Idx] = Store;
10875 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10877 if (SN->isIndexed()) {
10878 SDValue RetOps[] = { TF, Stores[0].getValue(1) };
10879 return DAG.getMergeValues(RetOps, dl);
10885 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
10886 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
10888 // The values are now known to be -1 (false) or 1 (true). To convert this
10889 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10890 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10891 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10893 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10894 // understand how to form the extending load.
10895 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10897 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10899 // Now convert to an integer and store.
10900 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10901 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
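
  // The converted values are staged through a 16-byte stack slot: qvstfiw
  // stores the word elements of the QPX register to memory, and the code
  // below reloads them as individual i32 values before truncating each one
  // to a single byte of the in-memory v4i1 representation.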
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
    dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Move data into the byte array.
  SDValue Loads[4], LoadChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                           PtrInfo.getWithOffset(Offset));
    LoadChains[i] = Loads[i].getValue(1);
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SDValue Stores[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores[i] = DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
        SN->getAAInfo());
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}
SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
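    // There is no 32-bit element vector multiply here, so the product is
    // assembled from 16-bit halves using the identity
    //   x*y = xl*yl + ((xl*yh + xh*yl) << 16)  (mod 2^32),
    // where xh/xl and yh/yl are the high and low halfwords of each element:
    // vmulouh forms the xl*yl terms, vmsumuhm against the half-rotated RHS
    // accumulates both cross products, and vslw shifts them into the high
    // halfword before the final add.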
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
    // +16 as shift amt.
    SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together. Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}
SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {

  assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");

  EVT VT = Op.getValueType();
  assert(VT.isVector() &&
         "Only set vector abs as custom, scalar abs shouldn't reach here!");
  assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
          VT == MVT::v16i8) &&
         "Unexpected vector element type!");
  assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
         "Current subtarget doesn't support smax v2i64!");

  // For vector abs, it can be lowered to:
  // abs x
  // ==>
  // y = -x
  // smax(x, y)

  SDLoc dl(Op);
  SDValue X = Op.getOperand(0);
  SDValue Zero = DAG.getConstant(0, dl, VT);
  SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);

  // SMAX patch https://reviews.llvm.org/D47332
  // hasn't landed yet, so use intrinsic first here.
  // TODO: Should use SMAX directly once SMAX patch landed
  Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
  if (VT == MVT::v2i64)
    BifID = Intrinsic::ppc_altivec_vmaxsd;
  else if (VT == MVT::v8i16)
    BifID = Intrinsic::ppc_altivec_vmaxsh;
  else if (VT == MVT::v16i8)
    BifID = Intrinsic::ppc_altivec_vmaxsb;

  return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
}
// Custom lowering for fpext v2f32 to v2f64
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {

  assert(Op.getOpcode() == ISD::FP_EXTEND &&
         "Should only be called for ISD::FP_EXTEND");

  // FIXME: handle extends from half precision float vectors on P9.
  // We only want to custom lower an extend from v2f32 to v2f64.
  if (Op.getValueType() != MVT::v2f64 ||
      Op.getOperand(0).getValueType() != MVT::v2f32)
    return SDValue();

  SDLoc dl(Op);
  SDValue Op0 = Op.getOperand(0);
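
  // Each case below arranges for the two f32 values being extended to land
  // in one doubleword pair of a v4f32, so a single PPCISD::FP_EXTEND_HALF
  // (whose constant operand selects the upper (0) or lower (1) pair, per its
  // description in PPCISelLowering.h) can produce the v2f64 result.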
  switch (Op0.getOpcode()) {
  default:
    return SDValue();
  case ISD::EXTRACT_SUBVECTOR: {
    assert(Op0.getNumOperands() == 2 &&
           isa<ConstantSDNode>(Op0->getOperand(1)) &&
           "Node should have 2 operands with second one being a constant!");

    if (Op0.getOperand(0).getValueType() != MVT::v4f32)
      return SDValue();

    // Custom lower is only done for high or low doubleword.
    int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (Idx % 2 != 0)
      return SDValue();

    // Since input is v4f32, at this point Idx is either 0 or 2.
    // Shift to get the doubleword position we want.
    int DWord = Idx >> 1;

    // High and low word positions are different on little endian.
    if (Subtarget.isLittleEndian())
      DWord ^= 0x1;

    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
                       Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
  }
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FSUB: {
    SDValue NewLoad[2];
    for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
      SDValue LdOp = Op0.getOperand(i);
      if (LdOp.getOpcode() != ISD::LOAD)
        return SDValue();
      // Generate new load node.
      LoadSDNode *LD = cast<LoadSDNode>(LdOp);
      SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
      NewLoad[i] = DAG.getMemIntrinsicNode(
          PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
          LD->getMemoryVT(), LD->getMemOperand());
    }
    SDValue NewOp =
        DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
                    NewLoad[1], Op0.getNode()->getFlags());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op0);
    SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
    SDValue NewLd = DAG.getMemIntrinsicNode(
        PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
        LD->getMemoryVT(), LD->getMemOperand());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  }
  llvm_unreachable("ERROR: Should return for all cases within switch.");
}
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);

  // Variable argument lowering.
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);

  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

  // Exception handling lowering.
  case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);
  case ISD::ABS:                return LowerABS(Op, DAG);
  case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
  case ISD::ROTL:               return LowerROTL(Op, DAG);

  // For counter-based loop handling.
  case ISD::INTRINSIC_W_CHAIN:  return SDValue();

  case ISD::BITCAST:            return LowerBITCAST(Op, DAG);

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);

  case ISD::INTRINSIC_VOID:
    return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::BSWAP:
    return LowerBSWAP(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP:
    return LowerATOMIC_CMP_SWAP(Op, DAG);
  }
}
void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
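    // On 32-bit targets the 64-bit time base cannot be returned in a single
    // register, so READ_TIME_BASE produces two i32 halves that are glued
    // back together into an i64 with BUILD_PAIR below.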
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
    Results.push_back(RTB.getValue(2));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::loop_decrement)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  case ISD::TRUNCATE: {
    EVT TrgVT = N->getValueType(0);
    EVT OpVT = N->getOperand(0).getValueType();
    if (TrgVT.isVector() &&
        isOperationCustom(N->getOpcode(), TrgVT) &&
        OpVT.getSizeInBits() <= 128 &&
        isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
      Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
    return;
  }
  case ISD::BITCAST:
    // Don't handle bitcast here.
    return;
  case ISD::FP_EXTEND:
    SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
    if (Lowered)
      Results.push_back(Lowered);
    return;
  }
}
//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}
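
// On the trailing side of the mapping, acquire and stronger orderings need a
// barrier after the atomic access; on 64-bit subtargets a plain acquire load
// uses ppc_cfence (a control dependency plus isync) instead of lwsync, as
// emitted below.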
Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for rmw operation.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register ptrA = MI.getOperand(1).getReg();
  Register ptrB = MI.getOperand(2).getReg();
  Register incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
                                                   : &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  // For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  //  loop2MBB:
  //   st[wd]cx. dest, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}
MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
    MachineInstr &MI, MachineBasicBlock *BB,
    bool is8bit, // operation
    unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
                            CmpPred);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register ptrA = MI.getOperand(1).getReg();
  Register ptrB = MI.getOperand(2).getReg();
  Register incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
      CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  const TargetRegisterClass *RC =
      is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register PtrReg = RegInfo.createVirtualRegister(RC);
  Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
  Register ShiftReg =
      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
  Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
  Register MaskReg = RegInfo.createVirtualRegister(GPRC);
  Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
  Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
  Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
  Register Ptr1Reg;
  Register TmpReg =
      (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word. Hence all this nasty bookkeeping code.
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA)
        .addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }

  // We need to use a 32-bit subregister to avoid a register class mismatch
  // in 64-bit mode.
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
      .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
      .addImm(3)
      .addImm(27)
      .addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
        .addReg(Shift1Reg)
        .addImm(is8bit ? 24 : 16);
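  // At this point Shift1Reg holds 8 * (the byte or halfword offset within
  // the word): 0/8/16/24 for bytes, 0/16 for halfwords, counted from the
  // least significant end. On big-endian targets the xori above flips the
  // value so the slw/srw pairs still select the correct lane of the word
  // loaded by lwarx.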
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg)
        .addImm(0)
        .addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg)
        .addImm(0)
        .addImm(0)
        .addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg)
        .addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg)
      .addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg)
      .addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
        .addReg(Incr2Reg)
        .addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg)
      .addReg(MaskReg);
  BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
  if (CmpOpcode) {
    // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons we shift and sign extend.
    Register SReg = RegInfo.createVirtualRegister(GPRC);
    BuildMI(BB, dl, TII->get(PPC::AND), SReg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    unsigned ValueReg = SReg;
    unsigned CmpReg = Incr2Reg;
    if (CmpOpcode == PPC::CMPW) {
      ValueReg = RegInfo.createVirtualRegister(GPRC);
      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
          .addReg(SReg)
          .addReg(ShiftReg);
      Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
          .addReg(ValueReg);
      ValueReg = ValueSReg;
      CmpReg = incr;
    }
    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(CmpReg)
        .addReg(ValueReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(CmpPred)
        .addReg(PPC::CR0)
        .addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
      .addReg(Tmp4Reg)
      .addReg(ZeroReg)
      .addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE)
      .addReg(PPC::CR0)
      .addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
      .addReg(TmpDestReg)
      .addReg(ShiftReg);
  return BB;
}
llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare IP either in reg.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  Register LabelReg = MRI.createVirtualRegister(PtrRC);
  Register BufReg = MI.getOperand(1).getReg();

  if (Subtarget.is64BitELFABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
              .addReg(PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg)
              .cloneMemRefs(MI);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned BaseReg;
  if (MF->getFunction().hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
            .addReg(BaseReg)
            .addImm(BPOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());
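
  // Note: the bcl above leaves LR pointing at the next instruction in
  // thisMBB (the li of 1 emitted below); mainMBB's mflr captures that
  // address and stores it as the longjmp target, so a longjmp resumes on
  // the v = 1 path.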
  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
            .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}
MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
      (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  Register Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
  unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset    = 2 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  Register BufReg = MI.getOperand(0).getReg();

  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
            .addImm(0)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
            .addImm(0)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload IP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
            .addImm(LabelOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload SP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
            .addImm(SPOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload BP
  if (PVT == MVT::i64) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  } else {
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
            .addImm(BPOffset)
            .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  // Reload TOC
  if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg)
              .cloneMemRefs(MI);
  }

  // Jump
  BuildMI(*MBB, MI, DL,
          TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
  BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));

  MI.eraseFromParent();
  return MBB;
}
bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";
  return false;
}

unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();
  assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
         "Unexpected stack alignment");
  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  // Round down to the stack alignment.
  StackProbeSize &= ~(StackAlign - 1);
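  // For example, with a 16-byte stack alignment a requested probe size of
  // 100 rounds down to 96, and any value smaller than the alignment rounds
  // to zero, in which case the alignment itself is returned below.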
  return StackProbeSize ? StackProbeSize : StackAlign;
}
// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
// into three phases. In the first phase, it uses pseudo instruction
// PREPARE_PROBED_ALLOCA to get the future result of actual FramePointer and
// FinalStackPtr. In the second phase, it generates a loop for probing blocks.
// At last, it uses pseudo instruction DYNAREAOFFSET to get the future result
// of MaxCallFrameSize so that it can calculate correct data area pointer.
MachineBasicBlock *
PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  const bool isPPC64 = Subtarget.isPPC64();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  const unsigned ProbeSize = getStackProbeSize(*MF);
  const BasicBlock *ProbedBB = MBB->getBasicBlock();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  // The CFG of probing stack looks as
  //         +-----+
  //         | MBB |
  //         +--+--+
  //            |
  //       +----v----+
  //  +--->+ TestMBB +---+
  //  |    +----+----+   |
  //  |         |        |
  //  |   +-----v----+   |
  //  +---+ BlockMBB |   |
  //      +----------+   |
  //                     |
  //       +---------+   |
  //       | TailMBB +<--+
  //       +---------+
  // In MBB, calculate previous frame pointer and final stack pointer.
  // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
  // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
  // TailMBB is spliced via \p MI.
  MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
  MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
  MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);

  MachineFunction::iterator MBBIter = ++MBB->getIterator();
  MF->insert(MBBIter, TestMBB);
  MF->insert(MBBIter, BlockMBB);
  MF->insert(MBBIter, TailMBB);

  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register DstReg = MI.getOperand(0).getReg();
  Register NegSizeReg = MI.getOperand(1).getReg();
  Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
  Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);

  // Since the value of NegSizeReg might be realigned during prologue/epilogue
  // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
  // actual FramePointer and NegSize.
  unsigned ProbeOpc;
  if (!MRI.hasOneNonDBGUse(NegSizeReg))
    ProbeOpc =
        isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
  else
    // By introducing PREPARE_PROBED_ALLOCA_NEGSIZE_OPT, ActualNegSizeReg
    // and NegSizeReg will be allocated in the same phyreg to avoid
    // redundant copy when NegSizeReg has only one use which is current MI and
    // will be replaced by PREPARE_PROBED_ALLOCA then.
    ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
                       : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
  BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
      .addDef(ActualNegSizeReg)
      .addReg(NegSizeReg)
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));

  // Calculate final stack pointer, which equals to SP + ActualNegSize.
  BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
          FinalStackPtr)
      .addReg(SPReg)
      .addReg(ActualNegSizeReg);

  // Materialize a scratch register for update.
  int64_t NegProbeSize = -(int64_t)ProbeSize;
  assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
  Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  if (!isInt<16>(NegProbeSize)) {
    Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
        .addImm(NegProbeSize >> 16);
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
            ScratchReg)
        .addReg(TempReg)
        .addImm(NegProbeSize & 0xFFFF);
  } else
    BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
        .addImm(NegProbeSize);

  // Probing leading residual part.
  Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
      .addReg(ActualNegSizeReg)
      .addReg(ScratchReg);
  Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
      .addReg(Div)
      .addReg(ScratchReg);
  Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
      .addReg(Mul)
      .addReg(ActualNegSizeReg);
  BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
      .addReg(FramePointer)
      .addReg(SPReg)
      .addReg(NegMod);
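
  // NegMod = ActualNegSize - Div * (-ProbeSize) is the non-positive residue
  // of the allocation modulo the probe size. The stdux/stwux above probes
  // that residue and updates SP in one instruction, storing the previous
  // frame pointer at the new stack top so the backchain stays intact.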

  // Remaining part should be multiple of ProbeSize.
  Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
  BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
      .addReg(SPReg)
      .addReg(FinalStackPtr);
  BuildMI(TestMBB, DL, TII->get(PPC::BCC))
      .addImm(PPC::PRED_EQ)
      .addReg(CmpResult)
      .addMBB(TailMBB);
  TestMBB->addSuccessor(BlockMBB);
  TestMBB->addSuccessor(TailMBB);

  // Touch the block.
  BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
      .addReg(FramePointer)
      .addReg(SPReg)
      .addReg(ScratchReg);
  BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
  BlockMBB->addSuccessor(TestMBB);

  // Calculation of MaxCallFrameSize is deferred to prologue/epilogue
  // insertion; use the DYNAREAOFFSET pseudo instruction to get the future
  // result.
  Register MaxCallFrameSizeReg =
      MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
  BuildMI(TailMBB, DL,
          TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
          MaxCallFrameSizeReg)
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
      .addReg(SPReg)
      .addReg(MaxCallFrameSizeReg);

  // Splice instructions after MI to TailMBB.
  TailMBB->splice(TailMBB->end(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(TestMBB);

  // Delete the pseudo instruction.
  MI.eraseFromParent();

  ++NumDynamicAllocaProbed;
  return TailMBB;
}
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    if (Subtarget.is64BitELFABI() &&
        MI.getOpcode() == TargetOpcode::PATCHPOINT &&
        !Subtarget.isUsingPCRelativeCalls()) {
      // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't however, because there is no
      // way to mark the dependence as implicit there, and so the stackmap code
      // will confuse it with a regular operand. Instead, add the dependence
      // here.
      MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
    }

    return emitPatchPoint(MI, BB);
  }

  if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
      MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
    return emitEHSjLjSetJmp(MI, BB);
  } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
             MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
    return emitEHSjLjLongJmp(MI, BB);
  }

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineFunction *F = BB->getParent();

  if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
      MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
      MI.getOpcode() == PPC::SELECT_I8) {
    SmallVector<MachineOperand, 2> Cond;
    if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
        MI.getOpcode() == PPC::SELECT_CC_I8)
      Cond.push_back(MI.getOperand(4));
    else
      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
    Cond.push_back(MI.getOperand(1));

    DebugLoc dl = MI.getDebugLoc();
    TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
                      MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
  } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
             MI.getOpcode() == PPC::SELECT_CC_F8 ||
             MI.getOpcode() == PPC::SELECT_CC_F16 ||
             MI.getOpcode() == PPC::SELECT_CC_QFRC ||
             MI.getOpcode() == PPC::SELECT_CC_QSRC ||
             MI.getOpcode() == PPC::SELECT_CC_QBRC ||
             MI.getOpcode() == PPC::SELECT_CC_VRRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
             MI.getOpcode() == PPC::SELECT_CC_VSRC ||
             MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
             MI.getOpcode() == PPC::SELECT_CC_SPE ||
             MI.getOpcode() == PPC::SELECT_F4 ||
             MI.getOpcode() == PPC::SELECT_F8 ||
             MI.getOpcode() == PPC::SELECT_F16 ||
             MI.getOpcode() == PPC::SELECT_QFRC ||
             MI.getOpcode() == PPC::SELECT_QSRC ||
             MI.getOpcode() == PPC::SELECT_QBRC ||
             MI.getOpcode() == PPC::SELECT_SPE ||
             MI.getOpcode() == PPC::SELECT_SPE4 ||
             MI.getOpcode() == PPC::SELECT_VRRC ||
             MI.getOpcode() == PPC::SELECT_VSFRC ||
             MI.getOpcode() == PPC::SELECT_VSSRC ||
             MI.getOpcode() == PPC::SELECT_VSRC) {
    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // Next, add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
        MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
        MI.getOpcode() == PPC::SELECT_F16 ||
        MI.getOpcode() == PPC::SELECT_SPE4 ||
        MI.getOpcode() == PPC::SELECT_SPE ||
        MI.getOpcode() == PPC::SELECT_QFRC ||
        MI.getOpcode() == PPC::SELECT_QSRC ||
        MI.getOpcode() == PPC::SELECT_QBRC ||
        MI.getOpcode() == PPC::SELECT_VRRC ||
        MI.getOpcode() == PPC::SELECT_VSFRC ||
        MI.getOpcode() == PPC::SELECT_VSSRC ||
        MI.getOpcode() == PPC::SELECT_VSRC) {
      BuildMI(BB, dl, TII->get(PPC::BC))
          .addReg(MI.getOperand(1).getReg())
          .addMBB(sinkMBB);
    } else {
      unsigned SelectPred = MI.getOperand(4).getImm();
      BuildMI(BB, dl, TII->get(PPC::BCC))
          .addImm(SelectPred)
          .addReg(MI.getOperand(1).getReg())
          .addMBB(sinkMBB);
    }

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
        .addReg(MI.getOperand(3).getReg())
        .addMBB(copy0MBB)
        .addReg(MI.getOperand(2).getReg())
        .addMBB(thisMBB);
  } else if (MI.getOpcode() == PPC::ReadTB) {
    // To read the 64-bit time-base register on a 32-bit target, we read the
    // two halves. Should the counter have wrapped while it was being read, we
    // need to try again.
    // ...
    // readLoop:
    // mfspr Rx,TBU # load from TBU
    // mfspr Ry,TB # load from TB
    // mfspr Rz,TBU # load from TBU
    // cmpw crX,Rx,Rz # check if 'old'='new'
    // bne readLoop # branch if they're not equal
    // ...

    MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    DebugLoc dl = MI.getDebugLoc();
    F->insert(It, readMBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(readMBB);
    BB = readMBB;

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
    Register LoReg = MI.getOperand(0).getReg();
    Register HiReg = MI.getOperand(1).getReg();

    BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
    BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
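    // For mfspr, SPR 268 is the time-base lower half (TB) and SPR 269 the
    // upper half (TBU); reading TBU both before and after the TB read
    // detects a carry out of the low word while the counter was sampled.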

    Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);

    BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
        .addReg(HiReg)
        .addReg(ReadAgainReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(CmpReg)
        .addMBB(readMBB);

    BB->addSuccessor(readMBB);
    BB->addSuccessor(sinkMBB);
  } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
    BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
    BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);

  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
  else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);

  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
    BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
    BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
    BB = EmitAtomicBinary(MI, BB, 4, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
    BB = EmitAtomicBinary(MI, BB, 8, 0);
  else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
           (Subtarget.hasPartwordAtomics() &&
            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
    bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    auto LoadMnemonic = PPC::LDARX;
    auto StoreMnemonic = PPC::STDCX;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
    Register dest = MI.getOperand(0).getReg();
    Register ptrA = MI.getOperand(1).getReg();
    Register ptrB = MI.getOperand(2).getReg();
    Register oldval = MI.getOperand(3).getReg();
    Register newval = MI.getOperand(4).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loop1MBB);
    F->insert(It, loop2MBB);
    F->insert(It, midMBB);
    F->insert(It, exitMBB);
    exitMBB->splice(exitMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    exitMBB->transferSuccessorsAndUpdatePHIs(BB);

    //  thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loop1MBB);

    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loopMBB
    //   b exitBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    //   b exitBB
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
        .addReg(oldval)
        .addReg(dest);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(midMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(midMBB);

    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(newval)
        .addReg(ptrA)
        .addReg(ptrB);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(PPC::CR0)
        .addMBB(loop1MBB);
    BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
    BB->addSuccessor(loop1MBB);
    BB->addSuccessor(exitMBB);

    BB = midMBB;
    BuildMI(BB, dl, TII->get(StoreMnemonic))
        .addReg(dest)
        .addReg(ptrA)
        .addReg(ptrB);
    BB->addSuccessor(exitMBB);

    //  exitMBB:
    //   ...
    BB = exitMBB;
12456 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12457 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12458 // We must use 64-bit registers for addresses when targeting 64-bit,
12459 // since we're actually doing arithmetic on them. Other registers
12461 bool is64bit = Subtarget.isPPC64();
12462 bool isLittleEndian = Subtarget.isLittleEndian();
12463 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12465 Register dest = MI.getOperand(0).getReg();
12466 Register ptrA = MI.getOperand(1).getReg();
12467 Register ptrB = MI.getOperand(2).getReg();
12468 Register oldval = MI.getOperand(3).getReg();
12469 Register newval = MI.getOperand(4).getReg();
12470 DebugLoc dl = MI.getDebugLoc();
12472 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12473 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12474 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12475 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12476 F->insert(It, loop1MBB);
12477 F->insert(It, loop2MBB);
12478 F->insert(It, midMBB);
12479 F->insert(It, exitMBB);
12480 exitMBB->splice(exitMBB->begin(), BB,
12481 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12482 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12484 MachineRegisterInfo &RegInfo = F->getRegInfo();
12485 const TargetRegisterClass *RC =
12486 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12487 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12489 Register PtrReg = RegInfo.createVirtualRegister(RC);
12490 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12491 Register ShiftReg =
12492 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12493 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12494 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12495 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12496 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12497 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12498 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12499 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12500 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12501 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
    Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
    Register Ptr1Reg;
    Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12505 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12508 // fallthrough --> loopMBB
12509 BB->addSuccessor(loop1MBB);
    // The 4-byte load must be aligned, while a char or short may be
    // anywhere in the word. Hence all this nasty bookkeeping code.
    //   add ptr1, ptrA, ptrB [copy if ptrA==0]
    //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
    //   xori shift, shift1, 24 [16]
    //   rlwinm ptr, ptr1, 0, 0, 29
    //   slw newval2, newval, shift
    //   slw oldval2, oldval, shift
    //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
    //   slw mask, mask2, shift
    //   and newval3, newval2, mask
    //   and oldval3, oldval2, mask
    // loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    // loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitBB
    // midMBB:
    //   stwcx. tmpDest, ptr
    // exitBB:
    //   srw dest, tmpDest, shift
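    // Worked example (illustrative): for an i8 at address 0x1003, shift1 =
    // ((0x1003 << 3) & 0x18) = 24. On little-endian that byte is the most
    // significant byte of the aligned word, so shift = 24 is used directly;
    // on big-endian it is the least significant byte, and xori shift,
    // shift1, 24 yields 0, so no shift is needed there.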
    if (ptrA != ZeroReg) {
      Ptr1Reg = RegInfo.createVirtualRegister(RC);
      BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
          .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
    // We need to use a 32-bit subregister here to avoid a register-class
    // mismatch in 64-bit mode.
    BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
        .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
        .addImm(3).addImm(27)
        .addImm(is8bit ? 28 : 27);
12554 if (!isLittleEndian)
      BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
          .addReg(Shift1Reg)
          .addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
          .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
          .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
    BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
        .addReg(newval).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
        .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
          .addReg(Mask3Reg).addImm(65535);
    }
    BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
        .addReg(Mask2Reg).addReg(ShiftReg);
    BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
        .addReg(NewVal2Reg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
        .addReg(OldVal2Reg).addReg(MaskReg);
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
        .addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
        .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
        .addReg(TmpReg)
        .addReg(OldVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
12607 BB->addSuccessor(loop2MBB);
12608 BB->addSuccessor(midMBB);
    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
        .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
        .addReg(Tmp2Reg)
        .addReg(NewVal3Reg);
    BuildMI(BB, dl, TII->get(PPC::STWCX))
        .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
12625 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12626 BB->addSuccessor(loop1MBB);
12627 BB->addSuccessor(exitMBB);
    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX))
        .addReg(TmpDestReg).addReg(ZeroReg).addReg(PtrReg);
    BB->addSuccessor(exitMBB);

    BB = exitMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
        .addReg(TmpDestReg).addReg(ShiftReg);
12642 } else if (MI.getOpcode() == PPC::FADDrtz) {
12643 // This pseudo performs an FADD with rounding mode temporarily forced
12644 // to round-to-zero. We emit this via custom inserter since the FPSCR
12645 // is not modeled at the SelectionDAG level.
12646 Register Dest = MI.getOperand(0).getReg();
12647 Register Src1 = MI.getOperand(1).getReg();
12648 Register Src2 = MI.getOperand(2).getReg();
12649 DebugLoc dl = MI.getDebugLoc();
12651 MachineRegisterInfo &RegInfo = F->getRegInfo();
12652 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12654 // Save FPSCR value.
12655 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12657 // Set rounding mode to round-to-zero.
12658 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
12659 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
12661 // Perform addition.
12662 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
12664 // Restore FPSCR value.
12665 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
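    // The overall emitted sequence is therefore (an illustrative sketch):
    //   mffs   f_save            ; save the FPSCR
    //   mtfsb1 31                ; RN <- 0b01 (round toward zero)
    //   mtfsb0 30
    //   fadd   dst, src1, src2
    //   mtfsf  1, f_save         ; restore the rounding-mode field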
12666 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12667 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12668 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12669 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
    unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
                       MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
                          ? PPC::ANDI8_rec
                          : PPC::ANDI_rec;
12674 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12675 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12677 MachineRegisterInfo &RegInfo = F->getRegInfo();
12678 Register Dest = RegInfo.createVirtualRegister(
12679 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12681 DebugLoc Dl = MI.getDebugLoc();
    BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
        .addReg(MI.getOperand(1).getReg())
        .addImm(1);
12685 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12686 MI.getOperand(0).getReg())
12687 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12688 } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12689 DebugLoc Dl = MI.getDebugLoc();
12690 MachineRegisterInfo &RegInfo = F->getRegInfo();
12691 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12692 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(CRReg);
12696 } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12697 DebugLoc Dl = MI.getDebugLoc();
12698 unsigned Imm = MI.getOperand(1).getImm();
12699 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12700 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12701 MI.getOperand(0).getReg())
12702 .addReg(PPC::CR0EQ);
12703 } else if (MI.getOpcode() == PPC::SETRNDi) {
12704 DebugLoc dl = MI.getDebugLoc();
12705 Register OldFPSCRReg = MI.getOperand(0).getReg();
12707 // Save FPSCR value.
12708 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
    // The floating-point rounding mode is in bits 62:63 of the FPSCR and has
    // the following settings:
    //   00 Round to nearest
    //   01 Round to zero
    //   10 Round to +inf
    //   11 Round to -inf
    //
    // When the operand is an immediate, use its two least significant bits to
    // set bits 62:63 of the FPSCR.
12719 unsigned Mode = MI.getOperand(1).getImm();
    BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(31);
    BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(30);
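    // For example, SETRNDi 2 (round to +inf) emits, as a sketch:
    //   mtfsb0 31   ; (Mode & 1) == 0
    //   mtfsb1 30   ; (Mode & 2) != 0
    // leaving RN = 0b10.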
12725 } else if (MI.getOpcode() == PPC::SETRND) {
12726 DebugLoc dl = MI.getDebugLoc();
    // Copy a register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg,
    // or from G8RCRegClass::SrcReg to F8RCRegClass::DestReg. If the target
    // doesn't have DirectMove, do the conversion through the stack, since
    // there are no instructions like mtvsrd or mfvsrd to do it directly.
12733 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12734 if (Subtarget.hasDirectMove()) {
12735 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12738 // Use stack to do the register copy.
12739 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12740 MachineRegisterInfo &RegInfo = F->getRegInfo();
12741 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12742 if (RC == &PPC::F8RCRegClass) {
          // Copy register from F8RCRegClass to G8RCRegClass.
          assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
                 "Unsupported RegClass.");

          StoreOp = PPC::STFD;
          LoadOp = PPC::LD;
        } else {
          // Copy register from G8RCRegClass to F8RCRegClass.
12751 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12752 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12753 "Unsupported RegClass.");
12756 MachineFrameInfo &MFI = F->getFrameInfo();
12757 int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12759 MachineMemOperand *MMOStore = F->getMachineMemOperand(
12760 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12761 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12762 MFI.getObjectAlign(FrameIdx));
12764 // Store the SrcReg into the stack.
        BuildMI(*BB, MI, dl, TII->get(StoreOp))
            .addReg(SrcReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOStore);
12771 MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12772 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12773 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12774 MFI.getObjectAlign(FrameIdx));
12776 // Load from the stack where SrcReg is stored, and save to DestReg,
12777 // so we have done the RegClass conversion from RegClass::SrcReg to
12778 // RegClass::DestReg.
        BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOLoad);
      }
    };
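    // For example, without direct moves a GPR-to-FPR copy round-trips through
    // the stack as, roughly (an illustrative sketch):
    //   std rS, slot(r1)   ; spill the 64-bit GPR
    //   lfd fD, slot(r1)   ; reload the same bits into the FPR
    // and the FPR-to-GPR direction uses stfd/ld instead.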
12786 Register OldFPSCRReg = MI.getOperand(0).getReg();
12788 // Save FPSCR value.
12789 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
    // When the operand is a GPR, use its two least significant bits together
    // with the mtfsf instruction to set bits 62:63 of the FPSCR:
    //
    //   copy OldFPSCRTmpReg, OldFPSCRReg
    //   (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
    //   rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
    //   copy NewFPSCRReg, NewFPSCRTmpReg
    //   mtfsf 255, NewFPSCRReg
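    // Interpreting the rldimi step above (a sketch of the intent): bits 0:61
    // of the old FPSCR value are preserved, while the two least significant
    // bits of the extended source are inserted as bits 62:63, so only the
    // rounding-mode field changes.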
12799 MachineOperand SrcOp = MI.getOperand(1);
12800 MachineRegisterInfo &RegInfo = F->getRegInfo();
12801 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12803 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12805 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12806 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    // The first operand of INSERT_SUBREG should be a register that has
    // subregisters. We only care about its register class, so an
    // IMPLICIT_DEF register suffices.
12811 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
    BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
        .addReg(ImDefReg).add(SrcOp).addImm(1);

    Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
        .addReg(OldFPSCRTmpReg).addReg(ExtSrcReg).addImm(0).addImm(62);
12824 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12825 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
    // The mask 255 means that bits 32:63 of NewFPSCRReg are copied into bits
    // 32:63 of the FPSCR.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
        .addImm(255).addReg(NewFPSCRReg).addImm(0).addImm(0);
12834 } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12835 MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12836 return emitProbedAlloca(MI, BB);
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }
12841 MI.eraseFromParent(); // The pseudo instruction is gone now.
12845 //===----------------------------------------------------------------------===//
12846 // Target Optimization Hooks
12847 //===----------------------------------------------------------------------===//
12849 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12850 // For the estimates, convergence is quadratic, so we essentially double the
12851 // number of digits correct after every iteration. For both FRE and FRSQRTE,
12852 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12853 // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12854 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}
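// Worked example for getEstimateRefinementSteps above: with hasRecipPrec()
// the initial estimate is accurate to 2^-14, so one step reaches ~2^-28,
// covering f32's 24-bit significand, and f64 takes the extra step to reach
// ~2^-56 < 2^-53. Without it (2^-5), three steps give ~2^-40 and four ~2^-80.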
12860 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12861 int Enabled, int &RefinementSteps,
12862 bool &UseOneConstNR,
12863 bool Reciprocal) const {
12864 EVT VT = Operand.getValueType();
12865 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12866 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12867 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12868 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12869 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12870 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12871 if (RefinementSteps == ReciprocalEstimate::Unspecified)
12872 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12874 // The Newton-Raphson computation with a single constant does not provide
12875 // enough accuracy on some CPUs.
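  // For reference, the refinement performed later by the generic combiner is
  // a Newton-Raphson iteration of the form (one-constant variant; a sketch):
  //   X1 = X0 * (1.5 - 0.5 * A * X0 * X0)
  // The two-constant variant uses separately materialized constants, which
  // some cores need for acceptable accuracy.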
12876 UseOneConstNR = !Subtarget.needsTwoConstNR();
12877 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12882 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12884 int &RefinementSteps) const {
12885 EVT VT = Operand.getValueType();
12886 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12887 (VT == MVT::f64 && Subtarget.hasFRE()) ||
12888 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12889 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12890 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12891 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12892 if (RefinementSteps == ReciprocalEstimate::Unspecified)
12893 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12894 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12899 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12900 // Note: This functionality is used only when unsafe-fp-math is enabled, and
12901 // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12902 // enabled for division), this functionality is redundant with the default
12903 // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getCPUDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}
12922 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12923 // collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
12926 if (DAG.isBaseWithConstantOffset(Loc)) {
12927 Base = Loc.getOperand(0);
12928 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12930 // The base might itself be a base plus an offset, and if so, accumulate
12932 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12936 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12937 unsigned Bytes, int Dist,
12938 SelectionDAG &DAG) {
12939 if (VT.getSizeInBits() / 8 != Bytes)
12942 SDValue BaseLoc = Base->getBasePtr();
12943 if (Loc.getOpcode() == ISD::FrameIndex) {
12944 if (BaseLoc.getOpcode() != ISD::FrameIndex)
12946 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12947 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
12948 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12949 int FS = MFI.getObjectSize(FI);
12950 int BFS = MFI.getObjectSize(BFI);
12951 if (FS != BFS || FS != (int)Bytes) return false;
12952 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12955 SDValue Base1 = Loc, Base2 = BaseLoc;
12956 int64_t Offset1 = 0, Offset2 = 0;
12957 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12958 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12959 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12962 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12963 const GlobalValue *GV1 = nullptr;
12964 const GlobalValue *GV2 = nullptr;
12967 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12968 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12969 if (isGA1 && isGA2 && GV1 == GV2)
12970 return Offset1 == (Offset2 + Dist*Bytes);
12974 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12975 // not enforce equality of the chain operands.
12976 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12977 unsigned Bytes, int Dist,
12978 SelectionDAG &DAG) {
12979 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12980 EVT VT = LS->getMemoryVT();
12981 SDValue Loc = LS->getBasePtr();
12982 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12985 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12987 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12988 default: return false;
12989 case Intrinsic::ppc_qpx_qvlfd:
12990 case Intrinsic::ppc_qpx_qvlfda:
12993 case Intrinsic::ppc_qpx_qvlfs:
12994 case Intrinsic::ppc_qpx_qvlfsa:
12997 case Intrinsic::ppc_qpx_qvlfcd:
12998 case Intrinsic::ppc_qpx_qvlfcda:
13001 case Intrinsic::ppc_qpx_qvlfcs:
13002 case Intrinsic::ppc_qpx_qvlfcsa:
13005 case Intrinsic::ppc_qpx_qvlfiwa:
13006 case Intrinsic::ppc_qpx_qvlfiwz:
13007 case Intrinsic::ppc_altivec_lvx:
13008 case Intrinsic::ppc_altivec_lvxl:
13009 case Intrinsic::ppc_vsx_lxvw4x:
13010 case Intrinsic::ppc_vsx_lxvw4x_be:
13013 case Intrinsic::ppc_vsx_lxvd2x:
13014 case Intrinsic::ppc_vsx_lxvd2x_be:
13017 case Intrinsic::ppc_altivec_lvebx:
13020 case Intrinsic::ppc_altivec_lvehx:
13023 case Intrinsic::ppc_altivec_lvewx:
13028 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
13031 if (N->getOpcode() == ISD::INTRINSIC_VOID) {
13033 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
13034 default: return false;
13035 case Intrinsic::ppc_qpx_qvstfd:
13036 case Intrinsic::ppc_qpx_qvstfda:
13039 case Intrinsic::ppc_qpx_qvstfs:
13040 case Intrinsic::ppc_qpx_qvstfsa:
13043 case Intrinsic::ppc_qpx_qvstfcd:
13044 case Intrinsic::ppc_qpx_qvstfcda:
13047 case Intrinsic::ppc_qpx_qvstfcs:
13048 case Intrinsic::ppc_qpx_qvstfcsa:
13051 case Intrinsic::ppc_qpx_qvstfiw:
13052 case Intrinsic::ppc_qpx_qvstfiwa:
13053 case Intrinsic::ppc_altivec_stvx:
13054 case Intrinsic::ppc_altivec_stvxl:
13055 case Intrinsic::ppc_vsx_stxvw4x:
13058 case Intrinsic::ppc_vsx_stxvd2x:
13061 case Intrinsic::ppc_vsx_stxvw4x_be:
13064 case Intrinsic::ppc_vsx_stxvd2x_be:
13067 case Intrinsic::ppc_altivec_stvebx:
13070 case Intrinsic::ppc_altivec_stvehx:
13073 case Intrinsic::ppc_altivec_stvewx:
13078 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// one provided.
13089 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
13090 SDValue Chain = LD->getChain();
13091 EVT VT = LD->getMemoryVT();
13093 SmallSet<SDNode *, 16> LoadRoots;
13094 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
13095 SmallSet<SDNode *, 16> Visited;
  // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done; otherwise, record all
  // nodes just above the top-level loads and token factors.
13100 while (!Queue.empty()) {
13101 SDNode *ChainNext = Queue.pop_back_val();
13102 if (!Visited.insert(ChainNext).second)
13105 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
13106 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13109 if (!Visited.count(ChainLD->getChain().getNode()))
13110 Queue.push_back(ChainLD->getChain().getNode());
13111 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
13112 for (const SDUse &O : ChainNext->ops())
13113 if (!Visited.count(O.getNode()))
13114 Queue.push_back(O.getNode());
13116 LoadRoots.insert(ChainNext);
  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
  Visited.clear();
  Queue.clear();
13127 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
13128 IE = LoadRoots.end(); I != IE; ++I) {
13129 Queue.push_back(*I);
13131 while (!Queue.empty()) {
13132 SDNode *LoadRoot = Queue.pop_back_val();
13133 if (!Visited.insert(LoadRoot).second)
13136 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
13137 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13140 for (SDNode::use_iterator UI = LoadRoot->use_begin(),
13141 UE = LoadRoot->use_end(); UI != UE; ++UI)
13142 if (((isa<MemSDNode>(*UI) &&
13143 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
13144 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
13145 Queue.push_back(*UI);
/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function is
/// purely for codegen purposes and has some flags to guide the codegen
/// process.
13156 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
13157 bool Swap, SDLoc &DL, SelectionDAG &DAG) {
13158 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13160 // Zero extend the operands to the largest legal integer. Originally, they
13161 // must be of a strictly smaller size.
13162 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
13163 DAG.getConstant(Size, DL, MVT::i32));
13164 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
13165 DAG.getConstant(Size, DL, MVT::i32));
13167 // Swap if needed. Depends on the condition code.
13169 std::swap(Op0, Op1);
13171 // Subtract extended integers.
13172 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
  // Move the sign bit to the least significant position and zero out the rest.
  // Now the least significant bit carries the result of the original
  // comparison.
13176 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
13177 DAG.getConstant(Size - 1, DL, MVT::i32));
13178 auto Final = Shifted;
13180 // Complement the result if needed. Based on the condition code.
13182 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
13183 DAG.getConstant(1, DL, MVT::i64));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}
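// Worked example for generateEquivalentSub above (illustrative): for i16
// operands and an unsigned less-than, both sides are zero-extended to i64, so
// sub = zext(x) - zext(y) is negative exactly when x <u y. Shifting the sign
// bit down (srl sub, 63) leaves the comparison result in the low bit, which
// the trunc returns as an i1, all without touching a CR field.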
13188 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
13189 DAGCombinerInfo &DCI) const {
13190 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13192 SelectionDAG &DAG = DCI.DAG;
  // The size of the integers being compared has a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
13197 if (!DCI.isAfterLegalizeDAG())
  // If all users of the SETCC extend its value to a legal integer type, then
  // we replace the SETCC with a subtraction.
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE;
       ++UI) {
    if (UI->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
  }
13208 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13209 auto OpSize = N->getOperand(0).getValueSizeInBits();
13211 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
  if (OpSize < Size) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }
13230 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
13231 DAGCombinerInfo &DCI) const {
13232 SelectionDAG &DAG = DCI.DAG;
13235 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
13236 // If we're tracking CR bits, we need to be careful that we don't have:
13237 // trunc(binary-ops(zext(x), zext(y)))
13239 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
13240 // such that we're unnecessarily moving things into GPRs when it would be
13241 // better to keep them in CR bits.
13243 // Note that trunc here can be an actual i1 trunc, or can be the effective
13244 // truncation that comes from a setcc or select_cc.
13245 if (N->getOpcode() == ISD::TRUNCATE &&
13246 N->getValueType(0) != MVT::i1)
13249 if (N->getOperand(0).getValueType() != MVT::i32 &&
13250 N->getOperand(0).getValueType() != MVT::i64)
13253 if (N->getOpcode() == ISD::SETCC ||
13254 N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
13258 cast<CondCodeSDNode>(N->getOperand(
13259 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
13260 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
13262 if (ISD::isSignedIntSetCC(CC)) {
13263 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
13264 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
13266 } else if (ISD::isUnsignedIntSetCC(CC)) {
13267 if (!DAG.MaskedValueIsZero(N->getOperand(0),
13268 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
13269 !DAG.MaskedValueIsZero(N->getOperand(1),
13270 APInt::getHighBitsSet(OpBits, OpBits-1)))
13271 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
      // This is neither a signed nor an unsigned comparison; just make sure
      // that the high bits are equal.
13276 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
13277 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
13279 // We don't really care about what is known about the first bit (if
13280 // anything), so clear it in all masks prior to comparing them.
13281 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
13282 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
13284 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
  // We now know that the higher-order bits are irrelevant; we just need to
  // make sure that all of the intermediate operations are bit operations and
  // that all inputs are extensions.
13292 if (N->getOperand(0).getOpcode() != ISD::AND &&
13293 N->getOperand(0).getOpcode() != ISD::OR &&
13294 N->getOperand(0).getOpcode() != ISD::XOR &&
13295 N->getOperand(0).getOpcode() != ISD::SELECT &&
13296 N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
13297 N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
13298 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
13299 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
13300 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
13303 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
13304 N->getOperand(1).getOpcode() != ISD::AND &&
13305 N->getOperand(1).getOpcode() != ISD::OR &&
13306 N->getOperand(1).getOpcode() != ISD::XOR &&
13307 N->getOperand(1).getOpcode() != ISD::SELECT &&
13308 N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
13309 N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
13310 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
13311 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
13312 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
13315 SmallVector<SDValue, 4> Inputs;
13316 SmallVector<SDValue, 8> BinOps, PromOps;
13317 SmallPtrSet<SDNode *, 16> Visited;
13319 for (unsigned i = 0; i < 2; ++i) {
13320 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13321 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13322 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13323 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13324 isa<ConstantSDNode>(N->getOperand(i)))
13325 Inputs.push_back(N->getOperand(i));
13327 BinOps.push_back(N->getOperand(i));
13329 if (N->getOpcode() == ISD::TRUNCATE)
13333 // Visit all inputs, collect all binary operations (and, or, xor and
13334 // select) that are all fed by extensions.
13335 while (!BinOps.empty()) {
13336 SDValue BinOp = BinOps.back();
13339 if (!Visited.insert(BinOp.getNode()).second)
13342 PromOps.push_back(BinOp);
13344 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13345 // The condition of the select is not promoted.
13346 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13348 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13351 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13352 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13353 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13354 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13355 isa<ConstantSDNode>(BinOp.getOperand(i))) {
13356 Inputs.push_back(BinOp.getOperand(i));
13357 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13358 BinOp.getOperand(i).getOpcode() == ISD::OR ||
13359 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13360 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13361 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
13362 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13363 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13364 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13365 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
13366 BinOps.push_back(BinOp.getOperand(i));
13368 // We have an input that is not an extension or another binary
13369 // operation; we'll abort this transformation.
  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
13378 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13379 if (isa<ConstantSDNode>(Inputs[i]))
13382 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13383 UE = Inputs[i].getNode()->use_end();
13385 SDNode *User = *UI;
13386 if (User != N && !Visited.count(User))
13389 // Make sure that we're not going to promote the non-output-value
13390 // operand(s) or SELECT or SELECT_CC.
13391 // FIXME: Although we could sometimes handle this, and it does occur in
13392 // practice that one of the condition inputs to the select is also one of
13393 // the outputs, we currently can't deal with this.
13394 if (User->getOpcode() == ISD::SELECT) {
13395 if (User->getOperand(0) == Inputs[i])
13397 } else if (User->getOpcode() == ISD::SELECT_CC) {
13398 if (User->getOperand(0) == Inputs[i] ||
13399 User->getOperand(1) == Inputs[i])
13405 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13406 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13407 UE = PromOps[i].getNode()->use_end();
13409 SDNode *User = *UI;
13410 if (User != N && !Visited.count(User))
13413 // Make sure that we're not going to promote the non-output-value
13414 // operand(s) or SELECT or SELECT_CC.
13415 // FIXME: Although we could sometimes handle this, and it does occur in
13416 // practice that one of the condition inputs to the select is also one of
13417 // the outputs, we currently can't deal with this.
13418 if (User->getOpcode() == ISD::SELECT) {
13419 if (User->getOperand(0) == PromOps[i])
13421 } else if (User->getOpcode() == ISD::SELECT_CC) {
13422 if (User->getOperand(0) == PromOps[i] ||
13423 User->getOperand(1) == PromOps[i])
13429 // Replace all inputs with the extension operand.
13430 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13431 // Constants may have users outside the cluster of to-be-promoted nodes,
13432 // and so we need to replace those as we do the promotions.
13433 if (isa<ConstantSDNode>(Inputs[i]))
13436 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13439 std::list<HandleSDNode> PromOpHandles;
13440 for (auto &PromOp : PromOps)
13441 PromOpHandles.emplace_back(PromOp);
13443 // Replace all operations (these are all the same, but have a different
13444 // (i1) return type). DAG.getNode will validate that the types of
13445 // a binary operator match, so go through the list in reverse so that
13446 // we've likely promoted both operands first. Any intermediate truncations or
13447 // extensions disappear.
13448 while (!PromOpHandles.empty()) {
13449 SDValue PromOp = PromOpHandles.back().getValue();
13450 PromOpHandles.pop_back();
13452 if (PromOp.getOpcode() == ISD::TRUNCATE ||
13453 PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13454 PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13455 PromOp.getOpcode() == ISD::ANY_EXTEND) {
13456 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13457 PromOp.getOperand(0).getValueType() != MVT::i1) {
13458 // The operand is not yet ready (see comment below).
13459 PromOpHandles.emplace_front(PromOp);
13463 SDValue RepValue = PromOp.getOperand(0);
13464 if (isa<ConstantSDNode>(RepValue))
13465 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13467 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13472 switch (PromOp.getOpcode()) {
13473 default: C = 0; break;
13474 case ISD::SELECT: C = 1; break;
13475 case ISD::SELECT_CC: C = 2; break;
13478 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13479 PromOp.getOperand(C).getValueType() != MVT::i1) ||
13480 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13481 PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13482 // The to-be-promoted operands of this node have not yet been
13483 // promoted (this should be rare because we're going through the
13484 // list backward, but if one of the operands has several users in
13485 // this cluster of to-be-promoted nodes, it is possible).
13486 PromOpHandles.emplace_front(PromOp);
13490 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13491 PromOp.getNode()->op_end());
13493 // If there are any constant inputs, make sure they're replaced now.
13494 for (unsigned i = 0; i < 2; ++i)
13495 if (isa<ConstantSDNode>(Ops[C+i]))
13496 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13498 DAG.ReplaceAllUsesOfValueWith(PromOp,
13499 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13502 // Now we're left with the initial truncation itself.
13503 if (N->getOpcode() == ISD::TRUNCATE)
13504 return N->getOperand(0);
13506 // Otherwise, this is a comparison. The operands to be compared have just
13507 // changed type (to i1), but everything else is the same.
13508 return SDValue(N, 0);
13511 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13512 DAGCombinerInfo &DCI) const {
13513 SelectionDAG &DAG = DCI.DAG;
13516 // If we're tracking CR bits, we need to be careful that we don't have:
13517 // zext(binary-ops(trunc(x), trunc(y)))
13519 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
13520 // such that we're unnecessarily moving things into CR bits that can more
13521 // efficiently stay in GPRs. Note that if we're not certain that the high
13522 // bits are set as required by the final extension, we still may need to do
13523 // some masking to get the proper behavior.
  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used as
  // the return values of functions. Because it is so similar, it is handled
  // here as well.
13530 if (N->getValueType(0) != MVT::i32 &&
13531 N->getValueType(0) != MVT::i64)
13534 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13535 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13538 if (N->getOperand(0).getOpcode() != ISD::AND &&
13539 N->getOperand(0).getOpcode() != ISD::OR &&
13540 N->getOperand(0).getOpcode() != ISD::XOR &&
13541 N->getOperand(0).getOpcode() != ISD::SELECT &&
13542 N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13545 SmallVector<SDValue, 4> Inputs;
13546 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13547 SmallPtrSet<SDNode *, 16> Visited;
13549 // Visit all inputs, collect all binary operations (and, or, xor and
13550 // select) that are all fed by truncations.
13551 while (!BinOps.empty()) {
13552 SDValue BinOp = BinOps.back();
13555 if (!Visited.insert(BinOp.getNode()).second)
13558 PromOps.push_back(BinOp);
13560 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13561 // The condition of the select is not promoted.
13562 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13564 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13567 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13568 isa<ConstantSDNode>(BinOp.getOperand(i))) {
13569 Inputs.push_back(BinOp.getOperand(i));
13570 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13571 BinOp.getOperand(i).getOpcode() == ISD::OR ||
13572 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13573 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13574 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13575 BinOps.push_back(BinOp.getOperand(i));
13577 // We have an input that is not a truncation or another binary
13578 // operation; we'll abort this transformation.
13584 // The operands of a select that must be truncated when the select is
13585 // promoted because the operand is actually part of the to-be-promoted set.
13586 DenseMap<SDNode *, EVT> SelectTruncOp[2];
  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
13591 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13592 if (isa<ConstantSDNode>(Inputs[i]))
13595 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13596 UE = Inputs[i].getNode()->use_end();
13598 SDNode *User = *UI;
13599 if (User != N && !Visited.count(User))
13602 // If we're going to promote the non-output-value operand(s) or SELECT or
13603 // SELECT_CC, record them for truncation.
13604 if (User->getOpcode() == ISD::SELECT) {
13605 if (User->getOperand(0) == Inputs[i])
13606 SelectTruncOp[0].insert(std::make_pair(User,
13607 User->getOperand(0).getValueType()));
13608 } else if (User->getOpcode() == ISD::SELECT_CC) {
13609 if (User->getOperand(0) == Inputs[i])
13610 SelectTruncOp[0].insert(std::make_pair(User,
13611 User->getOperand(0).getValueType()));
13612 if (User->getOperand(1) == Inputs[i])
13613 SelectTruncOp[1].insert(std::make_pair(User,
13614 User->getOperand(1).getValueType()));
13619 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13620 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13621 UE = PromOps[i].getNode()->use_end();
13623 SDNode *User = *UI;
13624 if (User != N && !Visited.count(User))
13627 // If we're going to promote the non-output-value operand(s) or SELECT or
13628 // SELECT_CC, record them for truncation.
13629 if (User->getOpcode() == ISD::SELECT) {
13630 if (User->getOperand(0) == PromOps[i])
13631 SelectTruncOp[0].insert(std::make_pair(User,
13632 User->getOperand(0).getValueType()));
13633 } else if (User->getOpcode() == ISD::SELECT_CC) {
13634 if (User->getOperand(0) == PromOps[i])
13635 SelectTruncOp[0].insert(std::make_pair(User,
13636 User->getOperand(0).getValueType()));
13637 if (User->getOperand(1) == PromOps[i])
13638 SelectTruncOp[1].insert(std::make_pair(User,
13639 User->getOperand(1).getValueType()));
13644 unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13645 bool ReallyNeedsExt = false;
13646 if (N->getOpcode() != ISD::ANY_EXTEND) {
13647 // If all of the inputs are not already sign/zero extended, then
13648 // we'll still need to do that at the end.
13649 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13650 if (isa<ConstantSDNode>(Inputs[i]))
13654 Inputs[i].getOperand(0).getValueSizeInBits();
13655 assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13657 if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13658 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13659 APInt::getHighBitsSet(OpBits,
13660 OpBits-PromBits))) ||
13661 (N->getOpcode() == ISD::SIGN_EXTEND &&
13662 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13663 (OpBits-(PromBits-1)))) {
13664 ReallyNeedsExt = true;
13670 // Replace all inputs, either with the truncation operand, or a
13671 // truncation or extension to the final output type.
13672 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
13676 if (isa<ConstantSDNode>(Inputs[i]))
13679 SDValue InSrc = Inputs[i].getOperand(0);
13680 if (Inputs[i].getValueType() == N->getValueType(0))
13681 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13682 else if (N->getOpcode() == ISD::SIGN_EXTEND)
13683 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13684 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13685 else if (N->getOpcode() == ISD::ZERO_EXTEND)
13686 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13687 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13689 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13690 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13693 std::list<HandleSDNode> PromOpHandles;
13694 for (auto &PromOp : PromOps)
13695 PromOpHandles.emplace_back(PromOp);
13697 // Replace all operations (these are all the same, but have a different
13698 // (promoted) return type). DAG.getNode will validate that the types of
13699 // a binary operator match, so go through the list in reverse so that
13700 // we've likely promoted both operands first.
13701 while (!PromOpHandles.empty()) {
13702 SDValue PromOp = PromOpHandles.back().getValue();
13703 PromOpHandles.pop_back();
13706 switch (PromOp.getOpcode()) {
13707 default: C = 0; break;
13708 case ISD::SELECT: C = 1; break;
13709 case ISD::SELECT_CC: C = 2; break;
13712 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13713 PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13714 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13715 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13716 // The to-be-promoted operands of this node have not yet been
13717 // promoted (this should be rare because we're going through the
13718 // list backward, but if one of the operands has several users in
13719 // this cluster of to-be-promoted nodes, it is possible).
13720 PromOpHandles.emplace_front(PromOp);
13724 // For SELECT and SELECT_CC nodes, we do a similar check for any
13725 // to-be-promoted comparison inputs.
13726 if (PromOp.getOpcode() == ISD::SELECT ||
13727 PromOp.getOpcode() == ISD::SELECT_CC) {
13728 if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13729 PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13730 (SelectTruncOp[1].count(PromOp.getNode()) &&
13731 PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13732 PromOpHandles.emplace_front(PromOp);
13737 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13738 PromOp.getNode()->op_end());
13740 // If this node has constant inputs, then they'll need to be promoted here.
13741 for (unsigned i = 0; i < 2; ++i) {
13742 if (!isa<ConstantSDNode>(Ops[C+i]))
13744 if (Ops[C+i].getValueType() == N->getValueType(0))
13747 if (N->getOpcode() == ISD::SIGN_EXTEND)
13748 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13749 else if (N->getOpcode() == ISD::ZERO_EXTEND)
13750 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13752 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13755 // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13756 // truncate them again to the original value type.
13757 if (PromOp.getOpcode() == ISD::SELECT ||
13758 PromOp.getOpcode() == ISD::SELECT_CC) {
13759 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13760 if (SI0 != SelectTruncOp[0].end())
13761 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13762 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13763 if (SI1 != SelectTruncOp[1].end())
13764 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13767 DAG.ReplaceAllUsesOfValueWith(PromOp,
13768 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13771 // Now we're left with the initial extension itself.
13772 if (!ReallyNeedsExt)
13773 return N->getOperand(0);
  // To zero extend, just mask off everything except for the first bit (in the
  // i1 case).
13777 if (N->getOpcode() == ISD::ZERO_EXTEND)
13778 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13779 DAG.getConstant(APInt::getLowBitsSet(
13780 N->getValueSizeInBits(0), PromBits),
13781 dl, N->getValueType(0)));
13783 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13784 "Invalid extension type");
13785 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
      DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
      ShiftCst);
}
13794 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13795 DAGCombinerInfo &DCI) const {
13796 assert(N->getOpcode() == ISD::SETCC &&
13797 "Should be called with a SETCC node");
13799 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13800 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13801 SDValue LHS = N->getOperand(0);
13802 SDValue RHS = N->getOperand(1);
13804 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13805 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13807 std::swap(LHS, RHS);
13809 // x == 0-y --> x+y == 0
13810 // x != 0-y --> x+y != 0
13811 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13814 SelectionDAG &DAG = DCI.DAG;
13815 EVT VT = N->getValueType(0);
13816 EVT OpVT = LHS.getValueType();
13817 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13818 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13822 return DAGCombineTruncBoolExt(N, DCI);
13825 // Is this an extending load from an f32 to an f64?
13826 static bool isFPExtLoad(SDValue Op) {
13827 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13828 return LD->getExtensionType() == ISD::EXTLOAD &&
13829 Op.getValueType() == MVT::f64;
/// Reduces the number of fp-to-int conversions when building a vector.
13835 /// If this vector is built out of floating to integer conversions,
13836 /// transform it to a vector built out of floating point values followed by a
13837 /// single floating to integer conversion of the vector.
13838 /// Namely (build_vector (fptosi $A), (fptosi $B), ...)
13839 /// becomes (fptosi (build_vector ($A, $B, ...)))
13840 SDValue PPCTargetLowering::
13841 combineElementTruncationToVectorTruncation(SDNode *N,
13842 DAGCombinerInfo &DCI) const {
13843 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13844 "Should be called with a BUILD_VECTOR node");
13846 SelectionDAG &DAG = DCI.DAG;
13849 SDValue FirstInput = N->getOperand(0);
13850 assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13851 "The input operand must be an fp-to-int conversion.");
  // This combine happens after legalization, so the fp_to_[su]i nodes are
  // already converted to PPCISD nodes.
13855 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13856 if (FirstConversion == PPCISD::FCTIDZ ||
13857 FirstConversion == PPCISD::FCTIDUZ ||
13858 FirstConversion == PPCISD::FCTIWZ ||
13859 FirstConversion == PPCISD::FCTIWUZ) {
13860 bool IsSplat = true;
13861 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13862 FirstConversion == PPCISD::FCTIWUZ;
13863 EVT SrcVT = FirstInput.getOperand(0).getValueType();
13864 SmallVector<SDValue, 4> Ops;
13865 EVT TargetVT = N->getValueType(0);
13866 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13867 SDValue NextOp = N->getOperand(i);
13868 if (NextOp.getOpcode() != PPCISD::MFVSR)
13870 unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13871 if (NextConversion != FirstConversion)
      // If we are converting to 32-bit integers, we need to add an FP_ROUND.
      // This is not valid if the input was originally double precision. It is
      // also not profitable to do unless this is an extending load, in which
      // case doing this combine will allow us to combine consecutive loads.
13877 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13879 if (N->getOperand(i) != FirstInput)
13883 // If this is a splat, we leave it as-is since there will be only a single
13884 // fp-to-int conversion followed by a splat of the integer. This is better
13885 // for 32-bit and smaller ints and neutral for 64-bit ints.
13889 // Now that we know we have the right type of node, get its operands
13890 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13891 SDValue In = N->getOperand(i).getOperand(0);
13893 // For 32-bit values, we need to add an FP_ROUND node (if we made it
13894 // here, we know that all inputs are extending loads so this is safe).
13896 Ops.push_back(DAG.getUNDEF(SrcVT));
13898 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13899 MVT::f32, In.getOperand(0),
13900 DAG.getIntPtrConstant(1, dl));
13901 Ops.push_back(Trunc);
13904 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13908 if (FirstConversion == PPCISD::FCTIDZ ||
13909 FirstConversion == PPCISD::FCTIWZ)
13910 Opcode = ISD::FP_TO_SINT;
13912 Opcode = ISD::FP_TO_UINT;
13914 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13915 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13916 return DAG.getNode(Opcode, dl, TargetVT, BV);
13921 /// Reduce the number of loads when building a vector.
13923 /// Building a vector out of multiple loads can be converted to a load
13924 /// of the vector type if the loads are consecutive. If the loads are
13925 /// consecutive but in descending order, a shuffle is added at the end
13926 /// to reorder the vector.
13927 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13928 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13929 "Should be called with a BUILD_VECTOR node");
  // Return early for non-byte-sized types, as they can't be consecutive.
13934 if (!N->getValueType(0).getVectorElementType().isByteSized())
13937 bool InputsAreConsecutiveLoads = true;
13938 bool InputsAreReverseConsecutive = true;
13939 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13940 SDValue FirstInput = N->getOperand(0);
13941 bool IsRoundOfExtLoad = false;
13943 if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13944 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13945 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13946 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13948 // Not a build vector of (possibly fp_rounded) loads.
13949 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13950 N->getNumOperands() == 1)
13953 for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13954 // If any inputs are fp_round(extload), they all must be.
13955 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13958 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13960 if (NextInput.getOpcode() != ISD::LOAD)
13963 SDValue PreviousInput =
13964 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13965 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13966 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13968 // If any inputs are fp_round(extload), they all must be.
13969 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13972 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13973 InputsAreConsecutiveLoads = false;
13974 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13975 InputsAreReverseConsecutive = false;
13977 // Exit early if the loads are neither consecutive nor reverse consecutive.
13978 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13982 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13983 "The loads cannot be both consecutive and reverse consecutive.");
13985 SDValue FirstLoadOp =
13986 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13987 SDValue LastLoadOp =
13988 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13989 N->getOperand(N->getNumOperands()-1);
13991 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13992 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13993 if (InputsAreConsecutiveLoads) {
13994 assert(LD1 && "Input needs to be a LoadSDNode.");
13995 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13996 LD1->getBasePtr(), LD1->getPointerInfo(),
13997 LD1->getAlignment());
13999 if (InputsAreReverseConsecutive) {
14000 assert(LDL && "Input needs to be a LoadSDNode.");
14001 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
14002 LDL->getBasePtr(), LDL->getPointerInfo(),
14003 LDL->getAlignment());
14004 SmallVector<int, 16> Ops;
14005 for (int i = N->getNumOperands() - 1; i >= 0; i--)
14008 return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
14009 DAG.getUNDEF(N->getValueType(0)), Ops);
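    // For example (illustrative), a BUILD_VECTOR of four i32 loads from A+12,
    // A+8, A+4, A is reverse consecutive: it becomes a single v4i32 load from
    // A followed by a vector_shuffle with mask <3, 2, 1, 0>.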
14014 // This function adds the required vector_shuffle needed to get
14015 // the elements of the vector extract in the correct position
14016 // as specified by the CorrectElems encoding.
14017 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
14018 SDValue Input, uint64_t Elems,
14019 uint64_t CorrectElems) {
14022 unsigned NumElems = Input.getValueType().getVectorNumElements();
14023 SmallVector<int, 16> ShuffleMask(NumElems, -1);
14025 // Knowing the element indices being extracted from the original
14026 // vector and the order in which they're being inserted, just put
14027 // them at element indices required for the instruction.
14028 for (unsigned i = 0; i < N->getNumOperands(); i++) {
14029 if (DAG.getDataLayout().isLittleEndian())
14030 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
14032 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
14033 CorrectElems = CorrectElems >> 8;
14034 Elems = Elems >> 8;
14038 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
14039 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
14041 EVT VT = N->getValueType(0);
14042 SDValue Conv = DAG.getBitcast(VT, Shuffle);
14044 EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
14045 Input.getValueType().getVectorElementType(),
14046 VT.getVectorNumElements());
14047 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
14048 DAG.getValueType(ExtVT));
// Look for build vector patterns where the input operands come from sign
// extended vector_extract elements of specific indices. If the correct indices
// aren't used, add a vector shuffle to fix up the indices and create a
// SIGN_EXTEND_INREG node which selects the vector sign extend instructions
// during instruction selection.
14056 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
14057 // This array encodes the indices that the vector sign extend instructions
14058 // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
14061 // For example: 0x3074B8FC byte->word
14062 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
14063 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
14064 // For example: 0x000070F8 byte->double word
14065 // For LE: the allowed indices are: 0x0,0x8
14066 // For BE: the allowed indices are: 0x7,0xF
14067 uint64_t TargetElems[] = {
14068 0x3074B8FC, // b->w
14069 0x000070F8, // b->d
14070 0x10325476, // h->w
14071 0x00003074, // h->d
14072 0x00001032, // w->d
14075 uint64_t Elems = 0;
14079 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
14082 if (Op.getOpcode() != ISD::SIGN_EXTEND &&
14083 Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
14086 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
14087 // of the right width.
14088 SDValue Extract = Op.getOperand(0);
14089 if (Extract.getOpcode() == ISD::ANY_EXTEND)
14090 Extract = Extract.getOperand(0);
14091 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14094 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
14098 Index = ExtOp->getZExtValue();
14099 if (Input && Input != Extract.getOperand(0))
14103 Input = Extract.getOperand(0);
14105 Elems = Elems << 8;
14106 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
14114 for (unsigned i = 0; i < N->getNumOperands(); i++) {
14115 if (!isSExtOfVecExtract(N->getOperand(i))) {
  // If the vector extract indices are not correct, add the appropriate
  // shuffle.
14122 int TgtElemArrayIdx;
14123 int InputSize = Input.getValueType().getScalarSizeInBits();
14124 int OutputSize = N->getValueType(0).getScalarSizeInBits();
14125 if (InputSize + OutputSize == 40)
14126 TgtElemArrayIdx = 0;
14127 else if (InputSize + OutputSize == 72)
14128 TgtElemArrayIdx = 1;
14129 else if (InputSize + OutputSize == 48)
14130 TgtElemArrayIdx = 2;
14131 else if (InputSize + OutputSize == 80)
14132 TgtElemArrayIdx = 3;
14133 else if (InputSize + OutputSize == 96)
14134 TgtElemArrayIdx = 4;
14135 else
14136 return SDValue();
14138 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
14139 CorrectElems = DAG.getDataLayout().isLittleEndian()
14140 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
14141 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
14142 if (Elems != CorrectElems) {
14143 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
14144 }
14146 // Regular lowering will catch cases where a shuffle is not needed.
14147 return SDValue();
14148 }
14150 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
14151 DAGCombinerInfo &DCI) const {
14152 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14153 "Should be called with a BUILD_VECTOR node");
14155 SelectionDAG &DAG = DCI.DAG;
14156 SDLoc dl(N);
14158 if (!Subtarget.hasVSX())
14159 return SDValue();
14161 // The target independent DAG combiner will leave a build_vector of
14162 // float-to-int conversions intact. We can generate MUCH better code for
14163 // a float-to-int conversion of a vector of floats.
14164 SDValue FirstInput = N->getOperand(0);
14165 if (FirstInput.getOpcode() == PPCISD::MFVSR) {
14166 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
14167 if (Reduced)
14168 return Reduced;
14169 }
14171 // If we're building a vector out of consecutive loads, just load that
14172 // vector type.
14173 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
14174 if (Reduced)
14175 return Reduced;
14177 // If we're building a vector out of extended elements from another vector,
14178 // we have P9 vector integer extend instructions. The code assumes legal
14179 // input types (i.e. it can't handle things like v4i16) so do not run before
14180 // legalization.
14181 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
14182 Reduced = combineBVOfVecSExt(N, DAG);
14183 if (Reduced)
14184 return Reduced;
14185 }
14188 if (N->getValueType(0) != MVT::v2f64)
14189 return SDValue();
14191 // Looking for:
14192 // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
14193 if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
14194 FirstInput.getOpcode() != ISD::UINT_TO_FP)
14195 return SDValue();
14196 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
14197 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
14198 return SDValue();
14199 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
14200 return SDValue();
14202 SDValue Ext1 = FirstInput.getOperand(0);
14203 SDValue Ext2 = N->getOperand(1).getOperand(0);
14204 if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
14205 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14206 return SDValue();
14208 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
14209 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
14210 if (!Ext1Op || !Ext2Op)
14211 return SDValue();
14212 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
14213 Ext1.getOperand(0) != Ext2.getOperand(0))
14214 return SDValue();
14216 int FirstElem = Ext1Op->getZExtValue();
14217 int SecondElem = Ext2Op->getZExtValue();
14218 int SubvecIdx;
14219 if (FirstElem == 0 && SecondElem == 1)
14220 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
14221 else if (FirstElem == 2 && SecondElem == 3)
14222 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
14223 else
14224 return SDValue();
14226 SDValue SrcVec = Ext1.getOperand(0);
14227 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
14228 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
14229 return DAG.getNode(NodeType, dl, MVT::v2f64,
14230 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
14231 }
14233 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
14234 DAGCombinerInfo &DCI) const {
14235 assert((N->getOpcode() == ISD::SINT_TO_FP ||
14236 N->getOpcode() == ISD::UINT_TO_FP) &&
14237 "Need an int -> FP conversion node here");
14239 if (useSoftFloat() || !Subtarget.has64BitSupport())
14240 return SDValue();
14242 SelectionDAG &DAG = DCI.DAG;
14243 SDLoc dl(N);
14244 SDValue Op(N, 0);
14246 // Don't handle ppc_fp128 here or conversions that are out-of-range capable
14247 // from the hardware.
14248 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
14249 return SDValue();
14250 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
14251 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
14252 return SDValue();
14254 SDValue FirstOperand(Op.getOperand(0));
14255 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
14256 (FirstOperand.getValueType() == MVT::i8 ||
14257 FirstOperand.getValueType() == MVT::i16);
14258 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
14259 bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
14260 bool DstDouble = Op.getValueType() == MVT::f64;
14261 unsigned ConvOp = Signed ?
14262 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
14263 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
14264 SDValue WidthConst =
14265 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
14266 dl, false);
14267 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
14268 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
14269 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
14270 DAG.getVTList(MVT::f64, MVT::Other),
14271 Ops, MVT::i8, LDN->getMemOperand());
14273 // For signed conversion, we need to sign-extend the value in the VSR.
14274 if (Signed) {
14275 SDValue ExtOps[] = { Ld, WidthConst };
14276 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
14277 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
14278 } else
14279 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
14280 }
14283 // For i32 intermediate values, unfortunately, the conversion functions
14284 // leave the upper 32 bits of the value undefined. Within the set of
14285 // scalar instructions, we have no method for zero- or sign-extending the
14286 // value. Thus, we cannot handle i32 intermediate values here.
14287 if (Op.getOperand(0).getValueType() == MVT::i32)
14288 return SDValue();
14290 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
14291 "UINT_TO_FP is supported only with FPCVT");
14293 // If we have FCFIDS, then use it when converting to single-precision.
14294 // Otherwise, convert to double-precision and then round.
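// As a rough sketch of the two paths (register names are illustrative):
// i64 -> f32 without FPCVT is
// fcfid f1, f0 ; i64 (already in an FPR) -> f64
// frsp f1, f1 ; round the f64 result to f32
// while with FPCVT a single fcfids performs the convert-and-round step.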
14295 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14296 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
14297 : PPCISD::FCFIDS)
14298 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
14299 : PPCISD::FCFID);
14300 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14301 ? MVT::f32
14302 : MVT::f64;
14304 // If we're converting from a float to an int and back to a float again,
14305 // then we don't need the store/load pair at all.
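// Illustrative sketch of the round trip being targeted:
// (f64 (sint_to_fp (fp_to_sint f64 %x)))
// can be lowered as fctidz followed by fcfid entirely in the FP unit,
// with no store/load pair to carry the intermediate integer.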
14306 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
14307 Subtarget.hasFPCVT()) ||
14308 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
14309 SDValue Src = Op.getOperand(0).getOperand(0);
14310 if (Src.getValueType() == MVT::f32) {
14311 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
14312 DCI.AddToWorklist(Src.getNode());
14313 } else if (Src.getValueType() != MVT::f64) {
14314 // Make sure that we don't pick up a ppc_fp128 source value.
14315 return SDValue();
14316 }
14318 unsigned FCTOp =
14319 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
14320 PPCISD::FCTIDUZ;
14322 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
14323 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
14325 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
14326 FP = DAG.getNode(ISD::FP_ROUND, dl,
14327 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
14328 DCI.AddToWorklist(FP.getNode());
14329 }
14331 return FP;
14332 }
14334 return SDValue();
14335 }
14337 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
14338 // builtins) into loads with swaps.
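// Conceptually (a sketch of the replacement, chains and types elided):
// (v2f64 (load <ptr>))
// becomes
// (v2f64 (PPCISD::XXSWAPD (PPCISD::LXVD2X <ptr>)))
// with a bitcast appended below when the requested type is not v2f64.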
14339 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
14340 DAGCombinerInfo &DCI) const {
14341 SelectionDAG &DAG = DCI.DAG;
14342 SDLoc dl(N);
14343 SDValue Chain;
14344 SDValue Base;
14345 MachineMemOperand *MMO;
14347 switch (N->getOpcode()) {
14349 llvm_unreachable("Unexpected opcode for little endian VSX load");
14351 LoadSDNode *LD = cast<LoadSDNode>(N);
14352 Chain = LD->getChain();
14353 Base = LD->getBasePtr();
14354 MMO = LD->getMemOperand();
14355 // If the MMO suggests this isn't a load of a full vector, leave
14356 // things alone. For a built-in, we have to make the change for
14357 // correctness, so if there is a size problem it will be a bug.
14358 if (MMO->getSize() < 16)
14359 return SDValue();
14360 break;
14361 }
14362 case ISD::INTRINSIC_W_CHAIN: {
14363 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14364 Chain = Intrin->getChain();
14365 // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14366 // us what we want. Get operand 2 instead.
14367 Base = Intrin->getOperand(2);
14368 MMO = Intrin->getMemOperand();
14373 MVT VecTy = N->getValueType(0).getSimpleVT();
14375 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
14376 // aligned and the type is a vector with elements up to 4 bytes
14377 if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14378 VecTy.getScalarSizeInBits() <= 32) {
14379 return SDValue();
14380 }
14382 SDValue LoadOps[] = { Chain, Base };
14383 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14384 DAG.getVTList(MVT::v2f64, MVT::Other),
14385 LoadOps, MVT::v2f64, MMO);
14387 DCI.AddToWorklist(Load.getNode());
14388 Chain = Load.getValue(1);
14389 SDValue Swap = DAG.getNode(
14390 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14391 DCI.AddToWorklist(Swap.getNode());
14393 // Add a bitcast if the resulting load type doesn't match v2f64.
14394 if (VecTy != MVT::v2f64) {
14395 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14396 DCI.AddToWorklist(N.getNode());
14397 // Package {bitcast value, swap's chain} to match Load's shape.
14398 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14399 N, Swap.getValue(1));
14400 } else
14401 return Swap;
14402 }
14405 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14406 // builtins) into stores with swaps.
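// Conceptually (a sketch, mirroring the load case above):
// (store v4i32:%val, <ptr>)
// becomes
// (PPCISD::STXVD2X (PPCISD::XXSWAPD (bitcast v2f64 %val)), <ptr>)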
14407 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14408 DAGCombinerInfo &DCI) const {
14409 SelectionDAG &DAG = DCI.DAG;
14410 SDLoc dl(N);
14411 unsigned SrcOpnd;
14412 SDValue Chain;
14413 SDValue Base;
14414 MachineMemOperand *MMO;
14416 switch (N->getOpcode()) {
14418 llvm_unreachable("Unexpected opcode for little endian VSX store");
14420 StoreSDNode *ST = cast<StoreSDNode>(N);
14421 Chain = ST->getChain();
14422 Base = ST->getBasePtr();
14423 MMO = ST->getMemOperand();
14424 SrcOpnd = 1;
14425 // If the MMO suggests this isn't a store of a full vector, leave
14426 // things alone. For a built-in, we have to make the change for
14427 // correctness, so if there is a size problem it will be a bug.
14428 if (MMO->getSize() < 16)
14429 return SDValue();
14430 break;
14431 }
14432 case ISD::INTRINSIC_VOID: {
14433 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14434 Chain = Intrin->getChain();
14435 // Intrin->getBasePtr() oddly does not get what we want.
14436 Base = Intrin->getOperand(3);
14437 MMO = Intrin->getMemOperand();
14438 SrcOpnd = 2;
14439 break;
14440 }
14441 }
14443 SDValue Src = N->getOperand(SrcOpnd);
14444 MVT VecTy = Src.getValueType().getSimpleVT();
14446 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
14447 // aligned and the type is a vector with elements up to 4 bytes.
14448 if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14449 VecTy.getScalarSizeInBits() <= 32) {
14450 return SDValue();
14451 }
14453 // All stores are done as v2f64, with a bitcast beforehand if needed.
14454 if (VecTy != MVT::v2f64) {
14455 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14456 DCI.AddToWorklist(Src.getNode());
14459 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14460 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14461 DCI.AddToWorklist(Swap.getNode());
14462 Chain = Swap.getValue(1);
14463 SDValue StoreOps[] = { Chain, Swap, Base };
14464 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14465 DAG.getVTList(MVT::Other),
14466 StoreOps, VecTy, MMO);
14467 DCI.AddToWorklist(Store.getNode());
14468 return Store;
14469 }
14471 // Handle DAG combine for STORE (FP_TO_INT F).
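// e.g. (a sketch) store (fp_to_sint f64 %f), <ptr> becomes a
// FP_TO_SINT_IN_VSR feeding a ST_VSR_SCAL_INT node, so the converted
// value is stored directly from the VSR instead of round-tripping
// through a GPR.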
14472 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14473 DAGCombinerInfo &DCI) const {
14475 SelectionDAG &DAG = DCI.DAG;
14476 SDLoc dl(N);
14477 unsigned Opcode = N->getOperand(1).getOpcode();
14479 assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14480 && "Not a FP_TO_INT Instruction!");
14482 SDValue Val = N->getOperand(1).getOperand(0);
14483 EVT Op1VT = N->getOperand(1).getValueType();
14484 EVT ResVT = Val.getValueType();
14486 // Floating point types smaller than 32 bits are not legal on Power.
14487 if (ResVT.getScalarSizeInBits() < 32)
14488 return SDValue();
14490 // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14491 bool ValidTypeForStoreFltAsInt =
14492 (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14493 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14495 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14496 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14497 return SDValue();
14499 // Extend f32 values to f64
14500 if (ResVT.getScalarSizeInBits() == 32) {
14501 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14502 DCI.AddToWorklist(Val.getNode());
14503 }
14505 // Set signed or unsigned conversion opcode.
14506 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14507 PPCISD::FP_TO_SINT_IN_VSR :
14508 PPCISD::FP_TO_UINT_IN_VSR;
14510 Val = DAG.getNode(ConvOpcode,
14511 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14512 DCI.AddToWorklist(Val.getNode());
14514 // Set number of bytes being converted.
14515 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14516 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14517 DAG.getIntPtrConstant(ByteSize, dl, false),
14518 DAG.getValueType(Op1VT) };
14520 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14521 DAG.getVTList(MVT::Other), Ops,
14522 cast<StoreSDNode>(N)->getMemoryVT(),
14523 cast<StoreSDNode>(N)->getMemOperand());
14525 DCI.AddToWorklist(Val.getNode());
14526 return Val;
14527 }
14529 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
14530 // Check that the source of the element keeps flipping
14531 // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
14532 bool PrevElemFromFirstVec = Mask[0] < NumElts;
14533 for (int i = 1, e = Mask.size(); i < e; i++) {
14534 if (PrevElemFromFirstVec && Mask[i] < NumElts)
14535 return false;
14536 if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14537 return false;
14538 PrevElemFromFirstVec = !PrevElemFromFirstVec;
14539 }
14540 return true;
14541 }
14543 static bool isSplatBV(SDValue Op) {
14544 if (Op.getOpcode() != ISD::BUILD_VECTOR)
14545 return false;
14547 SDValue FirstOp;
14548 // Find first non-undef input.
14549 for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14550 FirstOp = Op.getOperand(i);
14551 if (!FirstOp.isUndef())
14552 break;
14553 }
14555 // All inputs are undef or the same as the first non-undef input.
14556 for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14557 if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14558 return false;
14559 return true;
14560 }
14562 static SDValue isScalarToVec(SDValue Op) {
14563 if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14564 return Op;
14565 if (Op.getOpcode() != ISD::BITCAST)
14566 return SDValue();
14567 Op = Op.getOperand(0);
14568 if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14569 return Op;
14570 return SDValue();
14571 }
14573 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14574 int LHSMaxIdx, int RHSMinIdx,
14575 int RHSMaxIdx, int HalfVec) {
14576 for (int i = 0, e = ShuffV.size(); i < e; i++) {
14577 int Idx = ShuffV[i];
14578 if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14579 ShuffV[i] += HalfVec;
14584 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14585 // the original is:
14586 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14587 // In such a case, just change the shuffle mask to extract the element
14588 // from the permuted index.
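// For example (a sketch for v4i32): scalar_to_vector places the scalar in
// element 0, but the permuted form keeps it at element NumElts / 2 = 2. So
// for (v4i32 (scalar_to_vector (i32 (extract_elt v4i32 %a, 3)))) we emit
// vector_shuffle<-1,-1,3,-1> %a, %a rather than an actual permute node.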
14589 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
14590 SDLoc dl(OrigSToV);
14591 EVT VT = OrigSToV.getValueType();
14592 assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14593 "Expecting a SCALAR_TO_VECTOR here");
14594 SDValue Input = OrigSToV.getOperand(0);
14596 if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14597 ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14598 SDValue OrigVector = Input.getOperand(0);
14600 // Can't handle non-const element indices or different vector types
14601 // for the input to the extract and the output of the scalar_to_vector.
14602 if (Idx && VT == OrigVector.getValueType()) {
14603 SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
14604 NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
14605 return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14608 return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14609 OrigSToV.getOperand(0));
14612 // On little endian subtargets, combine shuffles such as:
14613 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14614 // to:
14615 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14616 // because the latter can be matched to a single instruction merge.
14617 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14618 // to put the value into element zero. Adjust the shuffle mask so that the
14619 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
14620 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14621 SelectionDAG &DAG) const {
14622 SDValue LHS = SVN->getOperand(0);
14623 SDValue RHS = SVN->getOperand(1);
14624 auto Mask = SVN->getMask();
14625 int NumElts = LHS.getValueType().getVectorNumElements();
14626 SDValue Res(SVN, 0);
14627 SDLoc dl(SVN);
14629 // None of these combines are useful on big endian systems since the ISA
14630 // already has a big endian bias.
14631 if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14632 return Res;
14634 // If this is not a shuffle of a shuffle and the first element comes from
14635 // the second vector, canonicalize to the commuted form. This will make it
14636 // more likely to match one of the single instruction patterns.
14637 if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14638 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14639 std::swap(LHS, RHS);
14640 Res = DAG.getCommutedVectorShuffle(*SVN);
14641 Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14644 // Adjust the shuffle mask if either input vector comes from a
14645 // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14646 // form (to prevent the need for a swap).
14647 SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14648 SDValue SToVLHS = isScalarToVec(LHS);
14649 SDValue SToVRHS = isScalarToVec(RHS);
14650 if (SToVLHS || SToVRHS) {
14651 int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14652 : SToVRHS.getValueType().getVectorNumElements();
14653 int NumEltsOut = ShuffV.size();
14655 // Initially assume that neither input is permuted. These will be adjusted
14656 // accordingly if either input is.
14657 int LHSMaxIdx = -1;
14658 int RHSMinIdx = -1;
14659 int RHSMaxIdx = -1;
14660 int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14662 // Get the permuted scalar to vector nodes for the source(s) that come from
14663 // ISD::SCALAR_TO_VECTOR.
14664 if (SToVLHS) {
14665 // Set up the values for the shuffle vector fixup.
14666 LHSMaxIdx = NumEltsOut / NumEltsIn;
14667 SToVLHS = getSToVPermuted(SToVLHS, DAG);
14668 if (SToVLHS.getValueType() != LHS.getValueType())
14669 SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14670 LHS = SToVLHS;
14671 }
14672 if (SToVRHS) {
14673 RHSMinIdx = NumEltsOut;
14674 RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14675 SToVRHS = getSToVPermuted(SToVRHS, DAG);
14676 if (SToVRHS.getValueType() != RHS.getValueType())
14677 SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14678 RHS = SToVRHS;
14679 }
14681 // Fix up the shuffle mask to reflect where the desired element actually is.
14682 // The minimum and maximum indices that correspond to element zero for both
14683 // the LHS and RHS are computed and will control which shuffle mask entries
14684 // are to be changed. For example, if the RHS is permuted, any shuffle mask
14685 // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
14686 // HalfVec to refer to the corresponding element in the permuted vector.
14687 fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14688 HalfVec);
14689 Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14691 // We may have simplified away the shuffle. We won't be able to do anything
14692 // further with it here.
14693 if (!isa<ShuffleVectorSDNode>(Res))
14694 return Res;
14695 Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14696 }
14698 // The common case after we commuted the shuffle is that the RHS is a splat
14699 // and we have elements coming in from the splat at indices that are not
14700 // conducive to using a merge.
14701 // Example:
14702 // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14703 if (!isSplatBV(RHS))
14704 return Res;
14706 // We are looking for a mask such that all even elements are from
14707 // one vector and all odd elements from the other.
14708 if (!isAlternatingShuffMask(Mask, NumElts))
14709 return Res;
14711 // Adjust the mask so we are pulling in the same index from the splat
14712 // as the index from the interesting vector in consecutive elements.
14713 // Example (even elements from first vector):
14714 // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14715 if (Mask[0] < NumElts)
14716 for (int i = 1, e = Mask.size(); i < e; i += 2)
14717 ShuffV[i] = (ShuffV[i - 1] + NumElts);
14718 // Example (odd elements from first vector):
14719 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14720 else
14721 for (int i = 0, e = Mask.size(); i < e; i += 2)
14722 ShuffV[i] = (ShuffV[i + 1] + NumElts);
14724 // If the RHS has undefs, we need to remove them since we may have created
14725 // a shuffle that adds those instead of the splat value.
14726 SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
14727 RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
14729 Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14730 return Res;
14731 }
14733 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14734 LSBaseSDNode *LSBase,
14735 DAGCombinerInfo &DCI) const {
14736 assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14737 "Not a reverse memop pattern!");
14739 auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14740 auto Mask = SVN->getMask();
14741 int i = 0;
14742 auto I = Mask.rbegin();
14743 auto E = Mask.rend();
14745 for (; I != E; ++I) {
14746 if (*I != i)
14747 return false;
14748 i++;
14749 }
14750 return true;
14751 };
14753 SelectionDAG &DAG = DCI.DAG;
14754 EVT VT = SVN->getValueType(0);
14756 if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14757 return SDValue();
14759 // Before P9, we have the PPCVSXSwapRemoval pass to hack the element order.
14760 // See the comment in PPCVSXSwapRemoval.cpp.
14761 // This combine conflicts with that optimization, so we don't do it there.
14762 if (!Subtarget.hasP9Vector())
14763 return SDValue();
14765 if (!IsElementReverse(SVN))
14766 return SDValue();
14768 if (LSBase->getOpcode() == ISD::LOAD) {
14769 SDLoc dl(LSBase);
14770 SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14771 return DAG.getMemIntrinsicNode(
14772 PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14773 LSBase->getMemoryVT(), LSBase->getMemOperand());
14774 }
14776 if (LSBase->getOpcode() == ISD::STORE) {
14777 SDLoc dl(LSBase);
14778 SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14779 LSBase->getBasePtr()};
14780 return DAG.getMemIntrinsicNode(
14781 PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14782 LSBase->getMemoryVT(), LSBase->getMemOperand());
14783 }
14785 llvm_unreachable("Expected a load or store node here");
14786 }
14788 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14789 DAGCombinerInfo &DCI) const {
14790 SelectionDAG &DAG = DCI.DAG;
14791 SDLoc dl(N);
14792 switch (N->getOpcode()) {
14793 default: break;
14794 case ISD::ADD:
14795 return combineADD(N, DCI);
14796 case ISD::SHL:
14797 return combineSHL(N, DCI);
14798 case ISD::SRA:
14799 return combineSRA(N, DCI);
14800 case ISD::SRL:
14801 return combineSRL(N, DCI);
14802 case ISD::MUL:
14803 return combineMUL(N, DCI);
14804 case ISD::FMA:
14805 case PPCISD::FNMSUB:
14806 return combineFMALike(N, DCI);
14807 case PPCISD::SHL:
14808 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14809 return N->getOperand(0);
14810 break;
14811 case PPCISD::SRL:
14812 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14813 return N->getOperand(0);
14814 break;
14815 case PPCISD::SRA:
14816 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14817 if (C->isNullValue() || // 0 >>s V -> 0.
14818 C->isAllOnesValue()) // -1 >>s V -> -1.
14819 return N->getOperand(0);
14820 }
14821 break;
14822 case ISD::SIGN_EXTEND:
14823 case ISD::ZERO_EXTEND:
14824 case ISD::ANY_EXTEND:
14825 return DAGCombineExtBoolTrunc(N, DCI);
14826 case ISD::TRUNCATE:
14827 return combineTRUNCATE(N, DCI);
14828 case ISD::SETCC:
14829 if (SDValue CSCC = combineSetCC(N, DCI))
14830 return CSCC;
14831 LLVM_FALLTHROUGH;
14832 case ISD::SELECT_CC:
14833 return DAGCombineTruncBoolExt(N, DCI);
14834 case ISD::SINT_TO_FP:
14835 case ISD::UINT_TO_FP:
14836 return combineFPToIntToFP(N, DCI);
14837 case ISD::VECTOR_SHUFFLE:
14838 if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14839 LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14840 return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14841 }
14842 return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14843 case ISD::STORE: {
14845 EVT Op1VT = N->getOperand(1).getValueType();
14846 unsigned Opcode = N->getOperand(1).getOpcode();
14848 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14849 SDValue Val = combineStoreFPToInt(N, DCI);
14850 if (Val)
14851 return Val;
14852 }
14854 if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14855 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14856 SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14857 if (Val)
14858 return Val;
14859 }
14861 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
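// e.g. (a sketch) store (bswap i32:%x), <ptr> becomes a single
// stwbrx %x, 0, <ptr>
// so no separate byte-reversal instructions are needed.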
14862 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14863 N->getOperand(1).getNode()->hasOneUse() &&
14864 (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14865 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14867 // STBRX can only handle simple types and it makes no sense to store less
14868 // than two bytes in byte-reversed order.
14869 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14870 if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14871 break;
14873 SDValue BSwapOp = N->getOperand(1).getOperand(0);
14874 // Do an any-extend to 32-bits if this is a half-word input.
14875 if (BSwapOp.getValueType() == MVT::i16)
14876 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14878 // If the type of BSWAP operand is wider than stored memory width
14879 // it needs to be shifted to the right side before STBRX.
14880 if (Op1VT.bitsGT(mVT)) {
14881 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14882 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14883 DAG.getConstant(Shift, dl, MVT::i32));
14884 // Need to truncate if this is a bswap of i64 stored as i32/i16.
14885 if (Op1VT == MVT::i64)
14886 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14887 }
14889 SDValue Ops[] = {
14890 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14891 };
14892 return
14893 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14894 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14895 cast<StoreSDNode>(N)->getMemOperand());
14896 }
14898 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
14899 // so as to increase the chance of CSE'ing the constant construction.
14900 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14901 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14902 // Need to sign-extend to 64 bits to handle negative values.
14903 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14904 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14905 MemVT.getSizeInBits());
14906 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14908 // DAG.getTruncStore() can't be used here because it doesn't accept
14909 // the general (base + offset) addressing mode.
14910 // So we use UpdateNodeOperands and setTruncatingStore instead.
14911 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14912 N->getOperand(3));
14913 cast<StoreSDNode>(N)->setTruncatingStore(true);
14914 return SDValue(N, 0);
14917 // For little endian, VSX stores require generating xxswapd/lxvd2x.
14918 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14919 if (Op1VT.isSimple()) {
14920 MVT StoreVT = Op1VT.getSimpleVT();
14921 if (Subtarget.needsSwapsForVSXMemOps() &&
14922 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14923 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14924 return expandVSXStoreForLE(N, DCI);
14925 }
14926 break;
14927 }
14928 case ISD::LOAD: {
14929 LoadSDNode *LD = cast<LoadSDNode>(N);
14930 EVT VT = LD->getValueType(0);
14932 // For little endian, VSX loads require generating lxvd2x/xxswapd.
14933 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14934 if (VT.isSimple()) {
14935 MVT LoadVT = VT.getSimpleVT();
14936 if (Subtarget.needsSwapsForVSXMemOps() &&
14937 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14938 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14939 return expandVSXLoadForLE(N, DCI);
14942 // We sometimes end up with a 64-bit integer load, from which we extract
14943 // two single-precision floating-point numbers. This happens with
14944 // std::complex<float>, and other similar structures, because of the way we
14945 // canonicalize structure copies. However, if we lack direct moves,
14946 // then the final bitcasts from the extracted integer values to the
14947 // floating-point numbers turn into store/load pairs. Even with direct moves,
14948 // just loading the two floating-point numbers is likely better.
14949 auto ReplaceTwoFloatLoad = [&]() {
14950 if (VT != MVT::i64)
14951 return false;
14953 if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14954 LD->isVolatile())
14955 return false;
14957 // We're looking for a sequence like this:
14958 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14959 // t16: i64 = srl t13, Constant:i32<32>
14960 // t17: i32 = truncate t16
14961 // t18: f32 = bitcast t17
14962 // t19: i32 = truncate t13
14963 // t20: f32 = bitcast t19
14965 if (!LD->hasNUsesOfValue(2, 0))
14966 return false;
14968 auto UI = LD->use_begin();
14969 while (UI.getUse().getResNo() != 0) ++UI;
14970 SDNode *Trunc = *UI++;
14971 while (UI.getUse().getResNo() != 0) ++UI;
14972 SDNode *RightShift = *UI;
14973 if (Trunc->getOpcode() != ISD::TRUNCATE)
14974 std::swap(Trunc, RightShift);
14976 if (Trunc->getOpcode() != ISD::TRUNCATE ||
14977 Trunc->getValueType(0) != MVT::i32 ||
14978 !Trunc->hasOneUse())
14979 return false;
14980 if (RightShift->getOpcode() != ISD::SRL ||
14981 !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14982 RightShift->getConstantOperandVal(1) != 32 ||
14983 !RightShift->hasOneUse())
14984 return false;
14986 SDNode *Trunc2 = *RightShift->use_begin();
14987 if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14988 Trunc2->getValueType(0) != MVT::i32 ||
14989 !Trunc2->hasOneUse())
14990 return false;
14992 SDNode *Bitcast = *Trunc->use_begin();
14993 SDNode *Bitcast2 = *Trunc2->use_begin();
14995 if (Bitcast->getOpcode() != ISD::BITCAST ||
14996 Bitcast->getValueType(0) != MVT::f32)
14997 return false;
14998 if (Bitcast2->getOpcode() != ISD::BITCAST ||
14999 Bitcast2->getValueType(0) != MVT::f32)
15000 return false;
15002 if (Subtarget.isLittleEndian())
15003 std::swap(Bitcast, Bitcast2);
15005 // Bitcast has the second float (in memory-layout order) and Bitcast2
15006 // has the first one.
15008 SDValue BasePtr = LD->getBasePtr();
15009 if (LD->isIndexed()) {
15010 assert(LD->getAddressingMode() == ISD::PRE_INC &&
15011 "Non-pre-inc AM on PPC?");
15012 BasePtr =
15013 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
15014 LD->getOffset());
15015 }
15017 MachineMemOperand::Flags MMOFlags =
15018 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
15019 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
15020 LD->getPointerInfo(), LD->getAlignment(),
15021 MMOFlags, LD->getAAInfo());
15022 SDValue AddPtr =
15023 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
15024 BasePtr, DAG.getIntPtrConstant(4, dl));
15025 SDValue FloatLoad2 = DAG.getLoad(
15026 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
15027 LD->getPointerInfo().getWithOffset(4),
15028 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
15030 if (LD->isIndexed()) {
15031 // Note that DAGCombine should re-form any pre-increment load(s) from
15032 // what is produced here if that makes sense.
15033 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
15036 DCI.CombineTo(Bitcast2, FloatLoad);
15037 DCI.CombineTo(Bitcast, FloatLoad2);
15039 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
15040 SDValue(FloatLoad2.getNode(), 1));
15042 return true;
15043 };
15044 if (ReplaceTwoFloatLoad())
15045 return SDValue(N, 0);
15047 EVT MemVT = LD->getMemoryVT();
15048 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
15049 Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
15050 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
15051 Align ScalarABIAlignment = DAG.getDataLayout().getABITypeAlign(STy);
15052 if (LD->isUnindexed() && VT.isVector() &&
15053 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
15054 // P8 and later hardware should just use LOAD.
15055 !Subtarget.hasP8Vector() &&
15056 (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
15057 VT == MVT::v4f32)) ||
15058 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
15059 LD->getAlign() >= ScalarABIAlignment)) &&
15060 LD->getAlign() < ABIAlignment) {
15061 // This is a type-legal unaligned Altivec or QPX load.
15062 SDValue Chain = LD->getChain();
15063 SDValue Ptr = LD->getBasePtr();
15064 bool isLittleEndian = Subtarget.isLittleEndian();
15066 // This implements the loading of unaligned vectors as described in
15067 // the venerable Apple Velocity Engine overview. Specifically:
15068 // https://developer.apple.com/hardwaredrivers/ve/alignment.html
15069 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
15071 // The general idea is to expand a sequence of one or more unaligned
15072 // loads into an alignment-based permutation-control instruction (lvsl
15073 // or lvsr), a series of regular vector loads (which always truncate
15074 // their input address to an aligned address), and a series of
15075 // permutations. The results of these permutations are the requested
15076 // loaded values. The trick is that the last "extra" load is not taken
15077 // from the address you might suspect (sizeof(vector) bytes after the
15078 // last requested load), but rather sizeof(vector) - 1 bytes after the
15079 // last requested vector. The point of this is to avoid a page fault if
15080 // the base address happened to be aligned. This works because if the
15081 // base address is aligned, then adding less than a full vector length
15082 // will cause the last vector in the sequence to be (re)loaded.
15083 // Otherwise, the next vector will be fetched as you might suspect was
15084 // necessary.
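//
// As a rough sketch (assumed register names, one unaligned 16-byte load):
// lvsl vP, 0, rBase ; permute control from the low address bits
// lvx vA, 0, rBase ; aligned load covering the first bytes
// lvx vB, rBase, r15 ; aligned load 15 = sizeof(vector)-1 bytes later
// vperm vR, vA, vB, vP ; select the 16 requested bytes
// (On little endian we use lvsr and swap the vperm inputs; see below.)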
15086 // We might be able to reuse the permutation generation from
15087 // a different base address offset from this one by an aligned amount.
15088 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
15089 // optimization later.
15090 Intrinsic::ID Intr, IntrLD, IntrPerm;
15091 MVT PermCntlTy, PermTy, LDTy;
15092 if (Subtarget.hasAltivec()) {
15093 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
15094 Intrinsic::ppc_altivec_lvsl;
15095 IntrLD = Intrinsic::ppc_altivec_lvx;
15096 IntrPerm = Intrinsic::ppc_altivec_vperm;
15097 PermCntlTy = MVT::v16i8;
15098 PermTy = MVT::v4i32;
15099 LDTy = MVT::v4i32;
15100 } else {
15101 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
15102 Intrinsic::ppc_qpx_qvlpcls;
15103 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
15104 Intrinsic::ppc_qpx_qvlfs;
15105 IntrPerm = Intrinsic::ppc_qpx_qvfperm;
15106 PermCntlTy = MVT::v4f64;
15107 PermTy = MVT::v4f64;
15108 LDTy = MemVT.getSimpleVT();
15109 }
15111 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
15113 // Create the new MMO for the new base load. It is like the original MMO,
15114 // but represents an area in memory almost twice the vector size centered
15115 // on the original address. If the address is unaligned, we might start
15116 // reading up to (sizeof(vector)-1) bytes below the address of the
15117 // original unaligned load.
15118 MachineFunction &MF = DAG.getMachineFunction();
15119 MachineMemOperand *BaseMMO =
15120 MF.getMachineMemOperand(LD->getMemOperand(),
15121 -(long)MemVT.getStoreSize()+1,
15122 2*MemVT.getStoreSize()-1);
15124 // Create the new base load.
15125 SDValue LDXIntID =
15126 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
15127 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
15128 SDValue BaseLoad =
15129 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15130 DAG.getVTList(PermTy, MVT::Other),
15131 BaseLoadOps, LDTy, BaseMMO);
15133 // Note that the value of IncOffset (which is provided to the next
15134 // load's pointer info offset value, and thus used to calculate the
15135 // alignment), and the value of IncValue (which is actually used to
15136 // increment the pointer value) are different! This is because we
15137 // require the next load to appear to be aligned, even though it
15138 // is actually offset from the base pointer by a lesser amount.
15139 int IncOffset = VT.getSizeInBits() / 8;
15140 int IncValue = IncOffset;
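// (e.g. for a 16-byte vector, IncOffset stays 16 for the pointer info while
// IncValue may be reduced to 15 below so the extra load overlaps the last
// requested vector rather than touching the following page.)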
15142 // Walk (both up and down) the chain looking for another load at the real
15143 // (aligned) offset (the alignment of the other load does not matter in
15144 // this case). If found, then do not use the offset reduction trick, as
15145 // that will prevent the loads from being later combined (as they would
15146 // otherwise be duplicates).
15147 if (!findConsecutiveLoad(LD, DAG))
15148 --IncValue;
15150 SDValue Increment =
15151 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
15152 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
15154 MachineMemOperand *ExtraMMO =
15155 MF.getMachineMemOperand(LD->getMemOperand(),
15156 1, 2*MemVT.getStoreSize()-1);
15157 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
15158 SDValue ExtraLoad =
15159 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15160 DAG.getVTList(PermTy, MVT::Other),
15161 ExtraLoadOps, LDTy, ExtraMMO);
15163 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
15164 BaseLoad.getValue(1), ExtraLoad.getValue(1));
15166 // Because vperm has a big-endian bias, we must reverse the order
15167 // of the input vectors and complement the permute control vector
15168 // when generating little endian code. We have already handled the
15169 // latter by using lvsr instead of lvsl, so just reverse BaseLoad
15170 // and ExtraLoad here.
15171 SDValue Perm;
15172 if (isLittleEndian)
15173 Perm = BuildIntrinsicOp(IntrPerm,
15174 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
15175 else
15176 Perm = BuildIntrinsicOp(IntrPerm,
15177 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
15179 if (VT != PermTy)
15180 Perm = Subtarget.hasAltivec() ?
15181 DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
15182 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
15183 DAG.getTargetConstant(1, dl, MVT::i64));
15184 // second argument is 1 because this rounding
15185 // is always exact.
15187 // The output of the permutation is our loaded result, the TokenFactor is
15188 // our new chain.
15189 DCI.CombineTo(N, Perm, TF);
15190 return SDValue(N, 0);
15191 }
15192 }
15193 break;
15194 case ISD::INTRINSIC_WO_CHAIN: {
15195 bool isLittleEndian = Subtarget.isLittleEndian();
15196 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
15197 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
15198 : Intrinsic::ppc_altivec_lvsl);
15199 if ((IID == Intr ||
15200 IID == Intrinsic::ppc_qpx_qvlpcld ||
15201 IID == Intrinsic::ppc_qpx_qvlpcls) &&
15202 N->getOperand(1)->getOpcode() == ISD::ADD) {
15203 SDValue Add = N->getOperand(1);
15205 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
15206 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
15208 if (DAG.MaskedValueIsZero(Add->getOperand(1),
15209 APInt::getAllOnesValue(Bits /* alignment */)
15210 .zext(Add.getScalarValueSizeInBits()))) {
15211 SDNode *BasePtr = Add->getOperand(0).getNode();
15212 for (SDNode::use_iterator UI = BasePtr->use_begin(),
15213 UE = BasePtr->use_end();
15214 UI != UE; ++UI) {
15215 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15216 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
15217 // We've found another LVSL/LVSR, and this address is an aligned
15218 // multiple of that one. The results will be the same, so use the
15219 // one we've just found instead.
15221 return SDValue(*UI, 0);
15226 if (isa<ConstantSDNode>(Add->getOperand(1))) {
15227 SDNode *BasePtr = Add->getOperand(0).getNode();
15228 for (SDNode::use_iterator UI = BasePtr->use_begin(),
15229 UE = BasePtr->use_end(); UI != UE; ++UI) {
15230 if (UI->getOpcode() == ISD::ADD &&
15231 isa<ConstantSDNode>(UI->getOperand(1)) &&
15232 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
15233 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
15234 (1ULL << Bits) == 0) {
15235 SDNode *OtherAdd = *UI;
15236 for (SDNode::use_iterator VI = OtherAdd->use_begin(),
15237 VE = OtherAdd->use_end(); VI != VE; ++VI) {
15238 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15239 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
15240 return SDValue(*VI, 0);
15248 // Combine vmaxsw/h/b(a, a's negation) to abs(a)
15250 // Expose the vabsduw/h/b opportunity for downstream code.
15250 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
15251 (IID == Intrinsic::ppc_altivec_vmaxsw ||
15252 IID == Intrinsic::ppc_altivec_vmaxsh ||
15253 IID == Intrinsic::ppc_altivec_vmaxsb)) {
15254 SDValue V1 = N->getOperand(1);
15255 SDValue V2 = N->getOperand(2);
15256 if ((V1.getSimpleValueType() == MVT::v4i32 ||
15257 V1.getSimpleValueType() == MVT::v8i16 ||
15258 V1.getSimpleValueType() == MVT::v16i8) &&
15259 V1.getSimpleValueType() == V2.getSimpleValueType()) {
15260 // (0-a, a)
15261 if (V1.getOpcode() == ISD::SUB &&
15262 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
15263 V1.getOperand(1) == V2) {
15264 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
15266 // (a, 0-a)
15267 if (V2.getOpcode() == ISD::SUB &&
15268 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
15269 V2.getOperand(1) == V1) {
15270 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15272 // (x-y, y-x)
15273 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
15274 V1.getOperand(0) == V2.getOperand(1) &&
15275 V1.getOperand(1) == V2.getOperand(0)) {
15276 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15277 }
15278 }
15279 }
15281 break;
15282 }
15283 case ISD::INTRINSIC_W_CHAIN:
15284 // For little endian, VSX loads require generating lxvd2x/xxswapd.
15285 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
15286 if (Subtarget.needsSwapsForVSXMemOps()) {
15287 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15288 default:
15289 break;
15290 case Intrinsic::ppc_vsx_lxvw4x:
15291 case Intrinsic::ppc_vsx_lxvd2x:
15292 return expandVSXLoadForLE(N, DCI);
15293 }
15294 }
15295 break;
15296 case ISD::INTRINSIC_VOID:
15297 // For little endian, VSX stores require generating xxswapd/stxvd2x.
15298 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
15299 if (Subtarget.needsSwapsForVSXMemOps()) {
15300 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15301 default:
15302 break;
15303 case Intrinsic::ppc_vsx_stxvw4x:
15304 case Intrinsic::ppc_vsx_stxvd2x:
15305 return expandVSXStoreForLE(N, DCI);
15306 }
15307 }
15308 break;
15309 case ISD::BSWAP:
15310 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
15311 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
15312 N->getOperand(0).hasOneUse() &&
15313 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
15314 (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
15315 N->getValueType(0) == MVT::i64))) {
15316 SDValue Load = N->getOperand(0);
15317 LoadSDNode *LD = cast<LoadSDNode>(Load);
15318 // Create the byte-swapping load.
15319 SDValue Ops[] = {
15320 LD->getChain(), // Chain
15321 LD->getBasePtr(), // Ptr
15322 DAG.getValueType(N->getValueType(0)) // VT
15323 };
15324 SDValue BSLoad =
15325 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
15326 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
15327 MVT::i64 : MVT::i32, MVT::Other),
15328 Ops, LD->getMemoryVT(), LD->getMemOperand());
15330 // If this is an i16 load, insert the truncate.
15331 SDValue ResVal = BSLoad;
15332 if (N->getValueType(0) == MVT::i16)
15333 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
15335 // First, combine the bswap away. This makes the value produced by the
15336 // load dead.
15337 DCI.CombineTo(N, ResVal);
15339 // Next, combine the load away, we give it a bogus result value but a real
15340 // chain result. The result value is dead because the bswap is dead.
15341 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
15343 // Return N so it doesn't get rechecked!
15344 return SDValue(N, 0);
15345 }
15346 break;
15347 case PPCISD::VCMPo: {
15348 // If a VCMPo node already exists with exactly the same operands as this
15349 // node, use its result instead of this node (VCMPo computes both a CR6 and
15350 // a normal output).
15352 if (!N->getOperand(0).hasOneUse() &&
15353 !N->getOperand(1).hasOneUse() &&
15354 !N->getOperand(2).hasOneUse()) {
15356 // Scan all of the users of the LHS, looking for VCMPo's that match.
15357 SDNode *VCMPoNode = nullptr;
15359 SDNode *LHSN = N->getOperand(0).getNode();
15360 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
15361 UI != E; ++UI)
15362 if (UI->getOpcode() == PPCISD::VCMPo &&
15363 UI->getOperand(1) == N->getOperand(1) &&
15364 UI->getOperand(2) == N->getOperand(2) &&
15365 UI->getOperand(0) == N->getOperand(0)) {
15366 VCMPoNode = *UI;
15367 break;
15368 }
15370 // If there is no VCMPo node, or if the flag value has a single use, don't
15371 // transform this.
15372 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
15373 break;
15375 // Look at the (necessarily single) use of the flag value. If it has a
15376 // chain, this transformation is more complex. Note that multiple things
15377 // could use the value result, which we should ignore.
15378 SDNode *FlagUser = nullptr;
15379 for (SDNode::use_iterator UI = VCMPoNode->use_begin();
15380 FlagUser == nullptr; ++UI) {
15381 assert(UI != VCMPoNode->use_end() && "Didn't find user!");
15382 SDNode *User = *UI;
15383 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
15384 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
15385 FlagUser = User;
15386 break;
15387 }
15388 }
15389 }
15391 // If the user is a MFOCRF instruction, we know this is safe.
15392 // Otherwise we give up for right now.
15393 if (FlagUser->getOpcode() == PPCISD::MFOCRF)
15394 return SDValue(VCMPoNode, 0);
15395 break;
15396 }
15397 case ISD::BRCOND: {
15398 SDValue Cond = N->getOperand(1);
15399 SDValue Target = N->getOperand(2);
15401 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15402 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
15403 Intrinsic::loop_decrement) {
15405 // We now need to make the intrinsic dead (it cannot be instruction
15406 // selected).
15407 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
15408 assert(Cond.getNode()->hasOneUse() &&
15409 "Counter decrement has more than one use");
15411 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
15412 N->getOperand(0), Target);
15413 }
15414 }
15415 break;
15416 case ISD::BR_CC: {
15417 // If this is a branch on an altivec predicate comparison, lower this so
15418 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
15419 // lowering is done pre-legalize, because the legalizer lowers the predicate
15420 // compare down to code that is difficult to reassemble.
15421 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15422 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
15424 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
15425 // value. If so, pass-through the AND to get to the intrinsic.
15426 if (LHS.getOpcode() == ISD::AND &&
15427 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15428 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
15429 Intrinsic::loop_decrement &&
15430 isa<ConstantSDNode>(LHS.getOperand(1)) &&
15431 !isNullConstant(LHS.getOperand(1)))
15432 LHS = LHS.getOperand(0);
15434 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15435 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
15436 Intrinsic::loop_decrement &&
15437 isa<ConstantSDNode>(RHS)) {
15438 assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
15439 "Counter decrement comparison is not EQ or NE");
15441 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15442 bool isBDNZ = (CC == ISD::SETEQ && Val) ||
15443 (CC == ISD::SETNE && !Val);
15445 // We now need to make the intrinsic dead (it cannot be instruction
15446 // selected).
15447 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
15448 assert(LHS.getNode()->hasOneUse() &&
15449 "Counter decrement has more than one use");
15451 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
15452 N->getOperand(0), N->getOperand(4));
15453 }
15455 int CompareOpc;
15456 bool isDot;
15458 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15459 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
15460 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
15461 assert(isDot && "Can't compare against a vector result!");
15463 // If this is a comparison against something other than 0/1, then we know
15464 // that the condition is never/always true.
15465 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15466 if (Val != 0 && Val != 1) {
15467 if (CC == ISD::SETEQ) // Cond never true, remove branch.
15468 return N->getOperand(0);
15469 // Always !=, turn it into an unconditional branch.
15470 return DAG.getNode(ISD::BR, dl, MVT::Other,
15471 N->getOperand(0), N->getOperand(4));
15472 }
15474 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
15476 // Create the PPCISD altivec 'dot' comparison node.
15477 SDValue Ops[] = {
15478 LHS.getOperand(2), // LHS of compare
15479 LHS.getOperand(3), // RHS of compare
15480 DAG.getConstant(CompareOpc, dl, MVT::i32)
15481 };
15482 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15483 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
15485 // Unpack the result based on how the target uses it.
15486 PPC::Predicate CompOpc;
15487 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15488 default: // Can't happen, don't crash on invalid number though.
15489 case 0: // Branch on the value of the EQ bit of CR6.
15490 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15491 break;
15492 case 1: // Branch on the inverted value of the EQ bit of CR6.
15493 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15494 break;
15495 case 2: // Branch on the value of the LT bit of CR6.
15496 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15497 break;
15498 case 3: // Branch on the inverted value of the LT bit of CR6.
15499 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15500 break;
15501 }
15503 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15504 DAG.getConstant(CompOpc, dl, MVT::i32),
15505 DAG.getRegister(PPC::CR6, MVT::i32),
15506 N->getOperand(4), CompNode.getValue(1));
15507 }
15508 break;
15509 }
15510 case ISD::BUILD_VECTOR:
15511 return DAGCombineBuildVector(N, DCI);
15512 case ISD::ABS:
15513 return combineABS(N, DCI);
15514 case ISD::VSELECT:
15515 return combineVSelect(N, DCI);
15516 }
15518 return SDValue();
15519 }
15521 SDValue
15522 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15523 SelectionDAG &DAG,
15524 SmallVectorImpl<SDNode *> &Created) const {
15525 // fold (sdiv X, pow2)
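// e.g. (an illustrative sketch, register names assumed) X sdiv 8 (i32):
// srawi r4, r3, 3 ; arithmetic shift; CA is set if X was negative
// ; and any one bits were shifted out
// addze r4, r4 ; add the carry to round the quotient toward zero
// PPCISD::SRA_ADDZE below denotes exactly this srawi/addze (sradi/addze
// for i64) pairing; for a negative power of two we then negate the result.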
15526 EVT VT = N->getValueType(0);
15527 if (VT == MVT::i64 && !Subtarget.isPPC64())
15528 return SDValue();
15529 if ((VT != MVT::i32 && VT != MVT::i64) ||
15530 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15531 return SDValue();
15533 SDLoc DL(N);
15534 SDValue N0 = N->getOperand(0);
15536 bool IsNegPow2 = (-Divisor).isPowerOf2();
15537 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15538 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15540 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15541 Created.push_back(Op.getNode());
15543 if (IsNegPow2) {
15544 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15545 Created.push_back(Op.getNode());
15546 }
15548 return Op;
15549 }
15551 //===----------------------------------------------------------------------===//
15552 // Inline Assembly Support
15553 //===----------------------------------------------------------------------===//
15555 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15556 KnownBits &Known,
15557 const APInt &DemandedElts,
15558 const SelectionDAG &DAG,
15559 unsigned Depth) const {
15560 Known.resetAll();
15561 switch (Op.getOpcode()) {
15562 default: break;
15563 case PPCISD::LBRX: {
15564 // lhbrx is known to have the top bits cleared out.
15565 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15566 Known.Zero = 0xFFFF0000;
15567 break;
15568 }
15569 case ISD::INTRINSIC_WO_CHAIN: {
15570 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15571 default: break;
15572 case Intrinsic::ppc_altivec_vcmpbfp_p:
15573 case Intrinsic::ppc_altivec_vcmpeqfp_p:
15574 case Intrinsic::ppc_altivec_vcmpequb_p:
15575 case Intrinsic::ppc_altivec_vcmpequh_p:
15576 case Intrinsic::ppc_altivec_vcmpequw_p:
15577 case Intrinsic::ppc_altivec_vcmpequd_p:
15578 case Intrinsic::ppc_altivec_vcmpgefp_p:
15579 case Intrinsic::ppc_altivec_vcmpgtfp_p:
15580 case Intrinsic::ppc_altivec_vcmpgtsb_p:
15581 case Intrinsic::ppc_altivec_vcmpgtsh_p:
15582 case Intrinsic::ppc_altivec_vcmpgtsw_p:
15583 case Intrinsic::ppc_altivec_vcmpgtsd_p:
15584 case Intrinsic::ppc_altivec_vcmpgtub_p:
15585 case Intrinsic::ppc_altivec_vcmpgtuh_p:
15586 case Intrinsic::ppc_altivec_vcmpgtuw_p:
15587 case Intrinsic::ppc_altivec_vcmpgtud_p:
15588 Known.Zero = ~1U; // All bits but the low one are known to be zero.
15589 break;
15590 }
15591 }
15592 }
15593 }
15595 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15596 switch (Subtarget.getCPUDirective()) {
15597 default: break;
15598 case PPC::DIR_970:
15599 case PPC::DIR_PWR4:
15600 case PPC::DIR_PWR5:
15601 case PPC::DIR_PWR5X:
15602 case PPC::DIR_PWR6:
15603 case PPC::DIR_PWR6X:
15604 case PPC::DIR_PWR7:
15605 case PPC::DIR_PWR8:
15606 case PPC::DIR_PWR9:
15607 case PPC::DIR_PWR10:
15608 case PPC::DIR_PWR_FUTURE: {
15609 if (!ML)
15610 break;
15612 if (!DisableInnermostLoopAlign32) {
15613 // If the nested loop is an innermost loop, prefer a 32-byte alignment,
15614 // so that we can decrease cache misses and branch-prediction misses.
15615 // Actual alignment of the loop will depend on the hotness check and other
15616 // logic in alignBlocks.
15617 if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15618 return Align(32);
15619 }
15621 const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15623 // For small loops (between 5 and 8 instructions), align to a 32-byte
15624 // boundary so that the entire loop fits in one instruction-cache line.
15625 uint64_t LoopSize = 0;
15626 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15627 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15628 LoopSize += TII->getInstSizeInBytes(*J);
15629 if (LoopSize > 32)
15630 break;
15631 }
15633 if (LoopSize > 16 && LoopSize <= 32)
15634 return Align(32);
15636 break;
15637 }
15638 }
15640 return TargetLowering::getPrefLoopAlignment(ML);
15641 }
15643 /// getConstraintType - Given a constraint, return the type of
15644 /// constraint it is for this target.
15645 PPCTargetLowering::ConstraintType
15646 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15647 if (Constraint.size() == 1) {
15648 switch (Constraint[0]) {
15649 default: break;
15650 case 'b':
15651 case 'r':
15652 case 'f':
15653 case 'd':
15654 case 'v':
15655 case 'y':
15656 return C_RegisterClass;
15657 case 'Z':
15658 // FIXME: While Z does indicate a memory constraint, it specifically
15659 // indicates an r+r address (used in conjunction with the 'y' modifier
15660 // in the replacement string). Currently, we're forcing the base
15661 // register to be r0 in the asm printer (which is interpreted as zero)
15662 // and forming the complete address in the second register. This is
15663 // suboptimal.
15664 return C_Memory;
15665 }
15666 } else if (Constraint == "wc") { // individual CR bits.
15667 return C_RegisterClass;
15668 } else if (Constraint == "wa" || Constraint == "wd" ||
15669 Constraint == "wf" || Constraint == "ws" ||
15670 Constraint == "wi" || Constraint == "ww") {
15671 return C_RegisterClass; // VSX registers.
15672 }
15673 return TargetLowering::getConstraintType(Constraint);
15674 }
15676 /// Examine constraint type and operand type and determine a weight value.
15677 /// This object must already have been set up with the operand type
15678 /// and the current alternative constraint selected.
15679 TargetLowering::ConstraintWeight
15680 PPCTargetLowering::getSingleConstraintMatchWeight(
15681 AsmOperandInfo &info, const char *constraint) const {
15682 ConstraintWeight weight = CW_Invalid;
15683 Value *CallOperandVal = info.CallOperandVal;
15684 // If we don't have a value, we can't do a match,
15685 // but allow it at the lowest weight.
15686 if (!CallOperandVal)
15687 return CW_Default;
15688 Type *type = CallOperandVal->getType();
15690 // Look at the constraint type.
15691 if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15692 return CW_Register; // an individual CR bit.
15693 else if ((StringRef(constraint) == "wa" ||
15694 StringRef(constraint) == "wd" ||
15695 StringRef(constraint) == "wf") &&
15696 type->isVectorTy())
15697 return CW_Register;
15698 else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
15699 return CW_Register; // holds 64-bit integer data.
15700 else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15701 return CW_Register;
15702 else if (StringRef(constraint) == "ww" && type->isFloatTy())
15703 return CW_Register;
15705 switch (*constraint) {
15706 default:
15707 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15708 break;
15709 case 'b':
15710 if (type->isIntegerTy())
15711 weight = CW_Register;
15712 break;
15713 case 'f':
15714 if (type->isFloatTy())
15715 weight = CW_Register;
15716 break;
15717 case 'd':
15718 if (type->isDoubleTy())
15719 weight = CW_Register;
15720 break;
15721 case 'v':
15722 if (type->isVectorTy())
15723 weight = CW_Register;
15724 break;
15725 case 'y':
15726 weight = CW_Register;
15727 break;
15728 case 'Z':
15729 weight = CW_Memory;
15730 break;
15731 }
15732 return weight;
15733 }
15735 std::pair<unsigned, const TargetRegisterClass *>
15736 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15737 StringRef Constraint,
15738 MVT VT) const {
15739 if (Constraint.size() == 1) {
15740 // GCC RS6000 Constraint Letters
15741 switch (Constraint[0]) {
15742 case 'b': // R1-R31
15743 if (VT == MVT::i64 && Subtarget.isPPC64())
15744 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15745 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15746 case 'r': // R0-R31
15747 if (VT == MVT::i64 && Subtarget.isPPC64())
15748 return std::make_pair(0U, &PPC::G8RCRegClass);
15749 return std::make_pair(0U, &PPC::GPRCRegClass);
15750 // 'd' and 'f' constraints are both defined to be "the floating point
15751 // registers", where one is for 32-bit and the other for 64-bit. We don't
15752 // really care overly much here so just give them all the same reg classes.
15755 if (Subtarget.hasSPE()) {
15756 if (VT == MVT::f32 || VT == MVT::i32)
15757 return std::make_pair(0U, &PPC::GPRCRegClass);
15758 if (VT == MVT::f64 || VT == MVT::i64)
15759 return std::make_pair(0U, &PPC::SPERCRegClass);
15761 if (VT == MVT::f32 || VT == MVT::i32)
15762 return std::make_pair(0U, &PPC::F4RCRegClass);
15763 if (VT == MVT::f64 || VT == MVT::i64)
15764 return std::make_pair(0U, &PPC::F8RCRegClass);
15765 if (VT == MVT::v4f64 && Subtarget.hasQPX())
15766 return std::make_pair(0U, &PPC::QFRCRegClass);
15767 if (VT == MVT::v4f32 && Subtarget.hasQPX())
15768 return std::make_pair(0U, &PPC::QSRCRegClass);
15772 if (VT == MVT::v4f64 && Subtarget.hasQPX())
15773 return std::make_pair(0U, &PPC::QFRCRegClass);
15774 if (VT == MVT::v4f32 && Subtarget.hasQPX())
15775 return std::make_pair(0U, &PPC::QSRCRegClass);
15776 if (Subtarget.hasAltivec())
15777 return std::make_pair(0U, &PPC::VRRCRegClass);
15780 return std::make_pair(0U, &PPC::CRRCRegClass);
15782 } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15783 // An individual CR bit.
15784 return std::make_pair(0U, &PPC::CRBITRCRegClass);
15785 } else if ((Constraint == "wa" || Constraint == "wd" ||
15786 Constraint == "wf" || Constraint == "wi") &&
15787 Subtarget.hasVSX()) {
15788 return std::make_pair(0U, &PPC::VSRCRegClass);
15789 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15790 if (VT == MVT::f32 && Subtarget.hasP8Vector())
15791 return std::make_pair(0U, &PPC::VSSRCRegClass);
15793 return std::make_pair(0U, &PPC::VSFRCRegClass);
15796 // If we name a VSX register, we can't defer to the base class because it
15797 // will not recognize the correct register (their names will be VSL{0-31}
15798 // and V{0-31} so they won't match). So we match them here.
15799 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15800 int VSNum = atoi(Constraint.data() + 3);
15801 assert(VSNum >= 0 && VSNum <= 63 &&
15802 "Attempted to access a vsr out of range");
15804 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15805 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15807 std::pair<unsigned, const TargetRegisterClass *> R =
15808 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15810 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15811 // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15812 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15814 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15815 // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15816 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15817 PPC::GPRCRegClass.contains(R.first))
15818 return std::make_pair(TRI->getMatchingSuperReg(R.first,
15819 PPC::sub_32, &PPC::G8RCRegClass),
15820 &PPC::G8RCRegClass);
15822 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15823 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15824 R.first = PPC::CR0;
15825 R.second = &PPC::CRRCRegClass;
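
// Illustrative only (hypothetical user code): an explicit VSX register name
// matched by the hand-written block above, alongside the generic "wa" class:
//
//   __vector double VD, VA, VB;
//   asm("xvadddp %x0, %x1, %x2" : "=wa"(VD) : "wa"(VA), "{vs40}"(VB));
//
// "{vs40}" resolves through the size > 3 / 'v' / 's' check to PPC::V8
// (vs40 == v8), while plain "wa" leaves the choice to the register allocator.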
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I': // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M': // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N': // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O': // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
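
// Illustrative only (hypothetical user code): the 'I' constraint accepts a
// signed 16-bit immediate, so this lowers to a single addi:
//
//   int X;
//   asm("addi %0, %1, %2" : "=r"(X) : "b"(Base), "I"(42));
//
// A literal outside [-32768, 32767] fails to match 'I' above; no operand is
// added to Ops and the asm is reported as invalid rather than silently
// truncated.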
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}
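
// Illustrative only: the accepted modes, in C terms (names hypothetical):
//
//   S->Field   // r+i (D-form), while the offset fits a signed 16-bit field
//   A[I]       // r+r (X-form); the Scale == 2 case is honored the same way,
//              // since 2*r is re-expressed as r+r
//   V[I]       // vectors are r+r only: the r+i form is rejected above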
SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  bool isPPC64 = Subtarget.isPPC64();

  bool is64Bit = isPPC64 && VT == LLT::scalar(64);
  if (!is64Bit && VT != LLT::scalar(32))
    report_fatal_error("Invalid register global variable type");

  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                     .Case("r2", isPPC64 ? Register() : PPC::R2)
                     .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
                     .Default(Register());

  if (!Reg)
    report_fatal_error("Invalid register name global variable");
  return Reg;
}
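
// Illustrative only (hypothetical user code): these names back the
// global-register-variable extension, e.g.
//
//   register uintptr_t SP asm("r1");   // stack pointer
//   register uintptr_t TP asm("r13");  // thread pointer on 64-bit ELF
//
// Any other register name reaches the report_fatal_error path above.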
bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
  if (Subtarget.is32BitELFABI())
    return true;

  // AIX accesses everything indirectly through the TOC, which is similar to
  // the GOT.
  if (Subtarget.isAIXABI())
    return true;

  CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // If it is small or large code model, module locals are accessed
  // indirectly by loading their address from .toc/.got.
  if (CModel == CodeModel::Small || CModel == CodeModel::Large)
    return true;

  // JumpTable and BlockAddress are accessed as got-indirect.
  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
    return true;

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
    return Subtarget.isGVIndirectSymbol(G->getGlobal());

  return false;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}
bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}
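
// Editorial note (worked example, based on lvx/stvx's documented behavior of
// ignoring the low-order address bits): a 16-byte lvx from Ptr with
// Ptr % 16 == 4 actually reads the block starting at Ptr - 4. Every such
// access falls inside [Ptr - 15, Ptr + 15], which is exactly the window the
// negative offset and widened size recorded above describe to the memory
// optimizer.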
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Op.size() >= 32 &&
        (Op.isMemcpy() || Op.size() >= 64) && Op.isAligned(Align(32)) &&
        !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Op.size() >= 16 &&
        (Op.isAligned(Align(16)) ||
         ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}
/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}
bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}
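
// Illustrative only: these bounds are what a single D-form instruction can
// encode, e.g. (hypothetical assembly)
//
//   cmpdi  r3, -32768    // isInt<16>:  signed compare immediate
//   cmpldi r3, 65535     // isUInt<16>: unsigned compare immediate
//   addi   r3, r3, 42    // isInt<16>:  add immediate
//
// Wider constants must first be materialized into a register.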
bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.isFloatingPoint() && !VT.isVector() &&
      !Subtarget.allowsUnalignedFPAccess())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}
bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  return isFMAFasterThanFMulAndFAdd(
      MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
                                                   Type *Ty) const {
  switch (Ty->getScalarType()->getTypeID()) {
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  case Type::FP128TyID:
    return Subtarget.hasP9Vector();
  default:
    return false;
  }
}
// Currently this is a copy from AArch64TargetLowering::isProfitableToHoist.
// FIXME: add more patterns which are profitable to hoist.
bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
  if (I->getOpcode() != Instruction::FMul)
    return true;

  if (!I->hasOneUse())
    return true;

  Instruction *User = I->user_back();
  assert(User && "A single use instruction with no uses.");

  if (User->getOpcode() != Instruction::FSub &&
      User->getOpcode() != Instruction::FAdd)
    return true;

  const TargetOptions &Options = getTargetMachine().Options;
  const Function *F = I->getFunction();
  const DataLayout &DL = F->getParent()->getDataLayout();
  Type *Ty = User->getOperand(0)->getType();

  return !(
      isFMAFasterThanFMulAndFAdd(*F, Ty) &&
      isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
}
const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
  // to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

Register PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

Register PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}
bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}
// 'Inverted' means the FMA opcode after negating one multiplicand.
// For example, (fma -a b c) = (fnmsub a b c)
static unsigned invertFMAOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Invalid FMA opcode for PowerPC!");
  case ISD::FMA:
    return PPCISD::FNMSUB;
  case PPCISD::FNMSUB:
    return ISD::FMA;
  }
}
SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOps, bool OptForSize,
                                                NegatibleCost &Cost,
                                                unsigned Depth) const {
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return SDValue();

  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  SDNodeFlags Flags = Op.getNode()->getFlags();

  switch (Opc) {
  case PPCISD::FNMSUB:
    // TODO: QPX subtarget is deprecated. No transformation here.
    if (!Op.hasOneUse() || !isTypeLegal(VT) || Subtarget.hasQPX())
      break;

    const TargetOptions &Options = getTargetMachine().Options;
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    SDValue N2 = Op.getOperand(2);
    SDLoc Loc(Op);

    NegatibleCost N2Cost = NegatibleCost::Expensive;
    SDValue NegN2 =
        getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);

    if (!NegN2)
      return SDValue();

    // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
    // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
    // These transformations may change sign of zeroes. For example,
    // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
    if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
      // Try and choose the cheaper one to negate.
      NegatibleCost N0Cost = NegatibleCost::Expensive;
      SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
                                           N0Cost, Depth + 1);

      NegatibleCost N1Cost = NegatibleCost::Expensive;
      SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
                                           N1Cost, Depth + 1);

      if (NegN0 && N0Cost <= N1Cost) {
        Cost = std::min(N0Cost, N2Cost);
        return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
      } else if (NegN1) {
        Cost = std::min(N1Cost, N2Cost);
        return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
      }
    }

    // (fneg (fnmsub a b c)) => (fma a b (fneg c))
    if (isOperationLegal(ISD::FMA, VT)) {
      Cost = N2Cost;
      return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
    }

    break;
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
                                              Cost, Depth);
}
// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}
bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by PPC backend, return
    // false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
    if (Subtarget.hasPrefixInstrs()) {
      // With prefixed instructions, we can materialize anything that can be
      // represented with a 32-bit immediate, not just positive zero.
      APFloat APFloatOfImm = Imm;
      return convertToNonDenormSingle(APFloatOfImm);
    }
    LLVM_FALLTHROUGH;
  case MVT::ppcf128:
    return Imm.isPosZero();
  }
}
// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}
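
// Illustrative only: the IR shape stripModuloOnShift matches, for a v4i32
// shift (the mask 31 equals numbits(i32) - 1, so the hardware's modulo
// semantics already make the AND redundant):
//
//   %m = and <4 x i32> %y, <i32 31, i32 31, i32 31, i32 31>
//   %r = shl <4 x i32> %x, %m    ; folds to (PPCISD::SHL %x, %y)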
SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 ||
      CN1 == nullptr || N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswli, but the shift could
  // have an i64.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}
// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the equation (addi Z, -C) can be simplified to Z.
// Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant Should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}
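
// Illustrative only: in C terms, the SETNE arm above corresponds to
//
//   long f(long X, long Z) { return X + (Z != 7); }
//
// lowering to roughly: addi t,Z,-7 ; addic t,t,-1 ; addze r,X. The addic
// sets the carry exactly when t != 0 (i.e. Z != 7), and addze folds that
// carry into X.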
// Transform
// (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
// (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
// In this case both C1 and C2 must be known constants.
// C1+C2 must fit into a 34 bit signed integer.
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
                                          const PPCSubtarget &Subtarget) {
  if (!Subtarget.isUsingPCRelativeCalls())
    return SDValue();

  // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
  // If we find that node try to cast the Global Address and the Constant.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    std::swap(LHS, RHS);

  if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
    return SDValue();

  // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
  GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
  ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(RHS);

  // Check that both casts succeeded.
  if (!GSDN || !ConstNode)
    return SDValue();

  int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
  SDLoc DL(GSDN);

  // The signed int offset needs to fit in 34 bits.
  if (!isInt<34>(NewOffset))
    return SDValue();

  // The new global address is a copy of the old global address except
  // that it has the updated Offset.
  SDValue GA =
      DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
                                 NewOffset, GSDN->getTargetFlags());
  SDValue MatPCRel =
      DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
  return MatPCRel;
}
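
// Illustrative sketch (assembly approximate): with pc-relative code, an
// access such as GlobalVar[3] (a known +12 byte offset for i32 elements)
// can fold the offset into the materialization itself,
//
//   paddi r3, 0, GlobalVar@PCREL+12, 1
//
// instead of a paddi followed by a separate add, provided the combined
// offset still fits the signed 34-bit field checked above.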
SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}
// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128 bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// The reason this is required is because we do not have a legal i128 type
// and so we want to prevent having to store the f128 and then reload part
// of it.
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
  if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
      return SDValue();
    SDValue Sub = Op0.getOperand(0);
    if (Sub.getOpcode() == ISD::SUB) {
      SDValue SubOp0 = Sub.getOperand(0);
      SDValue SubOp1 = Sub.getOperand(1);
      if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
          (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
        return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
                               SubOp1.getOperand(0),
                               DCI.DAG.getTargetConstant(0, dl, MVT::i32));
      }
    }
  }

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}
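
// Illustrative only: for
//
//   %i  = bitcast fp128 %f to i128
//   %lo = trunc i128 %i to i64
//
// the code above extracts element 0 (little-endian) or 1 (big-endian) of a
// v2i64 view of %f, so no fp128 store/reload is needed; an SRL by 64 in
// between simply selects the other element.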
SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for legal type.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtarget before pwr8
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR10:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2
      //
      // The cycle ratios of the related operations are shown in the table
      // above. Because mul is 5(scalar)/7(vector) while add/sub/shl are all 2
      // for both scalar and vector types, the 2-instruction patterns
      // (add/sub + shl, total 4) are always profitable; but for the
      // 3-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl total 6,
      // so we should only do it for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
  } else {
    return SDValue();
  }
}
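
// Illustrative only: for (mul x, 5), MulAmtAbs - 1 == 4 is a power of two,
// so the combine above emits (hypothetical assembly)
//
//   slwi r4, r3, 2     // shl x, 2
//   add  r3, r4, r3    // (shl x, 2) + x == x * 5
//
// trading the higher-latency multiply for an add/shift pair, subject to the
// per-CPU profitability table in IsProfitable.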
// Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
// in combiner since we need to check SD flags and other subtarget features.
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDNodeFlags Flags = N->getFlags();
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;
  const TargetOptions &Options = getTargetMachine().Options;
  unsigned Opc = N->getOpcode();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOps = !DCI.isBeforeLegalizeOps();
  SDLoc Loc(N);

  // TODO: QPX subtarget is deprecated. No transformation here.
  if (Subtarget.hasQPX() || !isOperationLegal(ISD::FMA, VT))
    return SDValue();

  // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
  // since (fnmsub a b c)=-0 while c-ab=+0.
  if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
    return SDValue();

  // (fma (fneg a) b c) => (fnmsub a b c)
  // (fnmsub (fneg a) b c) => (fma a b c)
  if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);

  // (fma a (fneg b) c) => (fnmsub a b c)
  // (fnmsub a (fneg b) c) => (fma a b c)
  if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
    return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);

  return SDValue();
}
bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}
bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64-bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}
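
// Illustrative only: masks this hook accepts, and the record-form
// instructions they correspond to:
//
//   X & 0x00FF       -> andi.  (any 16-bit unsigned mask)
//   X & 0x00FF0000   -> andis. (16-bit mask shifted left 16)
//
// Both set CR0, so the following compare against zero comes for free.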
// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a & b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even for signed integers, if it's known to be positive (as signed
    // integer) due to zero-extended inputs.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}
// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD only available for type v4i32/v8i16/v16i8
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // At least to save one more dependent computation
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparison here
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));