//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
    cl::desc("enable quad precision float support on ppc"), cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (Subtarget.hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
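  // ("Legal" here means no generic expansion is needed: the i32/i64
  // bitreverse nodes are matched directly by the fast instruction sequences
  // defined in the td file.)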

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
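  // (These are the update-form memory instructions, e.g. lwzu/stwu, which
  // write the incremented effective address back to the base register as
  // part of the access.)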
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
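  // (e.g. a 64-bit add on PPC32 is an addc on the low words, which sets the
  // CA bit, followed by an adde on the high words, which consumes it.)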
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9; on P9 we may
  // use a hardware instruction to compute the remainder. The instructions are
  // not legalized directly because in the cases where the result of both the
  // remainder and the division is required it is more efficient to compute
  // the remainder from the result of the division rather than use the
  // remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA  , MVT::f64, Expand);
    setOperationAction(ISD::FMA  , MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA  , MVT::f64, Legal);
    setOperationAction(ISD::FMA  , MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
  // to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
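  // (The Custom path moves the 64-bit value into a vector register,
  // byte-reverses it with xxbrd, and moves it back.)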
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
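  // (For i32 this becomes a shift left by 31 followed by an arithmetic shift
  // right by 31, replicating bit 0 across the register.)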

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuations, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
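  // (Custom lowering typically materializes these as a high/low pair, e.g.
  // PPCISD::Hi/PPCISD::Lo around the symbol, or as a TOC entry load on
  // 64-bit ELF.)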

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
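    // (Each *_PARTS node is a double-register shift; the custom lowering
    // emits a short branch-free sequence instead of the generic expansion.)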
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
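      // (Promotion bitcasts the operands to v4i32, performs the operation
      // there, and bitcasts back; for these whole-register ops the bit
      // pattern is unchanged, so the casts are free.)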
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact exception,
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
        // doing a custom expansion.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops on PowerPC.
        setOperationAction(ISD::FSIN, MVT::f128, Expand);
        setOperationAction(ISD::FCOS, MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
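    // (e.g. vcmpequw writes 0xFFFFFFFF or 0 into each word lane, whereas
    // scalar booleans in GPRs are 0 or 1.)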
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(Align(16));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR_FUTURE:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
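    // (e.g. a 64-byte memset is inlined here as 16 word stores, well under
    // the 32-store cap, instead of a call to memset, assuming the pointer is
    // suitably aligned.)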
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}
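// For example, a struct containing a 128-bit Altivec vector member drives
// MaxAlign up to 16, and one containing a 256-bit QPX vector up to 32.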

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary; everything
  // else is passed on an 8-byte boundary on PPC64 and a 4-byte boundary on
  // PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
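  // sub y, (xor x, -1) and add (add x, 1), y compute the same result; for
  // scalar integers the inc-of-add form maps directly onto addi, so we
  // prefer it.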
  return VT.isScalarInteger();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER:    break;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL:          return "PPCISD::VECSHL";
  case PPCISD::CMPB:            return "PPCISD::CMPB";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8:  return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::MFVSR:           return "PPCISD::MFVSR";
  case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDI_rec_1_EQ_BIT:
    return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT:
    return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
  case PPCISD::STXSIX:          return "PPCISD::STXSIX";
  case PPCISD::VEXTS:           return "PPCISD::VEXTS";
  case PPCISD::SExtVElems:      return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT:
    return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB:           return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::VABSD:           return "PPCISD::VABSD";
  case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
1451 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
1452 case PPCISD::QBFLT: return "PPCISD::QBFLT";
1453 case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
1454 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
1455 case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
1456 case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
1457 case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
1458 case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
1459 case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
1460 case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT";

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}
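
// Example masks accepted by isVPKUHUMShuffleMask (illustrative):
//   ShuffleKind 0 (BE, two inputs): <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>
//   ShuffleKind 2 (LE, swapped):    <0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30>
// i.e. the mask selects the odd (BE) or even (LE) byte of each halfword.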

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
        return false;
  }
  return true;
}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
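
// Example for isVMerge (illustrative): isVMerge(N, 1, 8, 24), used for a
// big-endian vmrglb with two different inputs, accepts the mask
//   <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>
// which interleaves the low halves of the two input vectors byte by byte.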

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * ("Targeting your applications - what little endian and big endian IBM XL
 * C/C++ compiler differences mean to you").
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16 elements
 * of size 8 bits. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 * input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}
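
// Example for the word-merge isVMerge (illustrative): a big-endian vmrgew of
// two different inputs (IndexOffset 0, RHSStartValue 16) matches the mask
//   <0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27>
// i.e. even words 0 and 2 of each input, interleaved.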

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow
 * instruction.
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}
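
// Example for isVSLDOIShuffleMask (illustrative): with ShuffleKind 0 on a
// big-endian target, the mask <3,4,5,...,18> is a vsldoi by 3 bytes, so the
// function returns 3; on little-endian targets the amount is adjusted to
// 16 - ShiftAmt before being returned.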

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}
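
// Example for isSplatShuffleMask (illustrative): with EltSize == 4, the mask
//   <8,9,10,11, 8,9,10,11, 8,9,10,11, 8,9,10,11>
// is a splat of word element 2 of the first input, so this returns true.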

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the delta between consecutive indices within an N-byte
/// element: 1 if the mask is in increasing order, -1 if decreasing.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; // Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}
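
// Example for isNByteElemShuffleMask (illustrative): Width == 4 with
// StepLen == -1 matches byte-reversal-within-words masks such as
//   <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>
// which is exactly the pattern isXXBRWShuffleMask looks for below.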

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}
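
// Example for isXXINSERTWMask (illustrative): on LE, the byte mask
//   <0,1,2,3, 28,29,30,31, 8,9,10,11, 12,13,14,15>
// gives M0..M3 = 0,7,2,3 and matches the "0, H, 2, 3" case above, producing
// ShiftElts = 3, InsertAtByte = 8, Swap = false.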

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else { // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}

/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask will
  // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}
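
// Example for isXXPERMDIShuffleMask (illustrative): on BE, the byte mask
//   <0..7, 24..31> gives doubleword indices M0 = 0, M1 = 3, so Swap = false
// and DM = (M0 << 1) + (M1 & 1) = 1, selecting doubleword 0 of the first
// input followed by doubleword 1 of the second.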

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}
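
// Example (illustrative): for a word splat (EltSize == 4) whose first mask
// element is 8, the splat index is 8/4 = 2 on BE, but (16/4)-1-2 = 1 on LE,
// matching the left-to-right element numbering of the PPC mnemonics.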

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted. The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue(); // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk. See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1. If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue; // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16) // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat. The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}
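
// Example for get_VSPLTI_elt (illustrative): a v16i8 build_vector whose every
// element is 5, queried with ByteSize == 1, yields the target constant 5,
// which the caller can materialize with vspltisb 5.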

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1; // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressEVXRegReg - Given the specified address, check to see if it can
/// be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
       UI != E; ++UI) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
/// non-zero and N can be represented by a base register plus a signed 16-bit
/// displacement, make a more precise judgement by checking (displacement % \p
/// EncodingAlignment).
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index, SelectionDAG &DAG,
                                            unsigned EncodingAlignment) const {
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    // SPE f64 loads/stores cannot handle a 16-bit offset; they only support
    // 8-bit offsets, so check for an EVX reg+reg form first.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false; // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

    if (LHSKnown.Zero.getBoolValue()) {
      KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // change the result.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 %c, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned Align = MFI.getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
/// displacements that are multiples of that value.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            unsigned EncodingAlignment) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0); // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

      if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // change the result.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    int16_t Imm;
    if (isIntS16Immediate(CN, Imm) &&
        (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}
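
// Example for SelectAddressRegImm (illustrative): with EncodingAlignment == 4
// (DS-form), (add X, 20) is selected as [X + 20], whereas (add X, 2) fails the
// alignment check, is claimed by SelectAddressRegReg, and ends up as [r+r].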

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address. This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add. However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register. We only get rid of the add if it is not an add of a
  // value and a 16-bit signed constant and both have a single use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {

  // If there are any uses other than scalar to vector, then we should
  // keep it as a scalar load -> direct move pattern to prevent multiple
  // loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

  EVT MemVT = LD->getMemoryVT();
  if (!MemVT.isSimple())
    return false;
  switch(MemVT.getSimpleVT().SimpleTy) {
  case MVT::i64:
    break;
  case MVT::i32:
    if (!ST.hasP8Vector())
      return false;
    break;
  case MVT::i16:
  case MVT::i8:
    if (!ST.hasP9Vector())
      return false;
    break;
  default:
    return false;
  }

  SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR)
      return false;

  return true;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can fold these into a more
  // efficient instruction instead (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored. Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
// LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Set the HiOpFlags and LoOpFlags target MO flags for a label reference,
/// adding the PIC flag when labels should be referenced via a PICBase.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && Subtarget.hasLazyResolverStub(GV)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}
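
// For reference (illustrative, non-PIC 32-bit): the Hi/Lo pair built by
// LowerLabelRef is expected to materialize as roughly
//   lis  rT, sym@ha
//   addi rT, rT, sym@l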

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}

SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue GA) const {
  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
                        : Subtarget.isAIXABI()
                              ? DAG.getRegister(PPC::R2, VT)
                              : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
      MachineMemOperand::MOLoad);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
                                           PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}

// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// on the jump site.
unsigned PPCTargetLowering::getJumpTableEncoding() const {
  if (isJumpTableRelative())
    return MachineJumpTableInfo::EK_LabelDifference32;

  return TargetLowering::getJumpTableEncoding();
}

bool PPCTargetLowering::isJumpTableRelative() const {
  if (UseAbsoluteJumpTables)
    return false;
  if (Subtarget.isPPC64() || Subtarget.isAIXABI())
    return true;
  return TargetLowering::isJumpTableRelative();
}

SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
  default:
    return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  }
}

const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
  default:
    return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
  }
}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                        PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), GA);
  }

  SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
  SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = BASDN->getBlockAddress();

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual BlockAddress is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), GA);
  }

  // 32-bit position-independent ELF stores the BlockAddress in the .got.
  if (Subtarget.is32BitELFABI() && isPositionIndependent())
    return getTOCEntry(
        DAG, SDLoc(BASDN),
        DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
  SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
  SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  // FIXME: TLS addresses currently use medium model code sequences,
  // which is the most useful form. Eventually support for small and
  // large models could be added if users need it, at the cost of
  // additional complexity.
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc dl(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool is64bit = Subtarget.isPPC64();
  const Module *M = DAG.getMachineFunction().getFunction().getParent();
  PICLevel::Level picLevel = M->getPICLevel();

  const TargetMachine &TM = getTargetMachine();
  TLSModel::Model Model = TM.getTLSModel(GV);

  if (Model == TLSModel::LocalExec) {
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_HA);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                               PPCII::MO_TPREL_LO);
    SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
                             : DAG.getRegister(PPC::R2, MVT::i32);

    SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }
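
  // For reference (illustrative): on 64-bit targets the local-exec sequence
  // built above corresponds to roughly
  //   addis rT, r13, v@tprel@ha
  //   addi  rT, rT, v@tprel@l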
2914 if (Model == TLSModel::InitialExec) {
2915 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2916 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2920 setUsesTOCBasePtr(DAG);
2921 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2922 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
2923 PtrVT, GOTReg, TGA);
2925 if (!TM.isPositionIndependent())
2926 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2927 else if (picLevel == PICLevel::SmallPIC)
2928 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2930 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2932 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2933 PtrVT, TGA, GOTPtr);
2934 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
2937 if (Model == TLSModel::GeneralDynamic) {
2938 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2941 setUsesTOCBasePtr(DAG);
2942 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2943 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2946 if (picLevel == PICLevel::SmallPIC)
2947 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2949 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2951 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2955 if (Model == TLSModel::LocalDynamic) {
2956 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2959 setUsesTOCBasePtr(DAG);
2960 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2961 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
2964 if (picLevel == PICLevel::SmallPIC)
2965 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2967 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2969 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2970 PtrVT, GOTPtr, TGA, TGA);
2971 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2972 PtrVT, TLSAddr, TGA);
2973 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2974 }
2976 llvm_unreachable("Unknown TLS model!");
2977 }
2979 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2980 SelectionDAG &DAG) const {
2981 EVT PtrVT = Op.getValueType();
2982 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
2983 SDLoc DL(GSDN);
2984 const GlobalValue *GV = GSDN->getGlobal();
2986 // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
2987 // The actual address of the GlobalValue is stored in the TOC.
2988 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2989 setUsesTOCBasePtr(DAG);
2990 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
2991 return getTOCEntry(DAG, DL, GA);
2992 }
2994 unsigned MOHiFlag, MOLoFlag;
2995 bool IsPIC = isPositionIndependent();
2996 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
2998 if (IsPIC && Subtarget.isSVR4ABI()) {
2999 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3000 GSDN->getOffset(),
3001 PPCII::MO_PIC_FLAG);
3002 return getTOCEntry(DAG, DL, GA);
3003 }
3005 SDValue GAHi =
3006 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3007 SDValue GALo =
3008 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3010 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3012 // If the global reference is actually to a non-lazy-pointer, we have to do an
3013 // extra load to get the address of the global.
3014 if (MOHiFlag & PPCII::MO_NLP_FLAG)
3015 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3016 return Ptr;
3017 }
3019 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3020 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3021 SDLoc dl(Op);
3023 if (Op.getValueType() == MVT::v2i64) {
3024 // When the operands themselves are v2i64 values, we need to do something
3025 // special because VSX has no underlying comparison operations for these.
3026 if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3027 // Equality can be handled by casting to the legal type for Altivec
3028 // comparisons, everything else needs to be expanded.
3029 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3030 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3031 DAG.getSetCC(dl, MVT::v4i32,
3032 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3033 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3034 CC));
3035 }
3037 return SDValue();
3038 }
3040 // We handle most of these in the usual way.
3041 return Op;
3042 }
3044 // If we're comparing for equality to zero, expose the fact that this is
3045 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3046 // fold the new nodes.
3047 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3048 return V;
3050 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3051 // Leave comparisons against 0 and -1 alone for now, since they're usually
3052 // optimized. FIXME: revisit this when we can custom lower all setcc
3053 // optimizations.
3054 if (C->isAllOnesValue() || C->isNullValue())
3055 return SDValue();
3056 }
3058 // If we have an integer seteq/setne, turn it into a compare against zero
3059 // by xor'ing the rhs with the lhs, which is faster than setting a
3060 // condition register, reading it back out, and masking the correct bit. The
3061 // normal approach here uses sub to do this instead of xor. Using xor exposes
3062 // the result to other bit-twiddling opportunities.
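// For example, seteq %a, %b becomes seteq (xor %a, %b), 0, and the xor is
// then visible to later combines.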
3063 EVT LHSVT = Op.getOperand(0).getValueType();
3064 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3065 EVT VT = Op.getValueType();
3066 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3067 Op.getOperand(1));
3068 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3069 }
3071 return SDValue();
3072 }
3073 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3074 SDNode *Node = Op.getNode();
3075 EVT VT = Node->getValueType(0);
3076 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3077 SDValue InChain = Node->getOperand(0);
3078 SDValue VAListPtr = Node->getOperand(1);
3079 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3080 SDLoc dl(Node);
3082 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3085 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3086 VAListPtr, MachinePointerInfo(SV), MVT::i8);
3087 InChain = GprIndex.getValue(1);
3089 if (VT == MVT::i64) {
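// Note: i64 varargs occupy an aligned GPR pair, so e.g. a gpr index of 1
// (r4) must be rounded up to 2 so the value lands in the r5:r6 pair.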
3090 // Check if GprIndex is even
3091 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3092 DAG.getConstant(1, dl, MVT::i32));
3093 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3094 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3095 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3096 DAG.getConstant(1, dl, MVT::i32));
3097 // Align GprIndex to be even if it isn't
3098 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3099 GprIndex);
3100 }
3102 // fpr index is 1 byte after gpr
3103 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3104 DAG.getConstant(1, dl, MVT::i32));
3107 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3108 FprPtr, MachinePointerInfo(SV), MVT::i8);
3109 InChain = FprIndex.getValue(1);
3111 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3112 DAG.getConstant(8, dl, MVT::i32));
3114 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3115 DAG.getConstant(4, dl, MVT::i32));
3118 SDValue OverflowArea =
3119 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3120 InChain = OverflowArea.getValue(1);
3122 SDValue RegSaveArea =
3123 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3124 InChain = RegSaveArea.getValue(1);
3126 // select overflow_area if index > 8
3127 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3128 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3130 // adjustment constant gpr_index * 4/8
3131 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3132 VT.isInteger() ? GprIndex : FprIndex,
3133 DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3134 MVT::i32));
3136 // OurReg = RegSaveArea + RegConstant
3137 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3138 RegConstant);
3140 // Floating types are 32 bytes into RegSaveArea
3141 if (VT.isFloatingPoint())
3142 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3143 DAG.getConstant(32, dl, MVT::i32));
3145 // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3146 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3147 VT.isInteger() ? GprIndex : FprIndex,
3148 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3149 MVT::i32));
3151 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3152 VT.isInteger() ? VAListPtr : FprPtr,
3153 MachinePointerInfo(SV), MVT::i8);
3155 // determine if we should load from reg_save_area or overflow_area
3156 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3158 // increase overflow_area by 4/8 if gpr/fpr > 8
3159 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3160 DAG.getConstant(VT.isInteger() ? 4 : 8,
3161 dl, MVT::i32));
3163 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3164 OverflowAreaPlusN);
3166 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3167 MachinePointerInfo(), MVT::i32);
3169 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3170 }
3172 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3173 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3175 // We have to copy the entire va_list struct:
3176 // 2*sizeof(char) + 2 byte alignment + 2*sizeof(char*) = 12 bytes
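// For reference, the layout being copied is (sketch):
//   struct va_list_tag {
//     char gpr;                 // 1 byte
//     char fpr;                 // 1 byte, then 2 bytes of padding
//     char *overflow_arg_area;  // 4 bytes
//     char *reg_save_area;      // 4 bytes -> 12 bytes in total
//   };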
3177 return DAG.getMemcpy(Op.getOperand(0), Op,
3178 Op.getOperand(1), Op.getOperand(2),
3179 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
3180 false, MachinePointerInfo(), MachinePointerInfo());
3181 }
3183 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3184 SelectionDAG &DAG) const {
3185 if (Subtarget.isAIXABI())
3186 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3188 return Op.getOperand(0);
3189 }
3191 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3192 SelectionDAG &DAG) const {
3193 if (Subtarget.isAIXABI())
3194 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3196 SDValue Chain = Op.getOperand(0);
3197 SDValue Trmp = Op.getOperand(1); // trampoline
3198 SDValue FPtr = Op.getOperand(2); // nested function
3199 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3201 SDLoc dl(Op);
3202 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3203 bool isPPC64 = (PtrVT == MVT::i64);
3204 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3206 TargetLowering::ArgListTy Args;
3207 TargetLowering::ArgListEntry Entry;
3209 Entry.Ty = IntPtrTy;
3210 Entry.Node = Trmp; Args.push_back(Entry);
3212 // TrampSize == (isPPC64 ? 48 : 40);
3213 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3214 isPPC64 ? MVT::i64 : MVT::i32);
3215 Args.push_back(Entry);
3217 Entry.Node = FPtr; Args.push_back(Entry);
3218 Entry.Node = Nest; Args.push_back(Entry);
3220 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
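// The helper lives in the runtime (compiler-rt); its assumed signature is
// roughly (illustrative sketch):
//   void __trampoline_setup(uint32_t *trampOnStack, int trampSizeAllocated,
//                           const void *realFunc, void *localsPtr);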
3221 TargetLowering::CallLoweringInfo CLI(DAG);
3222 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3223 CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3224 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3226 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3227 return CallResult.second;
3228 }
3230 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3231 MachineFunction &MF = DAG.getMachineFunction();
3232 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3233 EVT PtrVT = getPointerTy(MF.getDataLayout());
3234 SDLoc dl(Op);
3237 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
3238 // vastart just stores the address of the VarArgsFrameIndex slot into the
3239 // memory location argument.
3240 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3241 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3242 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3243 MachinePointerInfo(SV));
3246 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3247 // We suppose the given va_list is already allocated.
3249 // typedef struct {
3250 // char gpr; /* index into the array of 8 GPRs
3251 // * stored in the register save area
3252 // * gpr=0 corresponds to r3,
3253 // * gpr=1 to r4, etc.
3255 // char fpr; /* index into the array of 8 FPRs
3256 // * stored in the register save area
3257 // * fpr=0 corresponds to f1,
3258 // * fpr=1 to f2, etc.
3260 // char *overflow_arg_area;
3261 // /* location on stack that holds
3262 // * the next overflow argument
3264 // char *reg_save_area;
3265 // /* where r3:r10 and f1:f8 (if saved)
3266 // * are stored */
3267 // } va_list[1];
3270 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3271 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3272 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3273 PtrVT);
3274 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3275 PtrVT);
3277 uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3278 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3280 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3281 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3283 uint64_t FPROffset = 1;
3284 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3286 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3288 // Store first byte : number of int regs
3289 SDValue firstStore =
3290 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3291 MachinePointerInfo(SV), MVT::i8);
3292 uint64_t nextOffset = FPROffset;
3293 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3294 ConstFPROffset);
3296 // Store second byte : number of float regs
3297 SDValue secondStore =
3298 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3299 MachinePointerInfo(SV, nextOffset), MVT::i8);
3300 nextOffset += StackOffset;
3301 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3303 // Store second word : arguments given on stack
3304 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3305 MachinePointerInfo(SV, nextOffset));
3306 nextOffset += FrameOffset;
3307 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3309 // Store third word : arguments given in registers
3310 return DAG.getStore(thirdStore, dl, FR, nextPtr,
3311 MachinePointerInfo(SV, nextOffset));
3312 }
3314 /// FPR - The set of FP registers that should be allocated for arguments
3315 /// on Darwin and AIX.
3316 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
3317 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
3318 PPC::F11, PPC::F12, PPC::F13};
3320 /// QFPR - The set of QPX registers that should be allocated for arguments.
3321 static const MCPhysReg QFPR[] = {
3322 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
3323 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3325 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3326 /// the stack.
3327 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3328 unsigned PtrByteSize) {
3329 unsigned ArgSize = ArgVT.getStoreSize();
3330 if (Flags.isByVal())
3331 ArgSize = Flags.getByValSize();
3333 // Round up to multiples of the pointer size, except for array members,
3334 // which are always packed.
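// e.g. with 8-byte pointers a 3-byte byval argument is still accounted as a
// full 8-byte stack slot by the rounding below.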
3335 if (!Flags.isInConsecutiveRegs())
3336 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3338 return ArgSize;
3339 }
3341 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3342 /// on the stack.
3343 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3344 ISD::ArgFlagsTy Flags,
3345 unsigned PtrByteSize) {
3346 unsigned Align = PtrByteSize;
3348 // Altivec parameters are padded to a 16 byte boundary.
3349 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3350 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3351 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3352 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3353 Align = 16;
3354 // QPX vector types stored in double-precision are padded to a 32 byte
3356 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3357 Align = 32;
3359 // ByVal parameters are aligned as requested.
3360 if (Flags.isByVal()) {
3361 unsigned BVAlign = Flags.getByValAlign();
3362 if (BVAlign > PtrByteSize) {
3363 if (BVAlign % PtrByteSize != 0)
3364 report_fatal_error(
3365 "ByVal alignment is not a multiple of the pointer size");
3367 Align = BVAlign;
3368 }
3369 }
3371 // Array members are always packed to their original alignment.
3372 if (Flags.isInConsecutiveRegs()) {
3373 // If the array member was split into multiple registers, the first
3374 // needs to be aligned to the size of the full type. (Except for
3375 // ppcf128, which is only aligned as its f64 components.)
3376 if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3377 Align = OrigVT.getStoreSize();
3378 else
3379 Align = ArgVT.getStoreSize();
3380 }
3382 return Align;
3383 }
3385 /// CalculateStackSlotUsed - Return whether this argument will use its
3386 /// stack slot (instead of being passed in registers). ArgOffset,
3387 /// AvailableFPRs, and AvailableVRs must hold the current argument
3388 /// position, and will be updated to account for this argument.
3389 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3390 ISD::ArgFlagsTy Flags,
3391 unsigned PtrByteSize,
3392 unsigned LinkageSize,
3393 unsigned ParamAreaSize,
3394 unsigned &ArgOffset,
3395 unsigned &AvailableFPRs,
3396 unsigned &AvailableVRs, bool HasQPX) {
3397 bool UseMemory = false;
3399 // Respect alignment of argument on the stack.
3400 unsigned Align =
3401 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3402 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3403 // If there's no space left in the argument save area, we must
3404 // use memory (this check also catches zero-sized arguments).
3405 if (ArgOffset >= LinkageSize + ParamAreaSize)
3406 UseMemory = true;
3408 // Allocate argument on the stack.
3409 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3410 if (Flags.isInConsecutiveRegsLast())
3411 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3412 // If we overran the argument save area, we must use memory
3413 // (this check catches arguments passed partially in memory)
3414 if (ArgOffset > LinkageSize + ParamAreaSize)
3415 UseMemory = true;
3417 // However, if the argument is actually passed in an FPR or a VR,
3418 // we don't use memory after all.
3419 if (!Flags.isByVal()) {
3420 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3421 // QPX registers overlap with the scalar FP registers.
3422 (HasQPX && (ArgVT == MVT::v4f32 ||
3423 ArgVT == MVT::v4f64 ||
3424 ArgVT == MVT::v4i1)))
3425 if (AvailableFPRs > 0) {
3426 --AvailableFPRs;
3427 return false;
3428 }
3429 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3430 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3431 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3432 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3433 if (AvailableVRs > 0) {
3434 --AvailableVRs;
3435 return false;
3436 }
3437 }
3439 return UseMemory;
3440 }
3442 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3443 /// ensure minimum alignment required for target.
3444 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3445 unsigned NumBytes) {
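// e.g. NumBytes = 52 with a 16-byte target stack alignment is rounded up to
// (52 + 15) & ~15 = 64.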
3446 unsigned TargetAlign = Lowering->getStackAlignment();
3447 unsigned AlignMask = TargetAlign - 1;
3448 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
3449 return NumBytes;
3450 }
3452 SDValue PPCTargetLowering::LowerFormalArguments(
3453 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3454 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3455 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3456 if (Subtarget.isAIXABI())
3457 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3458 InVals);
3459 if (Subtarget.is64BitELFABI())
3460 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3461 InVals);
3462 if (Subtarget.is32BitELFABI())
3463 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3464 InVals);
3466 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3467 InVals);
3468 }
3470 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3471 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3472 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3473 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3475 // 32-bit SVR4 ABI Stack Frame Layout:
3476 // +-----------------------------------+
3477 // +--> | Back chain |
3478 // | +-----------------------------------+
3479 // | | Floating-point register save area |
3480 // | +-----------------------------------+
3481 // | | General register save area |
3482 // | +-----------------------------------+
3483 // | | CR save word |
3484 // | +-----------------------------------+
3485 // | | VRSAVE save word |
3486 // | +-----------------------------------+
3487 // | | Alignment padding |
3488 // | +-----------------------------------+
3489 // | | Vector register save area |
3490 // | +-----------------------------------+
3491 // | | Local variable space |
3492 // | +-----------------------------------+
3493 // | | Parameter list area |
3494 // | +-----------------------------------+
3495 // | | LR save word |
3496 // | +-----------------------------------+
3497 // SP--> +--- | Back chain |
3498 // +-----------------------------------+
3501 // System V Application Binary Interface PowerPC Processor Supplement
3502 // AltiVec Technology Programming Interface Manual
3504 MachineFunction &MF = DAG.getMachineFunction();
3505 MachineFrameInfo &MFI = MF.getFrameInfo();
3506 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3508 EVT PtrVT = getPointerTy(MF.getDataLayout());
3509 // Potential tail calls could cause overwriting of argument stack slots.
3510 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3511 (CallConv == CallingConv::Fast));
3512 unsigned PtrByteSize = 4;
3514 // Assign locations to all of the incoming arguments.
3515 SmallVector<CCValAssign, 16> ArgLocs;
3516 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3517 *DAG.getContext());
3519 // Reserve space for the linkage area on the stack.
3520 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3521 CCInfo.AllocateStack(LinkageSize, PtrByteSize);
3522 if (useSoftFloat() || hasSPE())
3523 CCInfo.PreAnalyzeFormalArguments(Ins);
3525 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3526 CCInfo.clearWasPPCF128();
3528 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3529 CCValAssign &VA = ArgLocs[i];
3531 // Arguments stored in registers.
3532 if (VA.isRegLoc()) {
3533 const TargetRegisterClass *RC;
3534 EVT ValVT = VA.getValVT();
3536 switch (ValVT.getSimpleVT().SimpleTy) {
3538 llvm_unreachable("ValVT not supported by formal arguments Lowering");
3541 RC = &PPC::GPRCRegClass;
3544 if (Subtarget.hasP8Vector())
3545 RC = &PPC::VSSRCRegClass;
3546 else if (Subtarget.hasSPE())
3547 RC = &PPC::GPRCRegClass;
3549 RC = &PPC::F4RCRegClass;
3552 if (Subtarget.hasVSX())
3553 RC = &PPC::VSFRCRegClass;
3554 else if (Subtarget.hasSPE())
3555 // SPE passes doubles in GPR pairs.
3556 RC = &PPC::GPRCRegClass;
3558 RC = &PPC::F8RCRegClass;
3563 RC = &PPC::VRRCRegClass;
3566 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3570 RC = &PPC::VRRCRegClass;
3573 RC = &PPC::QFRCRegClass;
3576 RC = &PPC::QBRCRegClass;
3581 // Transform the arguments stored in physical registers into
3583 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3584 assert(i + 1 < e && "No second half of double precision argument");
3585 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3586 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3587 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3588 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3589 if (!Subtarget.isLittleEndian())
3590 std::swap (ArgValueLo, ArgValueHi);
3591 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3592 ArgValueHi);
3593 } else {
3594 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3595 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3596 ValVT == MVT::i1 ? MVT::i32 : ValVT);
3597 if (ValVT == MVT::i1)
3598 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3601 InVals.push_back(ArgValue);
3603 // Argument stored in memory.
3604 assert(VA.isMemLoc());
3606 // Get the extended size of the argument type in its stack slot.
3607 unsigned ArgSize = VA.getLocVT().getStoreSize();
3608 // Get the actual size of the argument type
3609 unsigned ObjSize = VA.getValVT().getStoreSize();
3610 unsigned ArgOffset = VA.getLocMemOffset();
3611 // Stack objects in PPC32 are right justified.
3612 ArgOffset += ArgSize - ObjSize;
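// e.g. an i8 passed in a 4-byte stack slot lives at slot offset 3 on
// big-endian PPC32, so the load address is biased by ArgSize - ObjSize.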
3613 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3615 // Create load nodes to retrieve arguments from the stack.
3616 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3617 InVals.push_back(
3618 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3622 // Assign locations to all of the incoming aggregate by value arguments.
3623 // Aggregates passed by value are stored in the local variable space of the
3624 // caller's stack frame, right above the parameter list area.
3625 SmallVector<CCValAssign, 16> ByValArgLocs;
3626 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3627 ByValArgLocs, *DAG.getContext());
3629 // Reserve stack space for the allocations in CCInfo.
3630 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3632 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3634 // Area that is at least reserved in the caller of this function.
3635 unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3636 MinReservedArea = std::max(MinReservedArea, LinkageSize);
3638 // Set the size that is at least reserved in caller of this function. Tail
3639 // call optimized function's reserved stack space needs to be aligned so that
3640 // taking the difference between two stack areas will result in an aligned
3641 // stack size.
3642 MinReservedArea =
3643 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3644 FuncInfo->setMinReservedArea(MinReservedArea);
3646 SmallVector<SDValue, 8> MemOps;
3648 // If the function takes variable number of arguments, make a frame index for
3649 // the start of the first vararg value... for expansion of llvm.va_start.
3650 if (isVarArg) {
3651 static const MCPhysReg GPArgRegs[] = {
3652 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3653 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3655 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3657 static const MCPhysReg FPArgRegs[] = {
3658 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3661 unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3663 if (useSoftFloat() || hasSPE())
3664 NumFPArgRegs = 0;
3666 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3667 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3669 // Make room for NumGPArgRegs and NumFPArgRegs.
3670 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3671 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3673 FuncInfo->setVarArgsStackOffset(
3674 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3675 CCInfo.getNextStackOffset(), true));
3677 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
3678 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3680 // The fixed integer arguments of a variadic function are stored to the
3681 // VarArgsFrameIndex on the stack so that they may be loaded by
3682 // dereferencing the result of va_next.
3683 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3684 // Get an existing live-in vreg, or add a new one.
3685 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3686 if (!VReg)
3687 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3689 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3690 SDValue Store =
3691 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3692 MemOps.push_back(Store);
3693 // Increment the address by four for the next argument to store
3694 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3695 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3698 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3699 // is set.
3700 // The double arguments are stored to the VarArgsFrameIndex
3701 // on the stack.
3702 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3703 // Get an existing live-in vreg, or add a new one.
3704 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3705 if (!VReg)
3706 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3708 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3709 SDValue Store =
3710 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3711 MemOps.push_back(Store);
3712 // Increment the address by eight for the next argument to store
3713 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3715 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3719 if (!MemOps.empty())
3720 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3722 return Chain;
3723 }
3725 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3726 // value to MVT::i64 and then truncate to the correct register size.
3727 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3728 EVT ObjectVT, SelectionDAG &DAG,
3730 const SDLoc &dl) const {
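// e.g. a sign-extended i32 arrives in the low bits of an i64 GPR; we assert
// the extension kind for the DAG and then truncate back to the declared type.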
3731 if (Flags.isSExt())
3732 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3733 DAG.getValueType(ObjectVT));
3734 else if (Flags.isZExt())
3735 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3736 DAG.getValueType(ObjectVT));
3738 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3739 }
3741 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3742 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3743 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3744 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3745 // TODO: add description of PPC stack frame format, or at least some docs.
3747 bool isELFv2ABI = Subtarget.isELFv2ABI();
3748 bool isLittleEndian = Subtarget.isLittleEndian();
3749 MachineFunction &MF = DAG.getMachineFunction();
3750 MachineFrameInfo &MFI = MF.getFrameInfo();
3751 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3753 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3754 "fastcc not supported on varargs functions");
3756 EVT PtrVT = getPointerTy(MF.getDataLayout());
3757 // Potential tail calls could cause overwriting of argument stack slots.
3758 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3759 (CallConv == CallingConv::Fast));
3760 unsigned PtrByteSize = 8;
3761 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3763 static const MCPhysReg GPR[] = {
3764 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3765 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3767 static const MCPhysReg VR[] = {
3768 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3769 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3772 const unsigned Num_GPR_Regs = array_lengthof(GPR);
3773 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3774 const unsigned Num_VR_Regs = array_lengthof(VR);
3775 const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3777 // Do a first pass over the arguments to determine whether the ABI
3778 // guarantees that our caller has allocated the parameter save area
3779 // on its stack frame. In the ELFv1 ABI, this is always the case;
3780 // in the ELFv2 ABI, it is true if this is a vararg function or if
3781 // any parameter is located in a stack slot.
3783 bool HasParameterArea = !isELFv2ABI || isVarArg;
3784 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3785 unsigned NumBytes = LinkageSize;
3786 unsigned AvailableFPRs = Num_FPR_Regs;
3787 unsigned AvailableVRs = Num_VR_Regs;
3788 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3789 if (Ins[i].Flags.isNest())
3792 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3793 PtrByteSize, LinkageSize, ParamAreaSize,
3794 NumBytes, AvailableFPRs, AvailableVRs,
3795 Subtarget.hasQPX()))
3796 HasParameterArea = true;
3799 // Add DAG nodes to load the arguments or copy them out of registers. On
3800 // entry to a function on PPC, the arguments start after the linkage area,
3801 // although the first ones are often in registers.
3803 unsigned ArgOffset = LinkageSize;
3804 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3805 unsigned &QFPR_idx = FPR_idx;
3806 SmallVector<SDValue, 8> MemOps;
3807 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3808 unsigned CurArgIdx = 0;
3809 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3811 bool needsLoad = false;
3812 EVT ObjectVT = Ins[ArgNo].VT;
3813 EVT OrigVT = Ins[ArgNo].ArgVT;
3814 unsigned ObjSize = ObjectVT.getStoreSize();
3815 unsigned ArgSize = ObjSize;
3816 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3817 if (Ins[ArgNo].isOrigArg()) {
3818 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3819 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3821 // We re-align the argument offset for each argument, except when using the
3822 // fast calling convention, when we need to make sure we do that only when
3823 // we'll actually use a stack slot.
3824 unsigned CurArgOffset, Align;
3825 auto ComputeArgOffset = [&]() {
3826 /* Respect alignment of argument on the stack. */
3827 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3828 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3829 CurArgOffset = ArgOffset;
3832 if (CallConv != CallingConv::Fast) {
3835 /* Compute GPR index associated with argument offset. */
3836 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3837 GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3840 // FIXME the codegen can be much improved in some cases.
3841 // We do not have to keep everything in memory.
3842 if (Flags.isByVal()) {
3843 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3845 if (CallConv == CallingConv::Fast)
3848 // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3849 ObjSize = Flags.getByValSize();
3850 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3851 // Empty aggregate parameters do not take up registers. Examples:
3855 // etc. However, we have to provide a place-holder in InVals, so
3856 // pretend we have an 8-byte item at the current address for that
3857 // purpose.
3858 if (!ObjSize) {
3859 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3860 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3861 InVals.push_back(FIN);
3862 continue;
3863 }
3865 // Create a stack object covering all stack doublewords occupied
3866 // by the argument. If the argument is (fully or partially) on
3867 // the stack, or if the argument is fully in registers but the
3868 // caller has allocated the parameter save anyway, we can refer
3869 // directly to the caller's stack frame. Otherwise, create a
3870 // local copy in our own frame.
3872 if (HasParameterArea ||
3873 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3874 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3876 FI = MFI.CreateStackObject(ArgSize, Align, false);
3877 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3879 // Handle aggregates smaller than 8 bytes.
3880 if (ObjSize < PtrByteSize) {
3881 // The value of the object is its address, which differs from the
3882 // address of the enclosing doubleword on big-endian systems.
3884 if (!isLittleEndian) {
3885 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3886 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3888 InVals.push_back(Arg);
3890 if (GPR_idx != Num_GPR_Regs) {
3891 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3892 FuncInfo->addLiveInAttr(VReg, Flags);
3893 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3896 if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3897 EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3898 (ObjSize == 2 ? MVT::i16 : MVT::i32));
3899 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3900 MachinePointerInfo(&*FuncArg), ObjType);
3902 // For sizes that don't fit a truncating store (3, 5, 6, 7),
3903 // store the whole register as-is to the parameter save area
3905 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3906 MachinePointerInfo(&*FuncArg));
3909 MemOps.push_back(Store);
3911 // Whether we copied from a register or not, advance the offset
3912 // into the parameter save area by a full doubleword.
3913 ArgOffset += PtrByteSize;
3917 // The value of the object is its address, which is the address of
3918 // its first stack doubleword.
3919 InVals.push_back(FIN);
3921 // Store whatever pieces of the object are in registers to memory.
3922 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3923 if (GPR_idx == Num_GPR_Regs)
3926 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3927 FuncInfo->addLiveInAttr(VReg, Flags);
3928 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3931 SDValue Off = DAG.getConstant(j, dl, PtrVT);
3932 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3934 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3935 MachinePointerInfo(&*FuncArg, j));
3936 MemOps.push_back(Store);
3939 ArgOffset += ArgSize;
3943 switch (ObjectVT.getSimpleVT().SimpleTy) {
3944 default: llvm_unreachable("Unhandled argument type!");
3948 if (Flags.isNest()) {
3949 // The 'nest' parameter, if any, is passed in R11.
3950 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3951 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3953 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3954 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3959 // These can be scalar arguments or elements of an integer array type
3960 // passed directly. Clang may use those instead of "byval" aggregate
3961 // types to avoid forcing arguments to memory unnecessarily.
3962 if (GPR_idx != Num_GPR_Regs) {
3963 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3964 FuncInfo->addLiveInAttr(VReg, Flags);
3965 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3967 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3968 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3969 // value to MVT::i64 and then truncate to the correct register size.
3970 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3972 if (CallConv == CallingConv::Fast)
3976 ArgSize = PtrByteSize;
3978 if (CallConv != CallingConv::Fast || needsLoad)
3984 // These can be scalar arguments or elements of a float array type
3985 // passed directly. The latter are used to implement ELFv2 homogenous
3986 // float aggregates.
3987 if (FPR_idx != Num_FPR_Regs) {
3990 if (ObjectVT == MVT::f32)
3991 VReg = MF.addLiveIn(FPR[FPR_idx],
3992 Subtarget.hasP8Vector()
3993 ? &PPC::VSSRCRegClass
3994 : &PPC::F4RCRegClass);
3996 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
3997 ? &PPC::VSFRCRegClass
3998 : &PPC::F8RCRegClass);
4000 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4002 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4003 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4004 // once we support fp <-> gpr moves.
4006 // This can only ever happen in the presence of f32 array types,
4007 // since otherwise we never run out of FPRs before running out
4009 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4010 FuncInfo->addLiveInAttr(VReg, Flags);
4011 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4013 if (ObjectVT == MVT::f32) {
4014 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4015 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4016 DAG.getConstant(32, dl, MVT::i32));
4017 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4020 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4022 if (CallConv == CallingConv::Fast)
4028 // When passing an array of floats, the array occupies consecutive
4029 // space in the argument area; only round up to the next doubleword
4030 // at the end of the array. Otherwise, each float takes 8 bytes.
4031 if (CallConv != CallingConv::Fast || needsLoad) {
4032 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4033 ArgOffset += ArgSize;
4034 if (Flags.isInConsecutiveRegsLast())
4035 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4046 if (!Subtarget.hasQPX()) {
4047 // These can be scalar arguments or elements of a vector array type
4048 // passed directly. The latter are used to implement ELFv2 homogenous
4049 // vector aggregates.
4050 if (VR_idx != Num_VR_Regs) {
4051 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4052 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4055 if (CallConv == CallingConv::Fast)
4059 if (CallConv != CallingConv::Fast || needsLoad)
4064 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
4065 "Invalid QPX parameter type");
4070 // QPX vectors are treated like their scalar floating-point subregisters
4071 // (except that they're larger).
4072 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
4073 if (QFPR_idx != Num_QFPR_Regs) {
4074 const TargetRegisterClass *RC;
4075 switch (ObjectVT.getSimpleVT().SimpleTy) {
4076 case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
4077 case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
4078 default: RC = &PPC::QBRCRegClass; break;
4081 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
4082 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4085 if (CallConv == CallingConv::Fast)
4089 if (CallConv != CallingConv::Fast || needsLoad)
4094 // We need to load the argument to a virtual register if we determined
4095 // above that we ran out of physical registers of the appropriate type.
4097 if (ObjSize < ArgSize && !isLittleEndian)
4098 CurArgOffset += ArgSize - ObjSize;
4099 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4100 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4101 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4104 InVals.push_back(ArgVal);
4107 // Area that is at least reserved in the caller of this function.
4108 unsigned MinReservedArea;
4109 if (HasParameterArea)
4110 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4112 MinReservedArea = LinkageSize;
4114 // Set the size that is at least reserved in caller of this function. Tail
4115 // call optimized functions' reserved stack space needs to be aligned so that
4116 // taking the difference between two stack areas will result in an aligned
4117 // stack size.
4118 MinReservedArea =
4119 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4120 FuncInfo->setMinReservedArea(MinReservedArea);
4122 // If the function takes variable number of arguments, make a frame index for
4123 // the start of the first vararg value... for expansion of llvm.va_start.
4124 if (isVarArg) {
4125 int Depth = ArgOffset;
4127 FuncInfo->setVarArgsFrameIndex(
4128 MFI.CreateFixedObject(PtrByteSize, Depth, true));
4129 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4131 // If this function is vararg, store any remaining integer argument regs
4132 // to their spots on the stack so that they may be loaded by dereferencing
4133 // the result of va_next.
4134 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4135 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4136 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4137 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4138 SDValue Store =
4139 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4140 MemOps.push_back(Store);
4141 // Increment the address by eight for the next argument to store
4142 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4143 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4147 if (!MemOps.empty())
4148 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4150 return Chain;
4151 }
4153 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4154 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4155 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4156 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4157 // TODO: add description of PPC stack frame format, or at least some docs.
4159 MachineFunction &MF = DAG.getMachineFunction();
4160 MachineFrameInfo &MFI = MF.getFrameInfo();
4161 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4163 EVT PtrVT = getPointerTy(MF.getDataLayout());
4164 bool isPPC64 = PtrVT == MVT::i64;
4165 // Potential tail calls could cause overwriting of argument stack slots.
4166 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4167 (CallConv == CallingConv::Fast));
4168 unsigned PtrByteSize = isPPC64 ? 8 : 4;
4169 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4170 unsigned ArgOffset = LinkageSize;
4171 // Area that is at least reserved in caller of this function.
4172 unsigned MinReservedArea = ArgOffset;
4174 static const MCPhysReg GPR_32[] = { // 32-bit registers.
4175 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4176 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4178 static const MCPhysReg GPR_64[] = { // 64-bit registers.
4179 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4180 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4182 static const MCPhysReg VR[] = {
4183 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4184 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4187 const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4188 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4189 const unsigned Num_VR_Regs = array_lengthof( VR);
4191 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4193 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4195 // In 32-bit non-varargs functions, the stack space for vectors is after the
4196 // stack space for non-vectors. We do not use this space unless we have
4197 // too many vectors to fit in registers, something that only occurs in
4198 // constructed examples, but we have to walk the arglist to figure
4199 // that out...for the pathological case, compute VecArgOffset as the
4200 // start of the vector parameter area. Computing VecArgOffset is the
4201 // entire point of the following loop.
4202 unsigned VecArgOffset = ArgOffset;
4203 if (!isVarArg && !isPPC64) {
4204 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4206 EVT ObjectVT = Ins[ArgNo].VT;
4207 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4209 if (Flags.isByVal()) {
4210 // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4211 unsigned ObjSize = Flags.getByValSize();
4213 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4214 VecArgOffset += ArgSize;
4218 switch(ObjectVT.getSimpleVT().SimpleTy) {
4219 default: llvm_unreachable("Unhandled argument type!");
4225 case MVT::i64: // PPC64
4227 // FIXME: We are guaranteed to be !isPPC64 at this point.
4228 // Does MVT::i64 apply?
4235 // Nothing to do, we're only looking at non-vector args here.
4240 // We've found where the vector parameter area in memory is. Skip the
4241 // first 12 parameters; these don't use that memory.
4242 VecArgOffset = ((VecArgOffset+15)/16)*16;
4243 VecArgOffset += 12*16;
4245 // Add DAG nodes to load the arguments or copy them out of registers. On
4246 // entry to a function on PPC, the arguments start after the linkage area,
4247 // although the first ones are often in registers.
4249 SmallVector<SDValue, 8> MemOps;
4250 unsigned nAltivecParamsAtEnd = 0;
4251 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4252 unsigned CurArgIdx = 0;
4253 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4255 bool needsLoad = false;
4256 EVT ObjectVT = Ins[ArgNo].VT;
4257 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4258 unsigned ArgSize = ObjSize;
4259 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4260 if (Ins[ArgNo].isOrigArg()) {
4261 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4262 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4264 unsigned CurArgOffset = ArgOffset;
4266 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4267 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4268 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4269 if (isVarArg || isPPC64) {
4270 MinReservedArea = ((MinReservedArea+15)/16)*16;
4271 MinReservedArea += CalculateStackSlotSize(ObjectVT,
4274 } else nAltivecParamsAtEnd++;
4276 // Calculate min reserved area.
4277 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4281 // FIXME the codegen can be much improved in some cases.
4282 // We do not have to keep everything in memory.
4283 if (Flags.isByVal()) {
4284 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4286 // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4287 ObjSize = Flags.getByValSize();
4288 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4289 // Objects of size 1 and 2 are right justified, everything else is
4290 // left justified. This means the memory address is adjusted forwards.
4291 if (ObjSize==1 || ObjSize==2) {
4292 CurArgOffset = CurArgOffset + (4 - ObjSize);
4294 // The value of the object is its address.
4295 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4296 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4297 InVals.push_back(FIN);
4298 if (ObjSize==1 || ObjSize==2) {
4299 if (GPR_idx != Num_GPR_Regs) {
4302 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4304 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4305 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4306 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4308 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4309 MachinePointerInfo(&*FuncArg), ObjType);
4310 MemOps.push_back(Store);
4314 ArgOffset += PtrByteSize;
4318 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4319 // Store whatever pieces of the object are in registers
4320 // to memory. ArgOffset will be the address of the beginning
4322 if (GPR_idx != Num_GPR_Regs) {
4325 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4327 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4328 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4329 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4330 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4331 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4332 MachinePointerInfo(&*FuncArg, j));
4333 MemOps.push_back(Store);
4335 ArgOffset += PtrByteSize;
4337 ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4344 switch (ObjectVT.getSimpleVT().SimpleTy) {
4345 default: llvm_unreachable("Unhandled argument type!");
4349 if (GPR_idx != Num_GPR_Regs) {
4350 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4351 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4353 if (ObjectVT == MVT::i1)
4354 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4359 ArgSize = PtrByteSize;
4361 // All int arguments reserve stack space in the Darwin ABI.
4362 ArgOffset += PtrByteSize;
4366 case MVT::i64: // PPC64
4367 if (GPR_idx != Num_GPR_Regs) {
4368 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4369 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4371 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4372 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4373 // value to MVT::i64 and then truncate to the correct register size.
4374 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4379 ArgSize = PtrByteSize;
4381 // All int arguments reserve stack space in the Darwin ABI.
4387 // Every 4 bytes of argument space consumes one of the GPRs available for
4388 // argument passing.
4389 if (GPR_idx != Num_GPR_Regs) {
4391 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4394 if (FPR_idx != Num_FPR_Regs) {
4397 if (ObjectVT == MVT::f32)
4398 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4400 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4402 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4408 // All FP arguments reserve stack space in the Darwin ABI.
4409 ArgOffset += isPPC64 ? 8 : ObjSize;
4415 // Note that vector arguments in registers don't reserve stack space,
4416 // except in varargs functions.
4417 if (VR_idx != Num_VR_Regs) {
4418 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4419 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4421 while ((ArgOffset % 16) != 0) {
4422 ArgOffset += PtrByteSize;
4423 if (GPR_idx != Num_GPR_Regs)
4427 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4431 if (!isVarArg && !isPPC64) {
4432 // Vectors go after all the nonvectors.
4433 CurArgOffset = VecArgOffset;
4436 // Vectors are aligned.
4437 ArgOffset = ((ArgOffset+15)/16)*16;
4438 CurArgOffset = ArgOffset;
4446 // We need to load the argument to a virtual register if we determined above
4447 // that we ran out of physical registers of the appropriate type.
4449 int FI = MFI.CreateFixedObject(ObjSize,
4450 CurArgOffset + (ArgSize - ObjSize),
4452 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4453 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4456 InVals.push_back(ArgVal);
4459 // Allow for Altivec parameters at the end, if needed.
4460 if (nAltivecParamsAtEnd) {
4461 MinReservedArea = ((MinReservedArea+15)/16)*16;
4462 MinReservedArea += 16*nAltivecParamsAtEnd;
4465 // Area that is at least reserved in the caller of this function.
4466 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4468 // Set the size that is at least reserved in caller of this function. Tail
4469 // call optimized functions' reserved stack space needs to be aligned so that
4470 // taking the difference between two stack areas will result in an aligned
4471 // stack size.
4472 MinReservedArea =
4473 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4474 FuncInfo->setMinReservedArea(MinReservedArea);
4476 // If the function takes variable number of arguments, make a frame index for
4477 // the start of the first vararg value... for expansion of llvm.va_start.
4478 if (isVarArg) {
4479 int Depth = ArgOffset;
4481 FuncInfo->setVarArgsFrameIndex(
4482 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4484 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4486 // If this function is vararg, store any remaining integer argument regs
4487 // to their spots on the stack so that they may be loaded by dereferencing
4488 // the result of va_next.
4489 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4493 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4495 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4497 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4498 SDValue Store =
4499 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4500 MemOps.push_back(Store);
4501 // Increment the address by the pointer size for the next argument to store
4502 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4503 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4507 if (!MemOps.empty())
4508 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4510 return Chain;
4511 }
4513 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4514 /// adjusted to accommodate the arguments for the tailcall.
4515 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4516 unsigned ParamSize) {
4518 if (!isTailCall) return 0;
4520 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4521 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4522 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
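// e.g. a caller that reserved 64 bytes of argument area cannot accommodate a
// 96-byte tail-call argument list; SPDiff is then -32 and the callee's frame
// must be grown by that amount before the call.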
4523 // Remember only if the new adjustment is bigger.
4524 if (SPDiff < FI->getTailCallSPDelta())
4525 FI->setTailCallSPDelta(SPDiff);
4527 return SPDiff;
4528 }
4530 static bool isFunctionGlobalAddress(SDValue Callee);
4533 callsShareTOCBase(const Function *Caller, SDValue Callee,
4534 const TargetMachine &TM) {
4535 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4536 // don't have enough information to determine if the caller and callee share
4537 // the same TOC base, so we have to pessimistically assume they don't, for
4538 // correctness.
4539 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4540 if (!G)
4541 return false;
4543 const GlobalValue *GV = G->getGlobal();
4544 // The medium and large code models are expected to provide a sufficiently
4545 // large TOC to provide all data addressing needs of a module with a
4546 // single TOC. Since each module will be addressed with a single TOC, we
4547 // only need to check that caller and callee don't cross dso boundaries.
4548 if (CodeModel::Medium == TM.getCodeModel() ||
4549 CodeModel::Large == TM.getCodeModel())
4550 return TM.shouldAssumeDSOLocal(*Caller->getParent(), GV);
4552 // Otherwise we need to ensure callee and caller are in the same section,
4553 // since the linker may allocate multiple TOCs, and we don't know which
4554 // sections will belong to the same TOC base.
4556 if (!GV->isStrongDefinitionForLinker())
4557 return false;
4559 // Any explicitly-specified sections and section prefixes must also match.
4560 // Also, if we're using -ffunction-sections, then each function is always in
4561 // a different section (the same is true for COMDAT functions).
4562 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4563 GV->getSection() != Caller->getSection())
4564 return false;
4565 if (const auto *F = dyn_cast<Function>(GV)) {
4566 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4567 return false;
4568 }
4570 // If the callee might be interposed, then we can't assume the ultimate call
4571 // target will be in the same section. Even in cases where we can assume that
4572 // interposition won't happen, in any case where the linker might insert a
4573 // stub to allow for interposition, we must generate code as though
4574 // interposition might occur. To understand why this matters, consider a
4575 // situation where: a -> b -> c where the arrows indicate calls. b and c are
4576 // in the same section, but a is in a different module (i.e. has a different
4577 // TOC base pointer). If the linker allows for interposition between b and c,
4578 // then it will generate a stub for the call edge between b and c which will
4579 // save the TOC pointer into the designated stack slot allocated by b. If we
4580 // return true here, and therefore allow a tail call between b and c, that
4581 // stack slot won't exist and the b -> c stub will end up saving b's TOC base
4582 // pointer into the stack slot allocated by a (where the a -> b stub saved
4583 // a's TOC base pointer). If we're not considering a tail call, but rather,
4584 // whether a nop is needed after the call instruction in b, because the linker
4585 // will insert a stub, it might complain about a missing nop if we omit it
4586 // (although many don't complain in this case).
4587 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4588 return false;
4590 return true;
4591 }
static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.is64BitELFABI());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool
hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
  if (CS.arg_size() != CallerFn->arg_size())
    return false;

  ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
  ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // Here the 1st argument of the callee is undef and has the same type as
    // the corresponding caller argument, so the argument lists still match.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

// Returns true if TCO is possible between the caller's and callee's calling
// conventions.
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
                                    CallingConv::ID CalleeCC) {
  // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [] (CallingConv::ID CC){
      return CC == CallingConv::C || CC == CallingConv::Fast;
  };
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
    return false;

  // We can safely tail call both fastcc and ccc callees from a c calling
  // convention caller. If the caller is fastcc, we may have less stack space
  // than a non-fastcc caller with the same signature, so disable tail calls
  // in that case.
  return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}
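
// The resulting eligibility matrix (illustrative summary of the check above):
//   caller ccc    -> callee ccc    : eligible
//   caller ccc    -> callee fastcc : eligible
//   caller fastcc -> callee fastcc : eligible
//   caller fastcc -> callee ccc    : rejected (a fastcc caller may have
//                                    reserved less stack than a ccc caller
//                                    with the same signature would have)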
bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  auto &Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for tco.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
    return false;

  // A caller with any byval parameter is not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // A callee with any byval parameter is not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > callee's stack size, we are still able to apply
  // sibling call optimization. For example, gcc is able to do SCO for caller1
  // in the following example, but not for caller2.
  //   struct test {
  //     long int a;
  //     char ary[56];
  //   } gTest;
  //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
  //     b->a = v.a;
  //     return 0;
  //   }
  //   void caller1(struct test a, struct test c, struct test *b) {
  //     callee(gTest, b); }
  //   void caller2(struct test *b) { callee(gTest, b); }
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // If callee and caller use different calling conventions, we cannot pass
  // parameters on stack since offsets for the parameter area may be different.
  if (Caller.getCallingConv() != CalleeCC &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;

  // No TCO/SCO on indirect call because the caller has to restore its TOC.
  if (!isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // If the caller and callee potentially have different TOC bases then we
  // cannot tail call since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, we can apply SCO
  // directly; otherwise we need to check whether the callee needs stack slots
  // for passing arguments.
  if (!hasSameArgumentList(&Caller, CS) &&
      needStackSlotPassParameters(Subtarget, Outs)) {
    return false;
  }

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing by val parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
       ISD::ArgFlagsTy Flags = Ins[i].Flags;
       if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in same module, hidden
    // or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.

  return DAG
      .getConstant(
          (int)C->getZExtValue() >> 2, SDLoc(Op),
          DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
      .getNode();
}
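
// Worked example (hypothetical addresses): BLA's LI field holds a 26-bit,
// sign-extended, word-aligned absolute address. 0x01FFFFFC passes both
// checks and comes back as the constant 0x007FFFFF (the address shifted
// right by 2); 0x01FFFFFE fails the alignment check, and 0x04000000 does not
// survive SignExtend32<26> unchanged, so both are rejected.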

namespace {

struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int FrameIdx = 0;

  TailCallArgumentInfo() = default;
};

} // end anonymous namespace

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void StoreTailCallArgumentsToStackSlot(
    SelectionDAG &DAG, SDValue Chain,
    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
    SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // Store relative to framepointer.
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, Arg, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }
}

/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
/// the appropriate stack slot for the tail call optimized function call.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
                                             SDValue OldRetAddr, SDValue OldFP,
                                             int SPDiff, const SDLoc &dl) {
  if (SPDiff) {
    // Calculate the new stack slot for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    const PPCFrameLowering *FL = Subtarget.getFrameLowering();
    bool isPPC64 = Subtarget.isPPC64();
    int SlotSize = isPPC64 ? 8 : 4;
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
    int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
                                                         NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                         MachinePointerInfo::getFixedStack(MF, NewRetAddr));

    // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
    // slot as the FP is never overwritten.
    if (Subtarget.isDarwinABI()) {
      int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
      int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
                                                         true);
      SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
      Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
                           MachinePointerInfo::getFixedStack(
                               DAG.getMachineFunction(), NewFPIdx));
    }
  }
  return Chain;
}

/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
/// the position of the argument.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                         SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit loads of the frame pointer and return
/// address stack slots. Returns the chain as result and the loaded values in
/// LROpOut/FPOpOut. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slot for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
    Chain = SDValue(LROpOut.getNode(), 1);

    // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
    // slot as the FP is never overwritten.
    if (Subtarget.isDarwinABI()) {
      FPOpOut = getFramePointerFrameIndex(DAG);
      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
      Chain = SDValue(FPOpOut.getNode(), 1);
    }
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       false, false, false, MachinePointerInfo(),
                       MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
/// tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
    // Calculate and remember argument location.
  } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                                  TailCallArguments);
}

static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
                SDValue FPOp,
                SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not flag preceding copytoreg stuff together with the following stuff.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);

  // Emit callseq_end just before tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

// Is this global address that of a function that can be called by name (as
// opposed to something that must hold a descriptor for an indirect call)?
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getValueType()->isFunctionTy();
  }

  return false;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());

  CCRetInfo.AnalyzeCallResult(
      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
               ? RetCC_PPC_Cold
               : RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val;

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget.isLittleEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
    } else {
      Val = DAG.getCopyFromReg(Chain, dl,
                               VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
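
// Note on the SPE path above: with the SPE ABI an f64 result comes back in a
// pair of 32-bit GPRs and is reassembled with PPCISD::BUILD_SPE64. On a
// big-endian target the first (lower-numbered) register carries the
// high-order word, hence the std::swap before building the f64 value.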

static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
                           const PPCSubtarget &Subtarget, bool isPatchPoint) {
  // PatchPoint calls are not indirect.
  if (isPatchPoint)
    return false;

  if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs can not
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the
  // function pointer immediate points to the global entry point, while the
  // BLA would need to jump to the local entry point (see rL211174).
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
      isBLACompatibleAddress(Callee, DAG))
    return false;

  return true;
}

static unsigned getCallOpcode(bool isIndirectCall, bool isPatchPoint,
                              bool isTailCall, const Function &Caller,
                              const SDValue &Callee,
                              const PPCSubtarget &Subtarget,
                              const TargetMachine &TM) {
  if (isTailCall)
    return PPCISD::TC_RETURN;

  // This is a call through a function pointer.
  if (isIndirectCall) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will
    // be inserted into the DAG as part of call lowering. The restore of the
    // TOC pointer is modeled by using a pseudo instruction for the call
    // opcode that represents the 2 instruction sequence of an indirect branch
    // and link, immediately followed by a load of the TOC pointer from the
    // stack save slot.
    if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
      return PPCISD::BCTRL_LOAD_TOC;

    // An indirect call that does not need a TOC restore.
    return PPCISD::BCTRL;
  }

  // The ABIs that maintain a TOC pointer across calls need to have a nop
  // immediately following the call instruction if the caller and callee may
  // have different TOC bases. At link time if the linker determines the calls
  // may not share a TOC base, the call is redirected to a trampoline inserted
  // by the linker. The trampoline will (among other things) save the caller's
  // TOC pointer at an ABI designated offset in the linkage area and the linker
  // will rewrite the nop to be a load of the TOC pointer from the linkage
  // area.
  if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
    return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
                                                  : PPCISD::CALL_NOP;

  return PPCISD::CALL;
}
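
// Illustrative assembly for the CALL_NOP case on 64-bit ELFv2 (the offset is
// the ABI's TOC save slot; ELFv1 uses 40(1) instead of 24(1)):
//   bl callee   # may be redirected to a linker-inserted trampoline
//   nop         # rewritten to "ld 2, 24(1)" when a TOC switch is needed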

static bool isValidAIXExternalSymSDNode(StringRef SymName) {
  return StringSwitch<bool>(SymName)
      .Cases("__divdi3", "__fixunsdfdi", "__floatundidf", "__floatundisf",
             "__moddi3", "__udivdi3", "__umoddi3", true)
      .Cases("ceil", "floor", "memcpy", "memmove", "memset", "round", true)
      .Default(false);
}
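
// The names above are runtime and libm/libc entry points that codegen itself
// can introduce (e.g. 64-bit division lowered to __divdi3, or a large copy
// lowered to memcpy), which is why they are whitelisted here while general
// ExternalSymbolSDNode support on AIX is still incomplete.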

static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
                               const SDLoc &dl, const PPCSubtarget &Subtarget) {
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
      return SDValue(Dest, 0);

  // Returns true if the callee is local, and false otherwise.
  auto isLocalCallee = [&]() {
    const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    const GlobalValue *GV = G ? G->getGlobal() : nullptr;

    return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
           !dyn_cast_or_null<GlobalIFunc>(GV);
  };

  // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
  // a static relocation model causes some versions of GNU LD (2.17.50, at
  // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
  // built with secure-PLT.
  const bool UsePlt =
      Subtarget.is32BitELFABI() && !isLocalCallee() &&
      Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;

  // On AIX, direct function calls reference the symbol for the function's
  // entry point, which is named by prepending a "." before the function's
  // name.
  const auto getAIXFuncEntryPointSymbolSDNode =
      [&](StringRef FuncName, bool IsDeclaration,
          const XCOFF::StorageClass &SC) {
        auto &Context = DAG.getMachineFunction().getMMI().getContext();

        MCSymbolXCOFF *S = cast<MCSymbolXCOFF>(
            Context.getOrCreateSymbol(Twine(".") + Twine(FuncName)));

        if (IsDeclaration && !S->hasContainingCsect()) {
          // On AIX, an undefined symbol needs to be associated with a
          // MCSectionXCOFF to get the correct storage mapping class.
          // In this case, XCOFF::XMC_PR.
          MCSectionXCOFF *Sec = Context.getXCOFFSection(
              S->getName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
              SectionKind::getMetadata());
          S->setContainingCsect(Sec);
        }

        MVT PtrVT =
            DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        return DAG.getMCSymbol(S, PtrVT);
      };

  if (isFunctionGlobalAddress(Callee)) {
    const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
    const GlobalValue *GV = G->getGlobal();

    if (!Subtarget.isAIXABI())
      return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
                                        UsePlt ? PPCII::MO_PLT : 0);

    assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
    const GlobalObject *GO = cast<GlobalObject>(GV);
    const XCOFF::StorageClass SC =
        TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO);
    return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(),
                                            SC);
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *SymName = S->getSymbol();
    if (!Subtarget.isAIXABI())
      return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
                                         UsePlt ? PPCII::MO_PLT : 0);

    // If there exists a user-declared function whose name is the same as the
    // ExternalSymbol's, then we pick up the user-declared version.
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    if (const Function *F =
            dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) {
      const XCOFF::StorageClass SC =
          TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F);
      return getAIXFuncEntryPointSymbolSDNode(F->getName(), F->isDeclaration(),
                                              SC);
    }

    // TODO: Remove this when the support for ExternalSymbolSDNode is complete.
    if (isValidAIXExternalSymSDNode(SymName)) {
      return getAIXFuncEntryPointSymbolSDNode(SymName, true, XCOFF::C_EXT);
    }

    report_fatal_error("Unexpected ExternalSymbolSDNode: " + Twine(SymName));
  }

  // No transformation needed.
  assert(Callee.getNode() && "What no callee?");
  return Callee;
}

static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
         "Expected a CALLSEQ_STARTSDNode.");

  // The last operand is the chain, except when the node has glue. If the node
  // has glue, then the last operand is the glue, and the chain is the second
  // to last operand.
  SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
  if (LastValue.getValueType() != MVT::Glue)
    return LastValue;

  return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
}

// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                SDValue &Glue, SDValue &Chain,
                                const SDLoc &dl) {
  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
  Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
                      makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
  // The glue is the second value produced.
  Glue = Chain.getValue(1);
}

static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                          SDValue &Glue, SDValue &Chain,
                                          SDValue CallSeqStart,
                                          ImmutableCallSite CS, const SDLoc &dl,
                                          bool hasNest,
                                          const PPCSubtarget &Subtarget) {
  // Function pointers in the 64-bit SVR4 ABI do not point to the function
  // entry point, but to the function descriptor (the function entry point
  // address is part of the function descriptor though).
  // The function descriptor is a three doubleword structure with the
  // following fields: function entry point, TOC base address and
  // environment pointer.
  // Thus for a call through a function pointer, the following actions need
  // to be performed:
  //   1. Save the TOC of the caller in the TOC save area of its stack
  //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
  //   2. Load the address of the function entry point from the function
  //      descriptor.
  //   3. Load the TOC of the callee from the function descriptor into r2.
  //   4. Load the environment pointer from the function descriptor into
  //      r11.
  //   5. Branch to the function entry point address.
  //   6. On return of the callee, the TOC of the caller needs to be
  //      restored (this is done in FinishCall()).
  //
  // The loads are scheduled at the beginning of the call sequence, and the
  // register copies are flagged together to ensure that no other
  // operations can be scheduled in between. E.g. without flagging the
  // copies together, a TOC access in the caller could be scheduled between
  // the assignment of the callee TOC and the branch to the callee, which leads
  // to incorrect code.

  // Start by loading the function address from the descriptor.
  SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                      ? (MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant)
                      : MachineMemOperand::MONone;

  MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);

  // Registers used in building the DAG.
  const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
  const MCRegister TOCReg = Subtarget.getTOCPointerRegister();

  // Offsets of descriptor members.
  const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
  const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();

  const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
  const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;

  // One load for the function's entry point address.
  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);

  // One for loading the TOC anchor for the module that contains the called
  // function.
  SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
  SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
  SDValue TOCPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddTOC,
                  MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);

  // One for loading the environment pointer.
  SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
  SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
  SDValue LoadEnvPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddPtr,
                  MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);

  // Then copy the newly loaded TOC anchor to the TOC pointer.
  SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
  Chain = TOCVal.getValue(0);
  Glue = TOCVal.getValue(1);

  // If the function call has an explicit 'nest' parameter, it takes the
  // place of the environment pointer.
  assert((!hasNest || !Subtarget.isAIXABI()) &&
         "Nest parameter is not supported on AIX.");
  if (!hasNest) {
    SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
    Chain = EnvVal.getValue(0);
    Glue = EnvVal.getValue(1);
  }

  // The rest of the indirect call sequence is the same as the non-descriptor
  // sequence.
  prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
}
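
// For reference, the descriptor handled above corresponds to this C layout
// (illustrative sketch only; the offsets come from the subtarget's
// descriptorTOCAnchorOffset()/descriptorEnvironmentPointerOffset() hooks):
//   struct FunctionDescriptor {
//     void *EntryPoint; // offset 0: loaded and moved into CTR
//     void *TOCBase;    // offset 8 (4 on 32-bit AIX): copied into r2
//     void *EnvPointer; // offset 16 (8 on 32-bit AIX): copied into r11
//   };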

static void
buildCallOperands(SmallVectorImpl<SDValue> &Ops, CallingConv::ID CallConv,
                  const SDLoc &dl, bool isTailCall, bool isVarArg,
                  bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
                  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                  SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
                  const PPCSubtarget &Subtarget, bool isIndirect) {
  const bool IsPPC64 = Subtarget.isPPC64();
  // MVT for a general purpose register.
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  // First operand is always the chain.
  Ops.push_back(Chain);

  // If it's a direct call pass the callee as the second operand.
  if (!isIndirect)
    Ops.push_back(Callee);
  else {
    assert(!isPatchPoint && "Patch point calls are not indirect.");

    // For the TOC based ABIs, we have saved the TOC pointer to the linkage
    // area on the stack (this would have been done in `LowerCall_64SVR4` or
    // `LowerCall_AIX`). The call instruction is a pseudo instruction that
    // represents both the indirect branch and a load that restores the TOC
    // pointer from the linkage area. The operand for the TOC restore is an
    // add of the TOC save offset to the stack pointer. This must be the
    // second operand: after the chain input but before any other variadic
    // arguments.
    if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
      const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();

      SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
      Ops.push_back(AddTOC);
    }

    // Add the register used for the environment pointer.
    if (Subtarget.usesFunctionDescriptors() && !hasNest)
      Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
                                    RegVT));

    // Add CTR register as callee so a bctr can be emitted later.
    if (isTailCall)
      Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
  }

  // If this is a tail call add stack pointer delta.
  if (isTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
  // no way to mark dependencies as implicit here.
  // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
  if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) && !isPatchPoint)
    Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));

  // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
  if (isVarArg && Subtarget.is32BitELFABI())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If the glue is valid, it is the last operand.
  if (Glue.getNode())
    Ops.push_back(Glue);
}
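
// For orientation, the operand list built above is, in order (optional
// entries in parentheses):
//   Chain, callee or TOC-restore address, (environment-pointer register),
//   (CTR register for tail calls), (SPDiff for tail calls), argument
//   registers, (TOC register), (CR1EQ for 32-bit SVR4 vararg calls),
//   register mask, (Glue)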

SDValue PPCTargetLowering::FinishCall(
    CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
    bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {

  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI())
    setUsesTOCBasePtr(DAG);

  const bool isIndirect = isIndirectCall(Callee, DAG, Subtarget, isPatchPoint);
  unsigned CallOpc = getCallOpcode(isIndirect, isPatchPoint, isTailCall,
                                   DAG.getMachineFunction().getFunction(),
                                   Callee, Subtarget, DAG.getTarget());

  if (!isIndirect)
    Callee = transformCallee(Callee, DAG, dl, Subtarget);
  else if (Subtarget.usesFunctionDescriptors())
    prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CS,
                                  dl, hasNest, Subtarget);
  else
    prepareIndirectCall(DAG, Callee, Glue, Chain, dl);

  // Build the operand list for the call instruction.
  SmallVector<SDValue, 8> Ops;
  buildCallOperands(Ops, CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    hasNest, DAG, RegsToPass, Glue, Chain, Callee, SPDiff,
                    Subtarget, isIndirect);

  // Emit tail call.
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");
    assert(CallOpc == PPCISD::TC_RETURN &&
           "Unexpected call opcode for a tail call.");
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
  }

  std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
  Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
  Glue = Chain.getValue(1);

  // When performing tail call optimization the callee pops its arguments off
  // the stack. Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops = (CallConv == CallingConv::Fast &&
                         getTargetMachine().Options.GuaranteedTailCallOpt)
                            ? NumBytes
                            : 0;

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(BytesCalleePops, dl, true),
                             Glue, dl);
  Glue = Chain.getValue(1);

  return LowerCallResult(Chain, Glue, CallConv, isVarArg, Ins, dl, DAG, InVals);
}

SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  bool isPatchPoint = CLI.IsPatchPoint;
  ImmutableCallSite CS = CLI.CS;

  if (isTailCall) {
    if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
      isTailCall = false;
    else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
      isTailCall =
        IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
                                                 isVarArg, Outs, Ins, DAG);
    else
      isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                     Ins, DAG);
    if (isTailCall) {
      ++NumTailCalls;
      if (!getTargetMachine().Options.GuaranteedTailCallOpt)
        ++NumSiblingCalls;

      assert(isa<GlobalAddressSDNode>(Callee) &&
             "Callee should be an llvm::Function object.");
      LLVM_DEBUG(
          const GlobalValue *GV =
              cast<GlobalAddressSDNode>(Callee)->getGlobal();
          const unsigned Width =
              80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
          dbgs() << "TCO caller: "
                 << left_justify(DAG.getMachineFunction().getName(), Width)
                 << ", callee linkage: " << GV->getVisibility() << ", "
                 << GV->getLinkage() << "\n");
    }
  }

  if (!isTailCall && CS && CS.isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via function pointer. If we have a function name, first translate it
  // into an address.
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
    return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, isPatchPoint, Outs, OutVals, Ins,
                            dl, DAG, InVals, CS);

  if (Subtarget.isSVR4ABI())
    return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, isPatchPoint, Outs, OutVals, Ins,
                            dl, DAG, InVals, CS);

  if (Subtarget.isAIXABI())
    return LowerCall_AIX(Chain, Callee, CallConv, isVarArg,
                         isTailCall, isPatchPoint, Outs, OutVals, Ins,
                         dl, DAG, InVals, CS);

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
                          dl, DAG, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence the frame pointer will be used for dynamicalloc
  // and restoring the caller's stack pointer in this function's epilog. This
  // is done because by tail calling the called function might overwrite the
  // value in this function's (MF) stack pointer stack slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, parameter list area and the part of the local variable space which
  // contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }
  CCInfo.clearWasPPCF128();

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, parameter list area and the part of the local
  // variable space where copies of aggregates which are passed by value are
  // placed.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so it can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  // i - Tracks the index into the list of registers allocated for the call
  // RealArgIdx - Tracks the index into the list of actual function arguments
  // j - Tracks the index into the list of byval arguments
  for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
       i != e;
       ++i, ++RealArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[RealArgIdx];
    ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address of
      // this copy to the callee.
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      CCValAssign &ByValVA = ByValArgLocs[j++];
      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");

      // Memory reserved in the local variable space of the caller's stack
      // frame.
      unsigned LocMemOffset = ByValVA.getLocMemOffset();

      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                           StackPtr, PtrOff);

      // Create a copy of the argument in the local area of the current
      // stack frame.
      SDValue MemcpyCall =
        CreateCopyOfByValArgument(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);

      // This must go outside the CALLSEQ_START..END.
      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
                                                     SDLoc(MemcpyCall));
      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                             NewCallSeqStart.getNode());
      Chain = CallSeqStart = NewCallSeqStart;

      // Pass the address of the aggregate copy on the stack either in a
      // physical register or in the parameter list area of the current stack
      // frame to the callee.
      Arg = PtrOff;
    }

    // When useCRBits() is true, there can be i1 arguments.
    // It is because getRegisterType(MVT::i1) => MVT::i1,
    // and for other integer types getRegisterType() => MVT::i32.
    // Extend i1 and ensure callee will get i32.
    if (Arg.getValueType() == MVT::i1)
      Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                        dl, MVT::i32, Arg);

    if (VA.isRegLoc()) {
      seenFloatArg |= VA.getLocVT().isFloatingPoint();
      // Put argument in a physical register.
      if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
        bool IsLE = Subtarget.isLittleEndian();
        SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                                   DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
        SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                           DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
        RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
                                            SVal.getValue(0)));
      } else
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      // Put argument in the parameter list area of the current stack frame.
      assert(VA.isMemLoc());
      unsigned LocMemOffset = VA.getLocMemOffset();

      if (!isTailCall) {
        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
                             StackPtr, PtrOff);

        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      } else {
        // Calculate and remember argument location.
        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
                                 TailCallArguments);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Set CR bit 6 to true if this is a vararg call with floating args passed in
  // registers.
  if (isVarArg) {
    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, InFlag };

    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
                        dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));

    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);

  return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    /* unused except on PPC64 ELFv1 */ false, DAG,
                    RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
    SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
    SelectionDAG &DAG, const SDLoc &dl) const {
  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
                        CallSeqStart.getNode()->getOperand(0),
                        Flags, DAG, dl);
  // The MEMCPY must go outside the CALLSEQ_START..END.
  int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
                                                 SDLoc(MemcpyCall));
  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
                         NewCallSeqStart.getNode());
  return NewCallSeqStart;
}
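
// The reason the memcpy is hoisted out: call sequences may not nest, and
// DAG.getMemcpy can itself be lowered to a call to memcpy, so chaining it to
// the operand of CALLSEQ_START (rather than to its result) keeps any such
// library call outside the CALLSEQ_START..CALLSEQ_END region.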
5818 SDValue PPCTargetLowering::LowerCall_64SVR4(
5819 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5820 bool isTailCall, bool isPatchPoint,
5821 const SmallVectorImpl<ISD::OutputArg> &Outs,
5822 const SmallVectorImpl<SDValue> &OutVals,
5823 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5824 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5825 ImmutableCallSite CS) const {
5826 bool isELFv2ABI = Subtarget.isELFv2ABI();
5827 bool isLittleEndian = Subtarget.isLittleEndian();
5828 unsigned NumOps = Outs.size();
5829 bool hasNest = false;
5830 bool IsSibCall = false;
5832 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5833 unsigned PtrByteSize = 8;
5835 MachineFunction &MF = DAG.getMachineFunction();
5837 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5840 // Mark this function as potentially containing a function that contains a
5841 // tail call. As a consequence the frame pointer will be used for dynamicalloc
5842 // and restoring the callers stack pointer in this functions epilog. This is
5843 // done because by tail calling the called function might overwrite the value
5844 // in this function's (MF) stack pointer stack slot 0(SP).
5845 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5846 CallConv == CallingConv::Fast)
5847 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5849 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5850 "fastcc not supported on varargs functions");
5852 // Count how many bytes are to be pushed on the stack, including the linkage
5853 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5854 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5855 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5856 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5857 unsigned NumBytes = LinkageSize;
5858 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5859 unsigned &QFPR_idx = FPR_idx;
5861 static const MCPhysReg GPR[] = {
5862 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5863 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5865 static const MCPhysReg VR[] = {
5866 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5867 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5870 const unsigned NumGPRs = array_lengthof(GPR);
5871 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5872 const unsigned NumVRs = array_lengthof(VR);
5873 const unsigned NumQFPRs = NumFPRs;
5875 // On ELFv2, we can avoid allocating the parameter area if all the arguments
5876 // can be passed to the callee in registers.
5877 // For the fast calling convention, there is another check below.
5878 // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5879 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
5880 if (!HasParameterArea) {
5881 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5882 unsigned AvailableFPRs = NumFPRs;
5883 unsigned AvailableVRs = NumVRs;
5884 unsigned NumBytesTmp = NumBytes;
5885 for (unsigned i = 0; i != NumOps; ++i) {
5886 if (Outs[i].Flags.isNest()) continue;
5887 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5888 PtrByteSize, LinkageSize, ParamAreaSize,
5889 NumBytesTmp, AvailableFPRs, AvailableVRs,
5890 Subtarget.hasQPX()))
5891 HasParameterArea = true;
5895 // When using the fast calling convention, we don't provide backing for
5896 // arguments that will be in registers.
5897 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5899 // Avoid allocating parameter area for fastcc functions if all the arguments
5900 // can be passed in the registers.
5901 if (CallConv == CallingConv::Fast)
5902 HasParameterArea = false;
5904 // Add up all the space actually used.
5905 for (unsigned i = 0; i != NumOps; ++i) {
5906 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5907 EVT ArgVT = Outs[i].VT;
5908 EVT OrigVT = Outs[i].ArgVT;
5913 if (CallConv == CallingConv::Fast) {
5914 if (Flags.isByVal()) {
5915 NumGPRsUsed += (Flags.getByValSize()+7)/8;
5916 if (NumGPRsUsed > NumGPRs)
5917 HasParameterArea = true;
5919 switch (ArgVT.getSimpleVT().SimpleTy) {
5920 default: llvm_unreachable("Unexpected ValueType for argument!");
5924 if (++NumGPRsUsed <= NumGPRs)
5934 if (++NumVRsUsed <= NumVRs)
5938 // When using QPX, this is handled like a FP register, otherwise, it
5939 // is an Altivec register.
5940 if (Subtarget.hasQPX()) {
5941 if (++NumFPRsUsed <= NumFPRs)
5944 if (++NumVRsUsed <= NumVRs)
5950 case MVT::v4f64: // QPX
5951 case MVT::v4i1: // QPX
5952 if (++NumFPRsUsed <= NumFPRs)
5956 HasParameterArea = true;
5960 /* Respect alignment of argument on the stack. */
5962 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5963 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5965 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5966 if (Flags.isInConsecutiveRegsLast())
5967 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5970 unsigned NumBytesActuallyUsed = NumBytes;
5972 // In the old ELFv1 ABI,
5973 // the prolog code of the callee may store up to 8 GPR argument registers to
5974 // the stack, allowing va_start to index over them in memory if its varargs.
5975 // Because we cannot tell if this is needed on the caller side, we have to
5976 // conservatively assume that it is needed. As such, make sure we have at
5977 // least enough stack space for the caller to store the 8 GPRs.
5978 // In the ELFv2 ABI, we allocate the parameter area iff a callee
5979 // really requires memory operands, e.g. a vararg function.
5980 if (HasParameterArea)
5981 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5983 NumBytes = LinkageSize;
5985 // Tail call needs the stack to be aligned.
5986 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5987 CallConv == CallingConv::Fast)
5988 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5992 // Calculate by how many bytes the stack has to be adjusted in case of tail
5993 // call optimization.
5995 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5997 // To protect arguments on the stack from being clobbered in a tail call,
5998 // force all the loads to happen before doing any other lowering.
6000 Chain = DAG.getStackArgumentTokenFactor(Chain);
6002 // Adjust the stack pointer for the new arguments...
6003 // These operations are automatically eliminated by the prolog/epilog pass
6005 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6006 SDValue CallSeqStart = Chain;
6008 // Load the return address and frame pointer so it can be move somewhere else
6011 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6013 // Set up a copy of the stack pointer for use loading and storing any
6014 // arguments that may not fit in the registers available for argument
6016 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6018 // Figure out which arguments are going to go in registers, and which in
6019 // memory. Also, if this is a vararg function, floating point operations
6020 // must be stored to our stack, and loaded into integer regs as well, if
6021 // any integer regs are available for argument passing.
6022 unsigned ArgOffset = LinkageSize;
6024 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6025 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6027 SmallVector<SDValue, 8> MemOpChains;
6028 for (unsigned i = 0; i != NumOps; ++i) {
6029 SDValue Arg = OutVals[i];
6030 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6031 EVT ArgVT = Outs[i].VT;
6032 EVT OrigVT = Outs[i].ArgVT;
6034 // PtrOff will be used to store the current argument to the stack if a
6035 // register cannot be found for it.
6038 // We re-align the argument offset for each argument, except when using the
6039 // fast calling convention, when we need to make sure we do that only when
6040 // we'll actually use a stack slot.
6041 auto ComputePtrOff = [&]() {
6042 /* Respect alignment of argument on the stack. */
6044 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6045 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
6047 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6049 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6052 if (CallConv != CallingConv::Fast) {
6055 /* Compute GPR index associated with argument offset. */
6056 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6057 GPR_idx = std::min(GPR_idx, NumGPRs);
6060 // Promote integers to 64-bit values.
6061 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6062 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6063 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6064 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6067 // FIXME memcpy is used way more than necessary. Correctness first.
6068 // Note: "by value" is code for passing a structure by value, not
6070 if (Flags.isByVal()) {
6071 // Note: Size includes alignment padding, so
6072 // struct x { short a; char b; }
6073 // will have Size = 4. With #pragma pack(1), it will have Size = 3.
6074 // These are the proper values we need for right-justifying the
6075 // aggregate in a parameter register.
6076 unsigned Size = Flags.getByValSize();
6078 // An empty aggregate parameter takes up no storage and no
6083 if (CallConv == CallingConv::Fast)
6086 // All aggregates smaller than 8 bytes must be passed right-justified.
6087 if (Size==1 || Size==2 || Size==4) {
6088 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6089 if (GPR_idx != NumGPRs) {
6090 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6091 MachinePointerInfo(), VT);
6092 MemOpChains.push_back(Load.getValue(1));
6093 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6095 ArgOffset += PtrByteSize;
6100 if (GPR_idx == NumGPRs && Size < 8) {
6101 SDValue AddPtr = PtrOff;
6102 if (!isLittleEndian) {
6103 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6104 PtrOff.getValueType());
6105 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6107 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6110 ArgOffset += PtrByteSize;
6113 // Copy entire object into memory. There are cases where gcc-generated
6114 // code assumes it is there, even if it could be put entirely into
6115 // registers. (This is not what the doc says.)
6117 // FIXME: The above statement is likely due to a misunderstanding of the
6118 // documents. All arguments must be copied into the parameter area BY
6119 // THE CALLEE in the event that the callee takes the address of any
6120 // formal argument. That has not yet been implemented. However, it is
6121 // reasonable to use the stack area as a staging area for the register
6124 // Skip this for small aggregates, as we will use the same slot for a
6125 // right-justified copy, below.
6127 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6131 // When a register is available, pass a small aggregate right-justified.
6132 if (Size < 8 && GPR_idx != NumGPRs) {
6133 // The easiest way to get this right-justified in a register
6134 // is to copy the structure into the rightmost portion of a
6135 // local variable slot, then load the whole slot into the
6136 // register.
6137 // FIXME: The memcpy seems to produce pretty awful code for
6138 // small aggregates, particularly for packed ones.
6139 // FIXME: It would be preferable to use the slot in the
6140 // parameter save area instead of a new local variable.
6141 SDValue AddPtr = PtrOff;
6142 if (!isLittleEndian) {
6143 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6144 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6145 }
6146 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6147 CallSeqStart,
6148 Flags, DAG, dl);
6150 // Load the slot into the register.
6151 SDValue Load =
6152 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6153 MemOpChains.push_back(Load.getValue(1));
6154 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6156 // Done with this argument.
6157 ArgOffset += PtrByteSize;
6158 continue;
6159 }
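// Editor's sketch (assumption, illustration only): on big-endian, a 6-byte
// aggregate is copied to offset 8 - 6 = 2 within the doubleword slot, so
// the 8-byte load above leaves it right-justified in the low-order bytes
// of the GPR, as the ABI requires.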
6161 // For aggregates larger than PtrByteSize, copy the pieces of the
6162 // object that fit into registers from the parameter save area.
6163 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6164 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6165 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6166 if (GPR_idx != NumGPRs) {
6167 SDValue Load =
6168 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6169 MemOpChains.push_back(Load.getValue(1));
6170 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6171 ArgOffset += PtrByteSize;
6172 } else {
6173 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6174 break;
6175 }
6176 }
6177 continue;
6178 }
6180 switch (Arg.getSimpleValueType().SimpleTy) {
6181 default: llvm_unreachable("Unexpected ValueType for argument!");
6182 case MVT::i1:
6183 case MVT::i32:
6184 case MVT::i64:
6185 if (Flags.isNest()) {
6186 // The 'nest' parameter, if any, is passed in R11.
6187 RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6188 hasNest = true;
6189 break;
6190 }
6192 // These can be scalar arguments or elements of an integer array type
6193 // passed directly. Clang may use those instead of "byval" aggregate
6194 // types to avoid forcing arguments to memory unnecessarily.
6195 if (GPR_idx != NumGPRs) {
6196 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6197 } else {
6198 if (CallConv == CallingConv::Fast)
6199 ComputePtrOff();
6201 assert(HasParameterArea &&
6202 "Parameter area must exist to pass an argument in memory.");
6203 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6204 true, isTailCall, false, MemOpChains,
6205 TailCallArguments, dl);
6206 if (CallConv == CallingConv::Fast)
6207 ArgOffset += PtrByteSize;
6208 }
6209 if (CallConv != CallingConv::Fast)
6210 ArgOffset += PtrByteSize;
6211 break;
6212 case MVT::f32:
6213 case MVT::f64: {
6214 // These can be scalar arguments or elements of a float array type
6215 // passed directly. The latter are used to implement ELFv2 homogeneous
6216 // float aggregates.
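// Editor's example (assumption, not in the original): under ELFv2,
// struct { float a, b, c; } is a homogeneous float aggregate: a and b
// share one parameter doubleword, c starts the next one, each element
// also consumes an FPR while FPRs remain, and the final offset is only
// rounded up to a doubleword boundary after c (12 -> 16 bytes).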
6218 // Named arguments go into FPRs first, and once they overflow, the
6219 // remaining arguments go into GPRs and then the parameter save area.
6220 // Unnamed arguments for vararg functions always go to GPRs and
6221 // then the parameter save area. For now, put all arguments to vararg
6222 // routines always in both locations (FPR *and* GPR or stack slot).
6223 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
6224 bool NeededLoad = false;
6226 // First load the argument into the next available FPR.
6227 if (FPR_idx != NumFPRs)
6228 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6230 // Next, load the argument into GPR or stack slot if needed.
6231 if (!NeedGPROrStack)
6232 ;
6233 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
6234 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6235 // once we support fp <-> gpr moves.
6237 // In the non-vararg case, this can only ever happen in the
6238 // presence of f32 array types, since otherwise we never run
6239 // out of FPRs before running out of GPRs.
6240 SDValue ArgVal;
6242 // Double values are always passed in a single GPR.
6243 if (Arg.getValueType() != MVT::f32) {
6244 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6246 // Non-array float values are extended and passed in a GPR.
6247 } else if (!Flags.isInConsecutiveRegs()) {
6248 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6249 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6251 // If we have an array of floats, we collect every odd element
6252 // together with its predecessor into one GPR.
6253 } else if (ArgOffset % PtrByteSize != 0) {
6254 SDValue Lo, Hi;
6255 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6256 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6257 if (!isLittleEndian)
6258 std::swap(Lo, Hi);
6259 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
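// Editor's sketch (assumption): for float b[2] packed into one GPR,
// big-endian wants b[0] in bits 63..32 and b[1] in bits 31..0, which is
// why Lo and Hi are swapped above when the target is not little-endian.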
6261 // The final element, if even, goes into the first half of a GPR.
6262 } else if (Flags.isInConsecutiveRegsLast()) {
6263 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6264 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6265 if (!isLittleEndian)
6266 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6267 DAG.getConstant(32, dl, MVT::i32));
6269 // Non-final even elements are skipped; they will be handled
6270 // together with the subsequent argument on the next go-around.
6271 } else
6272 ArgVal = SDValue();
6274 if (ArgVal.getNode())
6275 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6276 } else {
6277 if (CallConv == CallingConv::Fast)
6278 ComputePtrOff();
6280 // Single-precision floating-point values are mapped to the
6281 // second (rightmost) word of the stack doubleword.
6282 if (Arg.getValueType() == MVT::f32 &&
6283 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6284 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6285 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6286 }
6288 assert(HasParameterArea &&
6289 "Parameter area must exist to pass an argument in memory.");
6290 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6291 true, isTailCall, false, MemOpChains,
6292 TailCallArguments, dl);
6294 NeededLoad = true;
6295 }
6296 // When passing an array of floats, the array occupies consecutive
6297 // space in the argument area; only round up to the next doubleword
6298 // at the end of the array. Otherwise, each float takes 8 bytes.
6299 if (CallConv != CallingConv::Fast || NeededLoad) {
6300 ArgOffset += (Arg.getValueType() == MVT::f32 &&
6301 Flags.isInConsecutiveRegs()) ? 4 : 8;
6302 if (Flags.isInConsecutiveRegsLast())
6303 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6304 }
6305 break;
6306 }
6307 case MVT::v4f32:
6308 case MVT::v4i32:
6309 case MVT::v8i16:
6310 case MVT::v16i8:
6311 case MVT::v2f64:
6312 case MVT::v2i64:
6313 case MVT::v1i128:
6314 case MVT::f128:
6315 if (!Subtarget.hasQPX()) {
6316 // These can be scalar arguments or elements of a vector array type
6317 // passed directly. The latter are used to implement ELFv2 homogeneous
6318 // vector aggregates.
6320 // For a varargs call, named arguments go into VRs or on the stack as
6321 // usual; unnamed arguments always go to the stack or the corresponding
6322 // GPRs when within range. For now, we always put the value in both
6323 // locations (or even all three).
6324 if (isVarArg) {
6325 assert(HasParameterArea &&
6326 "Parameter area must exist if we have a varargs call.");
6327 // We could elide this store in the case where the object fits
6328 // entirely in R registers. Maybe later.
6329 SDValue Store =
6330 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6331 MemOpChains.push_back(Store);
6332 if (VR_idx != NumVRs) {
6333 SDValue Load =
6334 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6335 MemOpChains.push_back(Load.getValue(1));
6336 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6337 }
6338 ArgOffset += 16;
6339 for (unsigned i=0; i<16; i+=PtrByteSize) {
6340 if (GPR_idx == NumGPRs)
6341 break;
6342 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6343 DAG.getConstant(i, dl, PtrVT));
6344 SDValue Load =
6345 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6346 MemOpChains.push_back(Load.getValue(1));
6347 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6348 }
6349 break;
6350 }
6352 // Non-varargs Altivec params go into VRs or on the stack.
6353 if (VR_idx != NumVRs) {
6354 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6355 } else {
6356 if (CallConv == CallingConv::Fast)
6357 ComputePtrOff();
6359 assert(HasParameterArea &&
6360 "Parameter area must exist to pass an argument in memory.");
6361 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6362 true, isTailCall, true, MemOpChains,
6363 TailCallArguments, dl);
6364 if (CallConv == CallingConv::Fast)
6365 ArgOffset += 16;
6366 }
6368 if (CallConv != CallingConv::Fast)
6369 ArgOffset += 16;
6370 break;
6371 } // not QPX
6373 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6374 "Invalid QPX parameter type");
6376 LLVM_FALLTHROUGH;
6377 case MVT::v4f64:
6378 case MVT::v4i1: {
6379 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6380 if (isVarArg) {
6381 assert(HasParameterArea &&
6382 "Parameter area must exist if we have a varargs call.");
6383 // We could elide this store in the case where the object fits
6384 // entirely in R registers. Maybe later.
6385 SDValue Store =
6386 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6387 MemOpChains.push_back(Store);
6388 if (QFPR_idx != NumQFPRs) {
6389 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6390 PtrOff, MachinePointerInfo());
6391 MemOpChains.push_back(Load.getValue(1));
6392 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6393 }
6394 ArgOffset += (IsF32 ? 16 : 32);
6395 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
6396 if (GPR_idx == NumGPRs)
6397 break;
6398 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6399 DAG.getConstant(i, dl, PtrVT));
6400 SDValue Load =
6401 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6402 MemOpChains.push_back(Load.getValue(1));
6403 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6404 }
6405 break;
6406 }
6408 // Non-varargs QPX params go into registers or on the stack.
6409 if (QFPR_idx != NumQFPRs) {
6410 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6411 } else {
6412 if (CallConv == CallingConv::Fast)
6413 ComputePtrOff();
6415 assert(HasParameterArea &&
6416 "Parameter area must exist to pass an argument in memory.");
6417 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6418 true, isTailCall, true, MemOpChains,
6419 TailCallArguments, dl);
6420 if (CallConv == CallingConv::Fast)
6421 ArgOffset += (IsF32 ? 16 : 32);
6422 }
6424 if (CallConv != CallingConv::Fast)
6425 ArgOffset += (IsF32 ? 16 : 32);
6426 break;
6427 }
6428 }
6431 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6432 "mismatch in size of parameter area");
6433 (void)NumBytesActuallyUsed;
6435 if (!MemOpChains.empty())
6436 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6438 // Check if this is an indirect call (MTCTR/BCTRL).
6439 // See prepareDescriptorIndirectCall and buildCallOperands for more
6440 // information about calls through function pointers in the 64-bit SVR4 ABI.
6441 if (!isTailCall && !isPatchPoint &&
6442 !isFunctionGlobalAddress(Callee) &&
6443 !isa<ExternalSymbolSDNode>(Callee)) {
6444 // Load r2 into a virtual register and store it to the TOC save area.
6445 setUsesTOCBasePtr(DAG);
6446 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6447 // TOC save area offset.
6448 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6449 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6450 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6451 Chain = DAG.getStore(
6452 Val.getValue(1), dl, Val, AddPtr,
6453 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
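// Editor's note (illustrative assumption, not in the original): the
// resulting indirect-call sequence looks roughly like
//   std r2, 24(r1)   # save caller's TOC pointer in the TOC save slot
//   mtctr r12        # r12 holds the callee entry point (ELFv2)
//   bctrl
//   ld r2, 24(r1)    # restore the TOC pointer after the call
// where 24 is getTOCSaveOffset() for ELFv2 (40 for ELFv1).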
6454 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6455 // This does not mean the MTCTR instruction must use R12; it's easier
6456 // to model this as an extra parameter, so do that.
6457 if (isELFv2ABI && !isPatchPoint)
6458 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6459 }
6461 // Build a sequence of copy-to-reg nodes chained together with token chain
6462 // and flag operands which copy the outgoing args into the appropriate regs.
6463 SDValue InFlag;
6464 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6465 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6466 RegsToPass[i].second, InFlag);
6467 InFlag = Chain.getValue(1);
6468 }
6470 if (isTailCall && !IsSibCall)
6471 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6472 TailCallArguments);
6474 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6475 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
6476 SPDiff, NumBytes, Ins, InVals, CS);
6477 }
6479 SDValue PPCTargetLowering::LowerCall_Darwin(
6480 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6481 bool isTailCall, bool isPatchPoint,
6482 const SmallVectorImpl<ISD::OutputArg> &Outs,
6483 const SmallVectorImpl<SDValue> &OutVals,
6484 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6485 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6486 ImmutableCallSite CS) const {
6487 unsigned NumOps = Outs.size();
6489 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6490 bool isPPC64 = PtrVT == MVT::i64;
6491 unsigned PtrByteSize = isPPC64 ? 8 : 4;
6493 MachineFunction &MF = DAG.getMachineFunction();
6495 // Mark this function as potentially containing a function that contains a
6496 // tail call. As a consequence, the frame pointer will be used for dynamic
6497 // stack allocation and for restoring the caller's stack pointer in this
6498 // function's epilogue. This is done because, by tail calling, the called
6499 // function might overwrite the value in this function's (MF) stack slot 0(SP).
6500 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6501 CallConv == CallingConv::Fast)
6502 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6504 // Count how many bytes are to be pushed on the stack, including the linkage
6505 // area, and parameter passing area. We start with 24/48 bytes, which is
6506 // prereserved space for [SP][CR][LR][3 x unused].
6507 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6508 unsigned NumBytes = LinkageSize;
6510 // Add up all the space actually used.
6511 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6512 // they all go in registers, but we must reserve stack space for them for
6513 // possible use by the caller. In varargs or 64-bit calls, parameters are
6514 // assigned stack space in order, with padding so Altivec parameters are
6515 // 16-byte aligned.
6516 unsigned nAltivecParamsAtEnd = 0;
6517 for (unsigned i = 0; i != NumOps; ++i) {
6518 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6519 EVT ArgVT = Outs[i].VT;
6520 // Varargs Altivec parameters are padded to a 16 byte boundary.
6521 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6522 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6523 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6524 if (!isVarArg && !isPPC64) {
6525 // Non-varargs Altivec parameters go after all the non-Altivec
6526 // parameters; handle those later so we know how much padding we need.
6527 nAltivecParamsAtEnd++;
6528 continue;
6529 }
6530 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
6531 NumBytes = ((NumBytes+15)/16)*16;
6532 }
6533 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6534 }
6536 // Allow for Altivec parameters at the end, if needed.
6537 if (nAltivecParamsAtEnd) {
6538 NumBytes = ((NumBytes+15)/16)*16;
6539 NumBytes += 16*nAltivecParamsAtEnd;
6540 }
6542 // The prolog code of the callee may store up to 8 GPR argument registers to
6543 // the stack, allowing va_start to index over them in memory if it is varargs.
6544 // Because we cannot tell if this is needed on the caller side, we have to
6545 // conservatively assume that it is needed. As such, make sure we have at
6546 // least enough stack space for the caller to store the 8 GPRs.
6547 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
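// Editor's arithmetic note (assumption, illustration only): on 64-bit
// Darwin this floor is 48 (linkage) + 8 * 8 = 112 bytes; on 32-bit it is
// 24 + 8 * 4 = 56 bytes.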
6549 // Tail call needs the stack to be aligned.
6550 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6551 CallConv == CallingConv::Fast)
6552 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6554 // Calculate by how many bytes the stack has to be adjusted in case of tail
6555 // call optimization.
6556 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6558 // To protect arguments on the stack from being clobbered in a tail call,
6559 // force all the loads to happen before doing any other lowering.
6560 if (isTailCall)
6561 Chain = DAG.getStackArgumentTokenFactor(Chain);
6563 // Adjust the stack pointer for the new arguments...
6564 // These operations are automatically eliminated by the prolog/epilog pass
6565 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6566 SDValue CallSeqStart = Chain;
6568 // Load the return address and frame pointer so they can be moved somewhere
6569 // else later.
6570 SDValue LROp, FPOp;
6571 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6573 // Set up a copy of the stack pointer for use loading and storing any
6574 // arguments that may not fit in the registers available for argument
6575 // passing.
6576 SDValue StackPtr;
6577 if (isPPC64)
6578 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6579 else
6580 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6582 // Figure out which arguments are going to go in registers, and which in
6583 // memory. Also, if this is a vararg function, floating point operations
6584 // must be stored to our stack, and loaded into integer regs as well, if
6585 // any integer regs are available for argument passing.
6586 unsigned ArgOffset = LinkageSize;
6587 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6589 static const MCPhysReg GPR_32[] = { // 32-bit registers.
6590 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6591 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6592 };
6593 static const MCPhysReg GPR_64[] = { // 64-bit registers.
6594 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6595 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6596 };
6597 static const MCPhysReg VR[] = {
6598 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6599 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6600 };
6601 const unsigned NumGPRs = array_lengthof(GPR_32);
6602 const unsigned NumFPRs = 13;
6603 const unsigned NumVRs = array_lengthof(VR);
6605 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6607 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6608 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6610 SmallVector<SDValue, 8> MemOpChains;
6611 for (unsigned i = 0; i != NumOps; ++i) {
6612 SDValue Arg = OutVals[i];
6613 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6615 // PtrOff will be used to store the current argument to the stack if a
6616 // register cannot be found for it.
6617 SDValue PtrOff;
6619 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6621 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6623 // On PPC64, promote integers to 64-bit values.
6624 if (isPPC64 && Arg.getValueType() == MVT::i32) {
6625 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6626 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6627 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6630 // FIXME memcpy is used way more than necessary. Correctness first.
6631 // Note: "by value" is code for passing a structure by value, not
6632 // "passing pointer by value".
6633 if (Flags.isByVal()) {
6634 unsigned Size = Flags.getByValSize();
6635 // Very small objects are passed right-justified. Everything else is
6636 // passed left-justified.
6637 if (Size==1 || Size==2) {
6638 EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6639 if (GPR_idx != NumGPRs) {
6640 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6641 MachinePointerInfo(), VT);
6642 MemOpChains.push_back(Load.getValue(1));
6643 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6645 ArgOffset += PtrByteSize;
6646 } else {
6647 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6648 PtrOff.getValueType());
6649 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6650 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6651 CallSeqStart,
6652 Flags, DAG, dl);
6653 ArgOffset += PtrByteSize;
6654 }
6655 continue;
6656 }
6657 // Copy entire object into memory. There are cases where gcc-generated
6658 // code assumes it is there, even if it could be put entirely into
6659 // registers. (This is not what the doc says.)
6660 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6661 CallSeqStart,
6662 Flags, DAG, dl);
6664 // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6665 // copy the pieces of the object that fit into registers from the
6666 // parameter save area.
6667 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6668 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6669 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6670 if (GPR_idx != NumGPRs) {
6671 SDValue Load =
6672 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6673 MemOpChains.push_back(Load.getValue(1));
6674 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6675 ArgOffset += PtrByteSize;
6676 } else {
6677 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6678 break;
6679 }
6680 }
6681 continue;
6682 }
6684 switch (Arg.getSimpleValueType().SimpleTy) {
6685 default: llvm_unreachable("Unexpected ValueType for argument!");
6686 case MVT::i1:
6687 case MVT::i32:
6688 case MVT::i64:
6689 if (GPR_idx != NumGPRs) {
6690 if (Arg.getValueType() == MVT::i1)
6691 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6693 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6694 } else {
6695 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6696 isPPC64, isTailCall, false, MemOpChains,
6697 TailCallArguments, dl);
6698 }
6699 ArgOffset += PtrByteSize;
6700 break;
6701 case MVT::f32:
6702 case MVT::f64:
6703 if (FPR_idx != NumFPRs) {
6704 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6706 if (isVarArg) {
6707 SDValue Store =
6708 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6709 MemOpChains.push_back(Store);
6711 // Float varargs are always shadowed in available integer registers
6712 if (GPR_idx != NumGPRs) {
6713 SDValue Load =
6714 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6715 MemOpChains.push_back(Load.getValue(1));
6716 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6717 }
6718 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6719 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6720 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6721 SDValue Load =
6722 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6723 MemOpChains.push_back(Load.getValue(1));
6724 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6725 }
6726 } else {
6727 // If we have any FPRs remaining, we may also have GPRs remaining.
6728 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6729 // GPRs.
6730 if (GPR_idx != NumGPRs)
6731 ++GPR_idx;
6732 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6733 !isPPC64) // PPC64 has 64-bit GPRs, obviously :)
6734 ++GPR_idx;
6735 }
6736 } else
6737 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6738 isPPC64, isTailCall, false, MemOpChains,
6739 TailCallArguments, dl);
6740 if (isPPC64)
6741 ArgOffset += 8;
6742 else
6743 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6744 break;
6745 case MVT::v4f32:
6746 case MVT::v4i32:
6747 case MVT::v8i16:
6748 case MVT::v16i8:
6749 if (isVarArg) {
6750 // These go aligned on the stack, or in the corresponding R registers
6751 // when within range. The Darwin PPC ABI doc claims they also go in
6752 // V registers; in fact gcc does this only for arguments that are
6753 // prototyped, not for those that match the ellipsis (...). We do it for
6754 // all arguments, which seems to work.
6755 while (ArgOffset % 16 !=0) {
6756 ArgOffset += PtrByteSize;
6757 if (GPR_idx != NumGPRs)
6758 GPR_idx++;
6759 }
6760 // We could elide this store in the case where the object fits
6761 // entirely in R registers. Maybe later.
6762 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6763 DAG.getConstant(ArgOffset, dl, PtrVT));
6764 SDValue Store =
6765 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6766 MemOpChains.push_back(Store);
6767 if (VR_idx != NumVRs) {
6768 SDValue Load =
6769 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6770 MemOpChains.push_back(Load.getValue(1));
6771 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6772 }
6773 ArgOffset += 16;
6774 for (unsigned i=0; i<16; i+=PtrByteSize) {
6775 if (GPR_idx == NumGPRs)
6776 break;
6777 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6778 DAG.getConstant(i, dl, PtrVT));
6779 SDValue Load =
6780 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6781 MemOpChains.push_back(Load.getValue(1));
6782 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6783 }
6784 break;
6785 }
6787 // Non-varargs Altivec params generally go in registers, but have
6788 // stack space allocated at the end.
6789 if (VR_idx != NumVRs) {
6790 // Doesn't have GPR space allocated.
6791 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6792 } else if (nAltivecParamsAtEnd==0) {
6793 // We are emitting Altivec params in order.
6794 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6795 isPPC64, isTailCall, true, MemOpChains,
6796 TailCallArguments, dl);
6797 ArgOffset += 16;
6798 }
6799 break;
6800 }
6801 }
6802 // If all Altivec parameters fit in registers, as they usually do,
6803 // they get stack space following the non-Altivec parameters. We
6804 // don't track this here because nobody below needs it.
6805 // If there are more Altivec parameters than fit in registers emit
6806 // the stores here.
6807 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
6808 unsigned j = 0;
6809 // Offset is aligned; skip 1st 12 params which go in V registers.
6810 ArgOffset = ((ArgOffset+15)/16)*16;
6811 ArgOffset += 12*16;
6812 for (unsigned i = 0; i != NumOps; ++i) {
6813 SDValue Arg = OutVals[i];
6814 EVT ArgType = Outs[i].VT;
6815 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6816 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6817 if (++j > NumVRs) {
6818 SDValue PtrOff;
6819 // We are emitting Altivec params in order.
6820 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6821 isPPC64, isTailCall, true, MemOpChains,
6822 TailCallArguments, dl);
6823 ArgOffset += 16;
6824 }
6825 }
6826 }
6827 }
6829 if (!MemOpChains.empty())
6830 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6832 // On Darwin, R12 must contain the address of an indirect callee. This does
6833 // not mean the MTCTR instruction must use R12; it's easier to model this as
6834 // an extra parameter, so do that.
6835 if (!isTailCall &&
6836 !isFunctionGlobalAddress(Callee) &&
6837 !isa<ExternalSymbolSDNode>(Callee) &&
6838 !isBLACompatibleAddress(Callee, DAG))
6839 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6840 PPC::R12), Callee));
6842 // Build a sequence of copy-to-reg nodes chained together with token chain
6843 // and flag operands which copy the outgoing args into the appropriate regs.
6844 SDValue InFlag;
6845 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6846 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6847 RegsToPass[i].second, InFlag);
6848 InFlag = Chain.getValue(1);
6849 }
6851 if (isTailCall)
6852 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6853 TailCallArguments);
6855 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6856 /* unused except on PPC64 ELFv1 */ false, DAG,
6857 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6858 NumBytes, Ins, InVals, CS);
6859 }
6861 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6862 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6863 CCState &State) {
6865 if (ValVT == MVT::f128)
6866 report_fatal_error("f128 is unimplemented on AIX.");
6868 if (ArgFlags.isByVal())
6869 report_fatal_error("Passing structure by value is unimplemented.");
6871 if (ArgFlags.isNest())
6872 report_fatal_error("Nest arguments are unimplemented.");
6874 if (ValVT.isVector() || LocVT.isVector())
6875 report_fatal_error("Vector arguments are unimplemented on AIX.");
6877 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6878 State.getMachineFunction().getSubtarget());
6879 const bool IsPPC64 = Subtarget.isPPC64();
6880 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6882 static const MCPhysReg GPR_32[] = {// 32-bit registers.
6883 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6884 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6885 static const MCPhysReg GPR_64[] = {// 64-bit registers.
6886 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6887 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6889 // Arguments always reserve parameter save area.
6890 switch (ValVT.SimpleTy) {
6891 default:
6892 report_fatal_error("Unhandled value type for argument.");
6893 case MVT::i64:
6894 // i64 arguments should have been split to i32 for PPC32.
6895 assert(IsPPC64 && "PPC32 should have split i64 values.");
6896 LLVM_FALLTHROUGH;
6897 case MVT::i1:
6898 case MVT::i32:
6899 State.AllocateStack(PtrByteSize, PtrByteSize);
6900 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6901 MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6902 // Promote integers if needed.
6903 if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
6904 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6905 : CCValAssign::LocInfo::ZExt;
6906 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6907 } else
6909 report_fatal_error("Handling of placing parameters on the stack is "
6910 "unimplemented!");
6911 return false;
6913 case MVT::f32:
6914 case MVT::f64: {
6915 // Parameter save area (PSA) is reserved even if the float passes in fpr.
6916 const unsigned StoreSize = LocVT.getStoreSize();
6917 // Floats are always 4-byte aligned in the PSA on AIX.
6918 // This includes f64 in 64-bit mode for ABI compatibility.
6919 State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);
6920 if (unsigned Reg = State.AllocateReg(FPR))
6921 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6922 else
6923 report_fatal_error("Handling of placing parameters on the stack is "
6924 "unimplemented!");
6926 // AIX requires that GPRs are reserved for float arguments.
6927 // Successfully reserved GPRs are only initialized for vararg calls.
6928 MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6929 for (unsigned I = 0; I < StoreSize; I += PtrByteSize) {
6930 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6931 if (State.isVarArg()) {
6932 // Custom handling is required for:
6933 // f64 in PPC32 needs to be split into 2 GPRs.
6934 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6935 State.addLoc(
6936 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6937 }
6938 } else if (State.isVarArg()) {
6939 report_fatal_error("Handling of placing parameters on the stack is "
6940 "unimplemented!");
6941 }
6942 }
6944 return false;
6945 }
6946 }
6948 return true;
6949 }
6950 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6951 bool IsPPC64) {
6952 assert((IsPPC64 || SVT != MVT::i64) &&
6953 "i64 should have been split for 32-bit codegen.");
6957 report_fatal_error("Unexpected value type for formal argument");
6961 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6963 return &PPC::F4RCRegClass;
6965 return &PPC::F8RCRegClass;
6969 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6970 SelectionDAG &DAG, SDValue ArgValue,
6971 MVT LocVT, const SDLoc &dl) {
6972 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
6973 assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
6975 if (Flags.isSExt())
6976 ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6977 DAG.getValueType(ValVT));
6978 else if (Flags.isZExt())
6979 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6980 DAG.getValueType(ValVT));
6982 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
6983 }
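// Editor's sketch (illustrative assumption): an i8 argument arriving
// sign-extended in a 64-bit GPR becomes
//   AssertSext(CopyFromReg(...), i8) -> TRUNCATE to i8
// so later combines know the high bits already replicate the sign bit.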
6985 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
6986 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
6987 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6988 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6990 assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
6991 CallConv == CallingConv::Fast) &&
6992 "Unexpected calling convention!");
6995 report_fatal_error("This call type is unimplemented on AIX.");
6997 if (getTargetMachine().Options.GuaranteedTailCallOpt)
6998 report_fatal_error("Tail call support is unimplemented on AIX.");
7001 report_fatal_error("Soft float support is unimplemented on AIX.");
7003 const PPCSubtarget &Subtarget =
7004 static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7005 if (Subtarget.hasQPX())
7006 report_fatal_error("QPX support is not supported on AIX.");
7008 const bool IsPPC64 = Subtarget.isPPC64();
7009 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7011 // Assign locations to all of the incoming arguments.
7012 SmallVector<CCValAssign, 16> ArgLocs;
7013 MachineFunction &MF = DAG.getMachineFunction();
7014 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7016 // Reserve space for the linkage area on the stack.
7017 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7018 // On AIX a minimum of 8 words is saved to the parameter save area.
7019 const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7020 CCInfo.AllocateStack(LinkageSize + MinParameterSaveArea, PtrByteSize);
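// Editor's arithmetic note (assumption, illustration only): on 64-bit AIX
// this reserves 48 (linkage) + 8 * 8 = 112 bytes before any argument is
// assigned; on 32-bit AIX it is 24 + 8 * 4 = 56 bytes.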
7021 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7023 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7024 CCValAssign &VA = ArgLocs[i];
7026 ISD::ArgFlagsTy Flags = Ins[i].Flags;
7027 if (VA.isRegLoc()) {
7028 EVT ValVT = VA.getValVT();
7029 MVT LocVT = VA.getLocVT();
7030 MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7031 unsigned VReg =
7032 MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7033 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7034 if (ValVT.isScalarInteger() &&
7035 (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
7036 ArgValue =
7037 truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7038 }
7039 InVals.push_back(ArgValue);
7040 } else {
7041 report_fatal_error("Handling of formal arguments on the stack is "
7042 "unimplemented!");
7043 }
7044 }
7046 // Area that is at least reserved in the caller of this function.
7047 unsigned MinReservedArea = CCInfo.getNextStackOffset();
7049 // Set the size that is at least reserved in caller of this function. Tail
7050 // call optimized function's reserved stack space needs to be aligned so
7051 // that taking the difference between two stack areas will result in an
7052 // aligned stack.
7053 MinReservedArea =
7054 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
7055 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7056 FuncInfo->setMinReservedArea(MinReservedArea);
7058 return Chain;
7059 }
7061 SDValue PPCTargetLowering::LowerCall_AIX(
7062 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
7063 bool isTailCall, bool isPatchPoint,
7064 const SmallVectorImpl<ISD::OutputArg> &Outs,
7065 const SmallVectorImpl<SDValue> &OutVals,
7066 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7067 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7068 ImmutableCallSite CS) const {
7070 assert((CallConv == CallingConv::C ||
7071 CallConv == CallingConv::Cold ||
7072 CallConv == CallingConv::Fast) && "Unexpected calling convention!");
7074 if (isPatchPoint)
7075 report_fatal_error("This call type is unimplemented on AIX.");
7077 const PPCSubtarget& Subtarget =
7078 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7079 if (Subtarget.hasQPX())
7080 report_fatal_error("QPX is not supported on AIX.");
7081 if (Subtarget.hasAltivec())
7082 report_fatal_error("Altivec support is unimplemented on AIX.");
7084 MachineFunction &MF = DAG.getMachineFunction();
7085 SmallVector<CCValAssign, 16> ArgLocs;
7086 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7088 // Reserve space for the linkage save area (LSA) on the stack.
7089 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7090 // [SP][CR][LR][2 x reserved][TOC].
7091 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7092 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7093 const bool IsPPC64 = Subtarget.isPPC64();
7094 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7095 CCInfo.AllocateStack(LinkageSize, PtrByteSize);
7096 CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7098 // The prolog code of the callee may store up to 8 GPR argument registers to
7099 // the stack, allowing va_start to index over them in memory if the callee
7100 // is variadic.
7101 // Because we cannot tell if this is needed on the caller side, we have to
7102 // conservatively assume that it is needed. As such, make sure we have at
7103 // least enough stack space for the caller to store the 8 GPRs.
7104 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7105 const unsigned NumBytes = LinkageSize + MinParameterSaveAreaSize;
7107 // Adjust the stack pointer for the new arguments...
7108 // These operations are automatically eliminated by the prolog/epilog pass.
7109 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7110 SDValue CallSeqStart = Chain;
7112 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7114 for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7115 CCValAssign &VA = ArgLocs[I++];
7117 if (VA.isMemLoc())
7118 report_fatal_error("Handling of placing parameters on the stack is "
7119 "unimplemented!");
7120 if (!VA.isRegLoc())
7121 report_fatal_error(
7122 "Unexpected non-register location for function call argument.");
7124 SDValue Arg = OutVals[VA.getValNo()];
7126 if (!VA.needsCustom()) {
7127 switch (VA.getLocInfo()) {
7128 default:
7129 report_fatal_error("Unexpected argument extension type.");
7130 case CCValAssign::Full:
7131 break;
7132 case CCValAssign::ZExt:
7133 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7134 break;
7135 case CCValAssign::SExt:
7136 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7137 break;
7138 }
7139 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7141 continue;
7142 }
7144 // Custom handling is used for GPR initializations for vararg float
7145 // arguments.
7146 assert(isVarArg && VA.getValVT().isFloatingPoint() &&
7147 VA.getLocVT().isInteger() &&
7148 "Unexpected custom register handling for calling convention.");
7151 DAG.getBitcast(MVT::getIntegerVT(VA.getValVT().getSizeInBits()), Arg);
7153 if (Arg.getValueType().getStoreSize() == VA.getLocVT().getStoreSize())
7154 // f32 in 32-bit GPR
7155 // f64 in 64-bit GPR
7156 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7157 else if (Arg.getValueType().getSizeInBits() < VA.getLocVT().getSizeInBits())
7158 // f32 in 64-bit GPR.
7159 RegsToPass.push_back(std::make_pair(
7160 VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, VA.getLocVT())));
7161 else {
7162 // f64 in two 32-bit GPRs
7163 // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7164 assert(Arg.getValueType() == MVT::f64 && isVarArg && !IsPPC64 &&
7165 "Unexpected custom register for argument!");
7166 CCValAssign &GPR1 = VA;
7167 SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7168 DAG.getConstant(32, dl, MVT::i8));
7169 RegsToPass.push_back(std::make_pair(
7170 GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
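// Editor's sketch (assumption, illustration only): for a vararg double
// with bits 0x400921FB54442D18, the SRL above yields 0x400921FB for the
// first GPR (most significant word) and the truncate below yields
// 0x54442D18 for the second.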
7171 assert(I != E && "A second custom GPR is expected!");
7172 CCValAssign &GPR2 = ArgLocs[I++];
7173 assert(GPR2.isRegLoc() && GPR2.getValNo() == GPR1.getValNo() &&
7174 GPR2.needsCustom() && "A second custom GPR is expected!");
7175 RegsToPass.push_back(std::make_pair(
7176 GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7177 }
7178 }
7180 // For indirect calls, we need to save the TOC base to the stack for
7181 // restoration after the call.
7182 if (!isTailCall && !isPatchPoint &&
7183 !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee)) {
7184 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7185 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7186 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7187 const unsigned TOCSaveOffset =
7188 Subtarget.getFrameLowering()->getTOCSaveOffset();
7190 setUsesTOCBasePtr(DAG);
7191 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7192 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7193 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7194 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7195 Chain = DAG.getStore(
7196 Val.getValue(1), dl, Val, AddPtr,
7197 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7198 }
7200 // Build a sequence of copy-to-reg nodes chained together with token chain
7201 // and flag operands which copy the outgoing args into the appropriate regs.
7202 SDValue InFlag;
7203 for (auto Reg : RegsToPass) {
7204 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7205 InFlag = Chain.getValue(1);
7206 }
7208 const int SPDiff = 0;
7209 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
7210 /* unused except on PPC64 ELFv1 */ false, DAG, RegsToPass,
7211 InFlag, Chain, CallSeqStart, Callee, SPDiff, NumBytes, Ins,
7212 InVals, CS);
7213 }
7215 bool
7216 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7217 MachineFunction &MF, bool isVarArg,
7218 const SmallVectorImpl<ISD::OutputArg> &Outs,
7219 LLVMContext &Context) const {
7220 SmallVector<CCValAssign, 16> RVLocs;
7221 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7222 return CCInfo.CheckReturn(
7223 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7224 ? RetCC_PPC_Cold
7225 : RetCC_PPC);
7226 }
7228 SDValue
7229 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7230 bool isVarArg,
7231 const SmallVectorImpl<ISD::OutputArg> &Outs,
7232 const SmallVectorImpl<SDValue> &OutVals,
7233 const SDLoc &dl, SelectionDAG &DAG) const {
7234 SmallVector<CCValAssign, 16> RVLocs;
7235 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7236 *DAG.getContext());
7237 CCInfo.AnalyzeReturn(Outs,
7238 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7239 ? RetCC_PPC_Cold
7240 : RetCC_PPC);
7242 SDValue Flag;
7243 SmallVector<SDValue, 4> RetOps(1, Chain);
7245 // Copy the result values into the output registers.
7246 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7247 CCValAssign &VA = RVLocs[i];
7248 assert(VA.isRegLoc() && "Can only return in registers!");
7250 SDValue Arg = OutVals[RealResIdx];
7252 switch (VA.getLocInfo()) {
7253 default: llvm_unreachable("Unknown loc info!");
7254 case CCValAssign::Full: break;
7255 case CCValAssign::AExt:
7256 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7257 break;
7258 case CCValAssign::ZExt:
7259 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7260 break;
7261 case CCValAssign::SExt:
7262 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7263 break;
7264 }
7265 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7266 bool isLittleEndian = Subtarget.isLittleEndian();
7267 // Legalize ret f64 -> ret 2 x i32.
7268 SDValue SVal =
7269 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7270 DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7271 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7272 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7273 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7274 DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7275 Flag = Chain.getValue(1);
7276 VA = RVLocs[++i]; // skip ahead to next loc
7277 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7278 } else
7279 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7280 Flag = Chain.getValue(1);
7281 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7282 }
7284 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
7285 const MCPhysReg *I =
7286 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
7287 if (I) {
7288 for (; *I; ++I) {
7290 if (PPC::G8RCRegClass.contains(*I))
7291 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
7292 else if (PPC::F8RCRegClass.contains(*I))
7293 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
7294 else if (PPC::CRRCRegClass.contains(*I))
7295 RetOps.push_back(DAG.getRegister(*I, MVT::i1));
7296 else if (PPC::VRRCRegClass.contains(*I))
7297 RetOps.push_back(DAG.getRegister(*I, MVT::Other));
7299 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
7300 }
7301 }
7303 RetOps[0] = Chain; // Update chain.
7305 // Add the flag if we have it.
7306 if (Flag.getNode())
7307 RetOps.push_back(Flag);
7309 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7310 }
7312 SDValue
7313 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7314 SelectionDAG &DAG) const {
7315 SDLoc dl(Op);
7317 // Get the correct type for integers.
7318 EVT IntVT = Op.getValueType();
7320 // Get the inputs.
7321 SDValue Chain = Op.getOperand(0);
7322 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7323 // Build a DYNAREAOFFSET node.
7324 SDValue Ops[2] = {Chain, FPSIdx};
7325 SDVTList VTs = DAG.getVTList(IntVT);
7326 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7327 }
7329 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7330 SelectionDAG &DAG) const {
7331 // When we pop the dynamic allocation we need to restore the SP link.
7332 SDLoc dl(Op);
7334 // Get the correct type for pointers.
7335 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7337 // Construct the stack pointer operand.
7338 bool isPPC64 = Subtarget.isPPC64();
7339 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7340 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
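// Editor's note (assumption): by PowerPC stack convention the word at
// 0(r1) is the back chain pointing to the caller's frame, so after the
// stack pointer is restored the saved link must be stored back at the
// new 0(r1), which is what the load/store pair below implements.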
7342 // Get the operands for the STACKRESTORE.
7343 SDValue Chain = Op.getOperand(0);
7344 SDValue SaveSP = Op.getOperand(1);
7346 // Load the old link SP.
7347 SDValue LoadLinkSP =
7348 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7350 // Restore the stack pointer.
7351 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7353 // Store the old link SP.
7354 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7355 }
7357 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7358 MachineFunction &MF = DAG.getMachineFunction();
7359 bool isPPC64 = Subtarget.isPPC64();
7360 EVT PtrVT = getPointerTy(MF.getDataLayout());
7362 // Get the current return address save index.
7364 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7365 int RASI = FI->getReturnAddrSaveIndex();
7367 // If the return address save index hasn't been defined yet:
7368 if (!RASI) {
7369 // Find out the fixed offset of the return address save area.
7370 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7371 // Allocate the frame index for the return address save area.
7372 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7373 // Save the result.
7374 FI->setReturnAddrSaveIndex(RASI);
7375 }
7376 return DAG.getFrameIndex(RASI, PtrVT);
7377 }
7379 SDValue
7380 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7381 MachineFunction &MF = DAG.getMachineFunction();
7382 bool isPPC64 = Subtarget.isPPC64();
7383 EVT PtrVT = getPointerTy(MF.getDataLayout());
7385 // Get the current frame pointer save index. The users of this index
7386 // are primarily DYNALLOC instructions.
7387 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7388 int FPSI = FI->getFramePointerSaveIndex();
7390 // If the frame pointer save index hasn't been defined yet:
7391 if (!FPSI) {
7392 // Find out the fixed offset of the frame pointer save area.
7393 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7394 // Allocate the frame index for the frame pointer save area.
7395 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7396 // Save the result.
7397 FI->setFramePointerSaveIndex(FPSI);
7398 }
7399 return DAG.getFrameIndex(FPSI, PtrVT);
7400 }
7402 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7403 SelectionDAG &DAG) const {
7404 // Get the inputs.
7405 SDValue Chain = Op.getOperand(0);
7406 SDValue Size = Op.getOperand(1);
7407 SDLoc dl(Op);
7409 // Get the correct type for pointers.
7410 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7412 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7413 DAG.getConstant(0, dl, PtrVT), Size);
7414 // Construct a node for the frame pointer save index.
7415 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7416 // Build a DYNALLOC node.
7417 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7418 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7419 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7420 }
7422 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7423 SelectionDAG &DAG) const {
7424 MachineFunction &MF = DAG.getMachineFunction();
7426 bool isPPC64 = Subtarget.isPPC64();
7427 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7429 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7430 return DAG.getFrameIndex(FI, PtrVT);
7431 }
7433 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7434 SelectionDAG &DAG) const {
7435 SDLoc DL(Op);
7436 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7437 DAG.getVTList(MVT::i32, MVT::Other),
7438 Op.getOperand(0), Op.getOperand(1));
7439 }
7441 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7442 SelectionDAG &DAG) const {
7443 SDLoc DL(Op);
7444 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7445 Op.getOperand(0), Op.getOperand(1));
7446 }
7448 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7449 if (Op.getValueType().isVector())
7450 return LowerVectorLoad(Op, DAG);
7452 assert(Op.getValueType() == MVT::i1 &&
7453 "Custom lowering only for i1 loads");
7455 // First, load 8 bits into 32 bits, then truncate to 1 bit.
7457 SDLoc dl(Op);
7458 LoadSDNode *LD = cast<LoadSDNode>(Op);
7460 SDValue Chain = LD->getChain();
7461 SDValue BasePtr = LD->getBasePtr();
7462 MachineMemOperand *MMO = LD->getMemOperand();
7464 SDValue NewLD =
7465 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7466 BasePtr, MVT::i8, MMO);
7467 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7469 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7470 return DAG.getMergeValues(Ops, dl);
7471 }
7473 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7474 if (Op.getOperand(1).getValueType().isVector())
7475 return LowerVectorStore(Op, DAG);
7477 assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7478 "Custom lowering only for i1 stores");
7480 // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7482 SDLoc dl(Op);
7483 StoreSDNode *ST = cast<StoreSDNode>(Op);
7485 SDValue Chain = ST->getChain();
7486 SDValue BasePtr = ST->getBasePtr();
7487 SDValue Value = ST->getValue();
7488 MachineMemOperand *MMO = ST->getMemOperand();
7490 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7491 Value);
7492 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7493 }
7495 // FIXME: Remove this once the ANDI glue bug is fixed:
7496 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7497 assert(Op.getValueType() == MVT::i1 &&
7498 "Custom lowering only for i1 results");
7501 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7504 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7505 SelectionDAG &DAG) const {
7507 // Implements a vector truncate that fits in a vector register as a shuffle.
7508 // We want to legalize vector truncates down to where the source fits in
7509 // a vector register (and target is therefore smaller than vector register
7510 // size). At that point legalization will try to custom lower the sub-legal
7511 // result and get here - where we can contain the truncate as a single target
7512 // operation.
7514 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7515 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7517 // We will implement it for big-endian ordering as this (where u denotes
7518 // undef):
7519 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7520 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7522 // The same operation in little-endian ordering will be:
7523 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7524 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
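// Editor's example (assumption, illustration only): for the v2i16 -> v2i8
// case above, the mask built below is <1, 3, u, u, ...> on big-endian and
// <0, 2, u, u, ...> on little-endian, where the u entries index into the
// second (undef) shuffle operand.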
7526 assert(Op.getValueType().isVector() && "Vector type expected.");
7528 SDLoc DL(Op);
7529 SDValue N1 = Op.getOperand(0);
7530 unsigned SrcSize = N1.getValueType().getSizeInBits();
7531 assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
7532 SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7534 EVT TrgVT = Op.getValueType();
7535 unsigned TrgNumElts = TrgVT.getVectorNumElements();
7536 EVT EltVT = TrgVT.getVectorElementType();
7537 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7538 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7540 // First list the elements we want to keep.
7541 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7542 SmallVector<int, 16> ShuffV;
7543 if (Subtarget.isLittleEndian())
7544 for (unsigned i = 0; i < TrgNumElts; ++i)
7545 ShuffV.push_back(i * SizeMult);
7546 else
7547 for (unsigned i = 1; i <= TrgNumElts; ++i)
7548 ShuffV.push_back(i * SizeMult - 1);
7550 // Populate the remaining elements with undefs.
7551 for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7552 ShuffV.push_back(WideNumElts + 1);
7555 SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
7556 return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
7557 }
7559 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
7560 /// possible.
7561 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7562 // Not FP? Not a fsel.
7563 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
7564 !Op.getOperand(2).getValueType().isFloatingPoint())
7565 return Op;
7567 bool HasNoInfs = DAG.getTarget().Options.NoInfsFPMath;
7568 bool HasNoNaNs = DAG.getTarget().Options.NoNaNsFPMath;
7569 // We might be able to do better than this under some circumstances, but in
7570 // general, fsel-based lowering of select is a finite-math-only optimization.
7571 // For more information, see section F.3 of the 2.06 ISA specification.
7572 // With ISA 3.0, we have xsmaxcdp/xsmincdp which are OK to emit even in the
7573 // presence of infinities.
7574 if (!Subtarget.hasP9Vector() && (!HasNoInfs || !HasNoNaNs))
7575 return Op;
7576 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7578 EVT ResVT = Op.getValueType();
7579 EVT CmpVT = Op.getOperand(0).getValueType();
7580 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7581 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
7582 SDLoc dl(Op);
7584 if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7585 switch (CC) {
7586 default:
7587 // Not a min/max but with finite math, we may still be able to use fsel.
7588 if (HasNoInfs && HasNoNaNs)
7589 break;
7590 return Op;
7591 case ISD::SETOGT:
7592 case ISD::SETGT:
7593 return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7594 case ISD::SETOLT:
7595 case ISD::SETLT:
7596 return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7597 }
7598 }
7600 // TODO: Propagate flags from the select rather than global settings.
7601 SDNodeFlags Flags;
7602 Flags.setNoInfs(true);
7603 Flags.setNoNaNs(true);
7605 // If the RHS of the comparison is a 0.0, we don't need to do the
7606 // subtraction at all.
7607 SDValue Sel1;
7608 if (isFloatingPointZero(RHS))
7609 switch (CC) {
7610 default: break; // SETUO etc aren't handled by fsel.
7611 case ISD::SETNE:
7612 std::swap(TV, FV);
7613 LLVM_FALLTHROUGH;
7614 case ISD::SETEQ:
7615 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7616 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7617 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7618 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7619 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7620 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7621 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7622 case ISD::SETULT:
7623 case ISD::SETLT:
7624 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7625 LLVM_FALLTHROUGH;
7626 case ISD::SETOGE:
7627 case ISD::SETGE:
7628 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7629 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7630 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7631 case ISD::SETUGT:
7632 case ISD::SETGT:
7633 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7634 LLVM_FALLTHROUGH;
7635 case ISD::SETULE:
7636 case ISD::SETLE:
7637 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7638 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7639 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7640 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7641 }
7643 SDValue Cmp;
7644 switch (CC) {
7645 default: break; // SETUO etc aren't handled by fsel.
7646 case ISD::SETNE:
7647 std::swap(TV, FV);
7648 LLVM_FALLTHROUGH;
7649 case ISD::SETEQ:
7650 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7651 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7652 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7653 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7654 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7655 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7656 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7657 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7658 case ISD::SETULT:
7659 case ISD::SETLT:
7660 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7661 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7662 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7663 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7664 case ISD::SETOGE:
7665 case ISD::SETGE:
7666 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7667 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7668 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7669 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7670 case ISD::SETUGT:
7671 case ISD::SETGT:
7672 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7673 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7674 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7675 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7676 case ISD::SETULE:
7677 case ISD::SETLE:
7678 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7679 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7680 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7681 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7682 }
7683 return Op;
7684 }
7686 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7687 SelectionDAG &DAG,
7688 const SDLoc &dl) const {
7689 assert(Op.getOperand(0).getValueType().isFloatingPoint());
7690 SDValue Src = Op.getOperand(0);
7691 if (Src.getValueType() == MVT::f32)
7692 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7694 SDValue Tmp;
7695 switch (Op.getSimpleValueType().SimpleTy) {
7696 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7697 case MVT::i32:
7698 Tmp = DAG.getNode(
7699 Op.getOpcode() == ISD::FP_TO_SINT
7700 ? PPCISD::FCTIWZ
7701 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7702 dl, MVT::f64, Src);
7703 break;
7704 case MVT::i64:
7705 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
7706 "i64 FP_TO_UINT is supported only with FPCVT");
7707 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
7708 PPCISD::FCTIDUZ,
7709 dl, MVT::f64, Src);
7710 break;
7711 }
7713 // Convert the FP value to an int value through memory.
7714 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7715 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
7716 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7717 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7718 MachinePointerInfo MPI =
7719 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7721 // Emit a store to the stack slot.
7722 SDValue Chain;
7723 if (i32Stack) {
7724 MachineFunction &MF = DAG.getMachineFunction();
7725 MachineMemOperand *MMO =
7726 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, 4);
7727 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
7728 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7729 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7730 } else
7731 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI);
7733 // Result is a load from the stack slot. If loading 4 bytes, make sure to
7734 // add in a bias on big endian.
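  // For example, an f64->i32 fctiwz result is stored as the full 8-byte FP
  // register image: on a big-endian target the 4 significant result bytes
  // are bytes 4..7 of the slot, hence the +4 below, while little-endian
  // targets read them from offset 0.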
  if (Op.getValueType() == MVT::i32 && !i32Stack) {
    FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
                        DAG.getConstant(4, dl, FIPtr.getValueType()));
    MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
  }

  RLI.Chain = Chain;
  RLI.Ptr = FIPtr;
  RLI.MPI = MPI;
}
/// Custom lowers floating point to integer conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert(Op.getOperand(0).getValueType().isFloatingPoint());
  SDValue Src = Op.getOperand(0);

  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);

  SDValue Tmp;
  switch (Op.getSimpleValueType().SimpleTy) {
  default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(
        Op.getOpcode() == ISD::FP_TO_SINT
            ? PPCISD::FCTIWZ
            : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
        dl, MVT::f64, Src);
    Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
    break;
  case MVT::i64:
    assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
           "i64 FP_TO_UINT is supported only with FPCVT");
    Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ,
                      dl, MVT::f64, Src);
    Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
    break;
  }
  return Tmp;
}

SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                          const SDLoc &dl) const {

  // FP to INT conversions are legal for f128.
  if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128))
    return Op;

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
    if (Op.getValueType() == MVT::i32) {
      if (Op.getOpcode() == ISD::FP_TO_SINT) {
        SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                                 MVT::f64, Op.getOperand(0),
                                 DAG.getIntPtrConstant(0, dl));
        SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                                 MVT::f64, Op.getOperand(0),
                                 DAG.getIntPtrConstant(1, dl));

        // Add the two halves of the long double in round-to-zero mode.
        SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);

        // Now use a smaller FP_TO_SINT.
        return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
      }
      if (Op.getOpcode() == ISD::FP_TO_UINT) {
        const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
        APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
        SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
        // X >= 2^31 ? (int)(X - 2^31) + 0x80000000 : (int)X
        // FIXME: generated code sucks.
        // TODO: Are there fast-math-flags to propagate to this FSUB?
        SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
                                   Op.getOperand(0), Tmp);
        True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
        True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
                           DAG.getConstant(0x80000000, dl, MVT::i32));
        SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
                                    Op.getOperand(0));
        return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
                               ISD::SETGE);
      }
    }
    return SDValue();
  }

  if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
    return LowerFP_TO_INTDirectMove(Op, DAG, dl);

  ReuseLoadInfo RLI;
  LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);

  return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                     RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
}

// We're trying to insert a regular store, S, and then a load, L. If the
// incoming value, O, is a load, we might just be able to have our load use the
// address used by O. However, we don't know if anything else will store to
// that address before we can load from it. To prevent this situation, we need
// to insert our load, L, into the chain as a peer of O. To do this, we give L
// the same chain operand as O, we create a token factor from the chain results
// of O and L, and we replace all uses of O's chain result with that token
// factor (see spliceIntoChain below for this last part).
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
                                            ReuseLoadInfo &RLI,
                                            SelectionDAG &DAG,
                                            ISD::LoadExtType ET) const {
  SDLoc dl(Op);
  if (ET == ISD::NON_EXTLOAD &&
      (Op.getOpcode() == ISD::FP_TO_UINT ||
       Op.getOpcode() == ISD::FP_TO_SINT) &&
      isOperationLegalOrCustom(Op.getOpcode(),
                               Op.getOperand(0).getValueType())) {

    LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
    return true;
  }

  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
  if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
      LD->isNonTemporal())
    return false;
  if (LD->getMemoryVT() != MemVT)
    return false;

  RLI.Ptr = LD->getBasePtr();
  if (LD->isIndexed() && !LD->getOffset().isUndef()) {
    assert(LD->getAddressingMode() == ISD::PRE_INC &&
           "Non-pre-inc AM on PPC?");
    RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
                          LD->getOffset());
  }

  RLI.Chain = LD->getChain();
  RLI.MPI = LD->getPointerInfo();
  RLI.IsDereferenceable = LD->isDereferenceable();
  RLI.IsInvariant = LD->isInvariant();
  RLI.Alignment = LD->getAlignment();
  RLI.AAInfo = LD->getAAInfo();
  RLI.Ranges = LD->getRanges();
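  // Indexed (pre-inc) loads produce (value, new pointer, chain), so the
  // chain is result 2; plain loads produce (value, chain), so it is result 1.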
  RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
  return true;
}

// Given the head of the old chain, ResChain, insert a token factor containing
// it and NewResChain, and make users of ResChain now be users of that token
// factor.
// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
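// For example, if stores S1 and S2 were chained after the original load O,
// splicing re-points them at TokenFactor(O.chain, L.chain), so anything that
// was ordered after O is now ordered after the new load L as well.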
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
                                        SDValue NewResChain,
                                        SelectionDAG &DAG) const {
  if (!ResChain)
    return;

  SDLoc dl(NewResChain);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                           NewResChain, DAG.getUNDEF(MVT::Other));
  assert(TF.getNode() != NewResChain.getNode() &&
         "A new TF really is required here");

  DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
  DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}
/// Analyze the profitability of a direct move: prefer a plain float load
/// over an integer load plus direct move when the loaded integer value has
/// no integer uses.
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
  SDNode *Origin = Op.getOperand(0).getNode();
  if (Origin->getOpcode() != ISD::LOAD)
    return true;

  // If there is no LXSIBZX/LXSIHZX, like Power8,
  // prefer direct move if the memory size is 1 or 2 bytes.
  MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
  if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
    return true;

  for (SDNode::use_iterator UI = Origin->use_begin(),
                            UE = Origin->use_end();
       UI != UE; ++UI) {

    // Only look at the users of the loaded value.
    if (UI.getUse().get().getResNo() != 0)
      continue;

    if (UI->getOpcode() != ISD::SINT_TO_FP &&
        UI->getOpcode() != ISD::UINT_TO_FP)
      return true;
  }

  return false;
}

/// Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert((Op.getValueType() == MVT::f32 ||
          Op.getValueType() == MVT::f64) &&
         "Invalid floating point type as target of conversion");
  assert(Subtarget.hasFPCVT() &&
         "Int to FP conversions with direct moves require FPCVT");
  SDValue FP;
  SDValue Src = Op.getOperand(0);
  bool SinglePrec = Op.getValueType() == MVT::f32;
  bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
  bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
  unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
                             (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);

  if (WordInt) {
    FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
                     dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  } else {
    FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  }

  return FP;
}

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {

  EVT VecVT = Vec.getValueType();
  assert(VecVT.isVector() && "Expected a vector type.");
  assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");

  EVT EltVT = VecVT.getVectorElementType();
  unsigned WideNumElts = 128 / EltVT.getSizeInBits();
  EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);

  unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
  SmallVector<SDValue, 16> Ops(NumConcat);
  SDValue UndefVec = DAG.getUNDEF(VecVT);
  Ops[0] = Vec;
  for (unsigned i = 1; i < NumConcat; ++i)
    Ops[i] = UndefVec;

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
}
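
// For example, a v2i32 operand widens to v4i32 with NumConcat == 2 (one
// undef v2i32 appended), while a v4i8 operand would use NumConcat == 4.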
SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                                const SDLoc &dl) const {

  unsigned Opc = Op.getOpcode();
  assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
         "Unexpected conversion type");
  assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
         "Supports conversions to v2f64/v4f32 only.");

  bool SignedConv = Opc == ISD::SINT_TO_FP;
  bool FourEltRes = Op.getValueType() == MVT::v4f32;

  SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
  EVT WideVT = Wide.getValueType();
  unsigned WideNumElts = WideVT.getVectorNumElements();
  MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;

  SmallVector<int, 16> ShuffV;
  for (unsigned i = 0; i < WideNumElts; ++i)
    ShuffV.push_back(i + WideNumElts);

  int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
  int SaveElts = FourEltRes ? 4 : 2;
  if (Subtarget.isLittleEndian())
    for (int i = 0; i < SaveElts; i++)
      ShuffV[i * Stride] = i;
  else
    for (int i = 1; i <= SaveElts; i++)
      ShuffV[i * Stride - 1] = i - 1;
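
  // For example, for a v4f32 result from a v4i16 source (Wide is v8i16,
  // Stride == 2), the initial mask is <8,9,10,11,12,13,14,15> (all drawn
  // from ShuffleSrc2) and the loops above patch in the source elements:
  //   little-endian: <0,9,1,11,2,13,3,15>
  //   big-endian:    <8,0,10,1,12,2,14,3>
  // so each i32 lane of the bitcast holds one source element with the
  // padding in the right position for zero or sign extension.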
  SDValue ShuffleSrc2 =
      SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
  SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
  unsigned ExtendOp =
      SignedConv ? (unsigned)PPCISD::SExtVElems : (unsigned)ISD::BITCAST;

  SDValue Extend;
  if (!Subtarget.hasP9Altivec() && SignedConv) {
    Arrange = DAG.getBitcast(IntermediateVT, Arrange);
    Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
                         DAG.getValueType(Op.getOperand(0).getValueType()));
  } else
    Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange);

  return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
}
SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);

  EVT InVT = Op.getOperand(0).getValueType();
  EVT OutVT = Op.getValueType();
  if (OutVT.isVector() && OutVT.isFloatingPoint() &&
      isOperationCustom(Op.getOpcode(), InVT))
    return LowerINT_TO_FPVector(Op, DAG, dl);

  // Conversions to f128 are legal.
  if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
    return Op;

  if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
    if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
      return SDValue();

    SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by
    // 0.5). This can be done with an fma and the 0.5 constant:
    // (V+1.0)*0.5 = 0.5*V+0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do the whole conversion and skip the
  // store/load; however, without FPCVT we can't do most conversions.
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDValue SINT = Op.getOperand(0);
    // When converting to single-precision, we actually need to convert
    // to double-precision first and then round to single-precision.
    // To avoid double-rounding effects during that operation, we have
    // to prepare the input operand. Bits that might be truncated when
    // converting to double-precision are replaced by a bit that won't
    // be lost at this stage, but is below the single-precision rounding
    // position.
    //
    // However, if -enable-unsafe-fp-math is in effect, accept double
    // rounding to avoid the extra overhead.
    if (Op.getValueType() == MVT::f32 &&
        !Subtarget.hasFPCVT() &&
        !DAG.getTarget().Options.UnsafeFPMath) {

      // Twiddle input to make sure the low 11 bits are zero. (If this
      // is the case, we are guaranteed the value will fit into the 53 bit
      // mantissa of an IEEE double-precision value without rounding.)
      // If any of those low 11 bits were not zero originally, make sure
      // bit 12 (value 2048) is set instead, so that the final rounding
      // to single-precision gets the correct result.
      SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                                  SINT, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
                          Round, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
      Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                          Round, DAG.getConstant(-2048, dl, MVT::i64));
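
      // For example, if the low 11 bits of SINT are 0x001, then
      // (0x001 + 2047) == 0x800 carries into bit 11; after the OR and the
      // final AND with -2048 the result is SINT with its low 11 bits
      // cleared and bit 11 (value 2048) set, exactly as described above.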
      // However, we cannot use that value unconditionally: if the magnitude
      // of the input value is small, the bit-twiddling we did above might
      // end up visibly changing the output. Fortunately, in that case, we
      // don't need to twiddle bits since the original input will convert
      // exactly to double-precision floating-point already. Therefore,
      // construct a conditional to use the original value if the top 11
      // bits are all sign-bit copies, and use the rounded value computed
      // above otherwise.
      SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
                                 SINT, DAG.getConstant(53, dl, MVT::i32));
      Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
                         Cond, DAG.getConstant(1, dl, MVT::i64));
      Cond = DAG.getSetCC(dl, MVT::i32,
                          Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);

      SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
    }
    ReuseLoadInfo RLI;
    SDValue Bits;

    MachineFunction &MF = DAG.getMachineFunction();
    if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
      Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                         RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasLFIWAX() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
      MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasFPCVT() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
      MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (((Subtarget.hasLFIWAX() &&
                 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
                (Subtarget.hasFPCVT() &&
                 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
               SINT.getOperand(0).getValueType() == MVT::i32) {
      MachineFrameInfo &MFI = MF.getFrameInfo();
      EVT PtrVT = getPointerTy(DAG.getDataLayout());

      int FrameIdx = MFI.CreateStackObject(4, 4, false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
        DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
                     MachinePointerInfo::getFixedStack(
                         DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = 4;

      MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
                                       PPCISD::LFIWZX : PPCISD::LFIWAX,
                                     dl, DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
    } else
      Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);

    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
    return FP;
  }
  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled INT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers. In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack,
  // then lfd it and fcfid it.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDValue Ld;
  if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
    ReuseLoadInfo RLI;
    bool ReusingLoad;
    if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
                                            DAG))) {
      int FrameIdx = MFI.CreateStackObject(4, 4, false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
        DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                     MachinePointerInfo::getFixedStack(
                         DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = 4;
    }

    MachineMemOperand *MMO =
      MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                              RLI.Alignment, RLI.AAInfo, RLI.Ranges);
    SDValue Ops[] = { RLI.Chain, RLI.Ptr };
    Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
                                   PPCISD::LFIWZX : PPCISD::LFIWAX,
                                 dl, DAG.getVTList(MVT::f64, MVT::Other),
                                 Ops, MVT::i32, MMO);
    if (ReusingLoad)
      spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
  } else {
    assert(Subtarget.isPPC64() &&
           "i32->FP without LFIWAX supported only on PPC64");

    int FrameIdx = MFI.CreateStackObject(8, 8, false);
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
                                Op.getOperand(0));

    // STD the extended value into the stack slot.
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}
SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */
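  // For example, RN = 01 (round to zero) maps to (1 ^ ((~1 & 3) >> 1)) == 0,
  // RN = 00 (round to nearest) maps to (0 ^ ((~0 & 3) >> 1)) == 1, and
  // RN = 10 / 11 map to 2 / 3, matching the FLT_ROUNDS values above.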
  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  EVT NodeTys[] = {
    MVT::f64,    // return register
    MVT::Glue    // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
                               MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}
SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops. Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
  SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}
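
// For example, with BitWidth == 32 and Amt == 40: OutLo is 0 and Tmp4 is 0
// (the PPC shift nodes produce 0 for amounts in [width, 2*width)), while
// Tmp6 == Lo << 8 supplies OutHi, the correct i64 result. This reliance on
// the oversized-shift behavior is what lets the expansion avoid an explicit
// compare on Amt.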
SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRL!");

  // Expand into a bunch of logical ops. Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
  SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRA!");

  // Expand into a bunch of logical ops, followed by a select_cc.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
  SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
  SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
                                  Tmp4, Tmp6, ISD::SETLE);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}
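
// The select_cc is needed here, unlike in the logical shifts: when
// Amt <= BitWidth, Tmp5 is non-positive and SETLE picks Tmp4 (the bits
// funneled from Hi and Lo); when Amt > BitWidth, it picks
// Tmp6 == Hi >>s (Amt - BitWidth), which also supplies the sign-bit copies
// an arithmetic shift must propagate into the low part.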
//===----------------------------------------------------------------------===//
// Vector related lowering.
//

/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize. Cast the result to VT.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  static const MVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };

  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];

  // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
  if (Val == -1)
    SplatSize = 1;

  EVT CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}
/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
                                const SDLoc &dl, EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op);
}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount. The result has the specified value type.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);

  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {
  EVT VecVT = V->getValueType(0);
  bool RightType = VecVT == MVT::v2f64 ||
    (HasP8Vector && VecVT == MVT::v4f32) ||
    (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
  if (!RightType)
    return false;

  bool IsSplat = true;
  bool IsLoad = false;
  SDValue Op0 = V->getOperand(0);

  // This function is called in a block that confirms the node is not a
  // constant splat. So a constant BUILD_VECTOR here means the vector is built
  // out of different constants.
  if (V->isConstant())
    return false;
  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
    if (V->getOperand(i).isUndef())
      return false;
    // We want to expand nodes that represent load-and-splat even if the
    // loaded value is a floating point truncation or conversion to int.
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different, or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}
// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {

  SDLoc dl(Op);
  SDValue Op0 = Op->getOperand(0);

  if (!EnableQuadPrecision ||
      (Op.getValueType() != MVT::f128) ||
      (Op0.getOpcode() != ISD::BUILD_PAIR) ||
      (Op0.getOperand(0).getValueType() != MVT::i64) ||
      (Op0.getOperand(1).getValueType() != MVT::i64))
    return SDValue();

  return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
                     Op0.getOperand(1));
}

static const SDValue *getNormalLoadInput(const SDValue &Op) {
  const SDValue *InputLoad = &Op;
  if (InputLoad->getOpcode() == ISD::BITCAST)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() != ISD::LOAD)
    return nullptr;
  LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
  return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
}
// If this is a case we can't handle, return null and let the default
// expansion code take care of it. If we CAN select this case, and if it
// selects to a single instruction, return Op. Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc dl(Op);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
    // We first build an i32 vector, load it into a QPX register,
    // then convert it to a floating-point vector and compare it
    // to a zero vector to get the boolean result.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = MFI.CreateStackObject(16, 16, false);
    MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    assert(BVN->getNumOperands() == 4 &&
           "BUILD_VECTOR for v4i1 does not have 4 operands");

    bool IsConst = true;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        IsConst = false;
        break;
      }
    }

    if (IsConst) {
      Constant *One =
        ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
      Constant *NegOne =
        ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);

      Constant *CV[4];
      for (unsigned i = 0; i < 4; ++i) {
        if (BVN->getOperand(i).isUndef())
          CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
        else if (isNullConstant(BVN->getOperand(i)))
          CV[i] = NegOne;
        else
          CV[i] = One;
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
                                          16 /* alignment */);

      SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
      SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
      return DAG.getMemIntrinsicNode(
          PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }

    SmallVector<SDValue, 4> Stores;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;

      unsigned Offset = 4*i;
      SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
      Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

      unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
      if (StoreSize > 4) {
        Stores.push_back(
            DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
                              PtrInfo.getWithOffset(Offset), MVT::i32));
      } else {
        SDValue StoreValue = BVN->getOperand(i);
        if (StoreSize < 4)
          StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);

        Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
                                      PtrInfo.getWithOffset(Offset)));
      }
    }

    SDValue StoreChain;
    if (!Stores.empty())
      StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    else
      StoreChain = DAG.getEntryNode();

    // Now load from v4i32 into the QPX register; this will extend it to
    // v4i64 but not yet convert it to a floating point. Nevertheless, this
    // is typed as v4f64 because the QPX register integer states are not
    // explicitly represented.

    SDValue Ops[] = {StoreChain,
                     DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
                     FIdx};
    SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});

    SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
      dl, VTs, Ops, MVT::v4i32, PtrInfo);
    LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
      DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
      LoadedVect);

    SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);

    return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
  }
  // All other QPX vectors are handled by generic code.
  if (Subtarget.hasQPX())
    return SDValue();

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
      SplatBitSize > 32) {

    const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
    // Handle load-and-splat patterns as we have instructions that will do
    // this operation.
    if (InputLoad && DAG.isSplatValue(Op, true)) {
      LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);

      // We have handling for 4 and 8 byte elements.
      unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();

      // Checking for a single use of this load, we have to check for vector
      // width (128 bits) / ElementSize uses (since each operand of the
      // BUILD_VECTOR is a separate use of the value).
      if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
          ((Subtarget.hasVSX() && ElementSize == 64) ||
           (Subtarget.hasP9Vector() && ElementSize == 32))) {
        SDValue Ops[] = {
          LD->getChain(),    // Chain
          LD->getBasePtr(),  // Ptr
          DAG.getValueType(Op.getValueType()) // VT
        };
        return
          DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
                                  DAG.getVTList(Op.getValueType(), MVT::Other),
                                  Ops, LD->getMemoryVT(), LD->getMemOperand());
      }
    }

    // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
    // lowered to VSX instructions under certain conditions.
    // Without VSX, there is no pattern more efficient than expanding the node.
    if (Subtarget.hasVSX() &&
        haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
                                        Subtarget.hasP8Vector()))
      return Op;
    return SDValue();
  }

  unsigned SplatBits = APSplatBits.getZExtValue();
  unsigned SplatUndef = APSplatUndef.getZExtValue();
  unsigned SplatSize = SplatBitSize / 8;

  // First, handle single instruction cases.

  // All zeros?
  if (SplatBits == 0) {
    // Canonicalize all zero vectors to be v4i32.
    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
      SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
    }
    return Op;
  }

  // We have XXSPLTIB for constant splats one byte wide.
  // FIXME: SplatBits is an unsigned int being cast to an int while passing it
  // as an argument to BuildSplatI. Given SplatSize == 1 it is okay here.
  if (Subtarget.hasP9Vector() && SplatSize == 1)
    return BuildSplatI(SplatBits, SplatSize, Op.getValueType(), DAG, dl);

  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
  if (SextVal >= -16 && SextVal <= 15)
    return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);

  // Two instruction sequences.

  // If this value is in the range [-32,30] and is even, use:
  //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
  // If this value is in the range [17,31] and is odd, use:
  //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
  // If this value is in the range [-31,-17] and is odd, use:
  //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
  // Note the last two are three-instruction sequences.
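  // For example, a splat of 30 uses vspltis[bhw](15) + vadd, and a splat of
  // 27 (odd, in [17,31]) uses vspltis[bhw](11) - vspltis[bhw](-16), since
  // 11 - (-16) == 27.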
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above sequences.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }
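
  // To see why the vspltisw + vslw trick works: vspltisw -1 yields
  // 0xFFFFFFFF in each word, and vslw shifts each word left by the low
  // 5 bits of the same word (31), producing 0x8000_0000; the final xor
  // with the ones vector flips it to 0x7FFF_FFFF.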
  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000), since
    // 'vsplti -1' is listed first in the array.
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
  }

  return SDValue();
}
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);

  int ShufIdxs[16];
  switch (OpNum) {
  default: llvm_unreachable("Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
  }
  EVT VT = OpLHS.getValueType();
  OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
  OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}
/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0, else just return a
/// default SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned BytesInVector = 16;
  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the byte we want at element 7.
  unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
                                   0, 15, 14, 13, 12, 11, 10, 9};
  unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
                                1, 2, 3, 4, 5, 6, 7, 8};

  ArrayRef<int> Mask = N->getMask();
  int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa.
  // Possible permutations inserting an element from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  //   ...
  //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [16,31].

  bool FoundCandidate = false;
  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undefined.
  unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
  // Go through the mask of bytes to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < BytesInVector; ++i) {
    unsigned CurrentElement = Mask[i];
    // If 2nd operand is undefined, we should only look for element 7 in the
    // mask.
    if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
      continue;

    bool OtherElementsInOrder = true;
    // Examine the other elements in the Mask to see if they're in original
    // order.
    for (unsigned j = 0; j < BytesInVector; ++j) {
      if (j == i)
        continue;
      // If CurrentElement is from V1 [0,15], then we expect the rest of the
      // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
      // undefined, in which case we always assume we're picking from the
      // 1st operand.
      int MaskOffset =
          (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
      if (Mask[j] != OriginalOrder[j] + MaskOffset) {
        OtherElementsInOrder = false;
        break;
      }
    }
    // If other elements are in original order, we record the number of shifts
    // we need to get the element we want into element 7. Also record which
    // byte in the vector we should insert into.
    if (OtherElementsInOrder) {
      // If 2nd operand is undefined, we assume no shifts and no swapping.
      if (V2.isUndef()) {
        ShiftElts = 0;
        Swap = false;
      } else {
        // Only need the last 4 bits for shifts because operands will be
        // swapped if CurrentElement is >= 2^4.
        ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
                         : BigEndianShifts[CurrentElement & 0xF];
        Swap = CurrentElement < BytesInVector;
      }
      InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
      FoundCandidate = true;
      break;
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTB,
  // optionally with VECSHL if shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  if (ShiftElts) {
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
                     DAG.getConstant(InsertAtByte, dl, MVT::i32));
}
/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0, else just return a
/// default SDValue.
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {
  const unsigned NumHalfWords = 8;
  const unsigned BytesInVector = NumHalfWords * 2;
  // Check that the shuffle is on half-words.
  if (!isNByteElemShuffleMask(N, 2, 1))
    return SDValue();

  bool IsLE = Subtarget.isLittleEndian();
  SDLoc dl(N);
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  unsigned ShiftElts = 0, InsertAtByte = 0;
  bool Swap = false;

  // Shifts required to get the half-word we want at element 3.
  unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
  unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};

  uint32_t Mask = 0;
  uint32_t OriginalOrderLow = 0x1234567;
  uint32_t OriginalOrderHigh = 0x89ABCDEF;
  // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
  // 32-bit space, only needing a 4-bit nibble per element.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
  }
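
  // For example, the identity mask over the first operand (half-words 0..7)
  // packs to 0x01234567 (OriginalOrderLow), and a mask drawn entirely from
  // the second operand (half-words 8..15) packs to 0x89ABCDEF
  // (OriginalOrderHigh).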
  // For each mask element, find out if we're just inserting something
  // from V2 into V1 or vice versa. Possible permutations inserting an element
  // from V2 into V1:
  //   X, 1, 2, 3, 4, 5, 6, 7
  //   0, X, 2, 3, 4, 5, 6, 7
  //   0, 1, X, 3, 4, 5, 6, 7
  //   0, 1, 2, X, 4, 5, 6, 7
  //   0, 1, 2, 3, X, 5, 6, 7
  //   0, 1, 2, 3, 4, X, 6, 7
  //   0, 1, 2, 3, 4, 5, X, 7
  //   0, 1, 2, 3, 4, 5, 6, X
  // Inserting from V1 into V2 will be similar, except mask range will be
  // [8,15].

  bool FoundCandidate = false;
  // Go through the mask of half-words to find an element that's being moved
  // from one vector to the other.
  for (unsigned i = 0; i < NumHalfWords; ++i) {
    unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
    uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
    uint32_t MaskOtherElts = ~(0xF << MaskShift);
    uint32_t TargetOrder = 0x0;

    // If both vector operands for the shuffle are the same vector, the mask
    // will contain only elements from the first one and the second one will
    // be undefined.
    if (V2.isUndef()) {
      ShiftElts = 0;
      unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
      TargetOrder = OriginalOrderLow;
      Swap = false;
      // Skip if not the correct element, or if the mask of other elements
      // doesn't match our expected order.
      if (MaskOneElt == VINSERTHSrcElem &&
          (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        FoundCandidate = true;
        break;
      }
    } else { // If both operands are defined.
      // Target order is [8,15] if the current mask is between [0,7].
      TargetOrder =
          (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
      // Skip if the mask of other elements doesn't match our expected order.
      if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
        // We only need the last 3 bits for the number of shifts.
        ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
                         : BigEndianShifts[MaskOneElt & 0x7];
        InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
        Swap = MaskOneElt < NumHalfWords;
        FoundCandidate = true;
        break;
      }
    }
  }

  if (!FoundCandidate)
    return SDValue();

  // Candidate found, construct the proper SDAG sequence with VINSERTH,
  // optionally with VECSHL if shift is required.
  if (Swap)
    std::swap(V1, V2);
  if (V2.isUndef())
    V2 = V1;
  SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  if (ShiftElts) {
    // Double ShiftElts because we're left shifting on v16i8 type.
    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
                              DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
    SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
    SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                              DAG.getConstant(InsertAtByte, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
  }
  SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
                            DAG.getConstant(InsertAtByte, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
/// is a shuffle we can handle in a single instruction, return it. Otherwise,
/// return the code it can be lowered into. Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  EVT VT = Op.getValueType();
  bool isLittleEndian = Subtarget.isLittleEndian();

  unsigned ShiftElts, InsertAtByte;
  bool Swap = false;

  // If this is a load-and-splat, we can do that with a single instruction
  // in some cases. However if the load has multiple uses, we don't want to
  // combine it because that will just produce multiple loads.
  const SDValue *InputLoad = getNormalLoadInput(V1);
  if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
      (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
      InputLoad->hasOneUse()) {
    bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
    int SplatIdx =
      PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);

    LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
    // For 4-byte load-and-splat, we need Power9.
    if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
      uint64_t Offset = 0;
      if (IsFourByte)
        Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
      else
        Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
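
      // For example, splatting element 1 of a v4i32 on little-endian loads
      // from byte offset (3 - 1) * 4 == 8: SplatIdx follows the big-endian
      // (PPC mnemonic) element numbering, so it is mirrored before being
      // scaled to a byte offset from the low address.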
9231 SDValue BasePtr = LD->getBasePtr();
9233 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9234 BasePtr, DAG.getIntPtrConstant(Offset, dl));
9236 LD->getChain(), // Chain
9238 DAG.getValueType(Op.getValueType()) // VT
9241 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9243 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9244 Ops, LD->getMemoryVT(), LD->getMemOperand());
9245 if (LdSplt.getValueType() != SVOp->getValueType(0))
9246 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9250 if (Subtarget.hasP9Vector() &&
9251 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9255 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9256 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9258 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9259 DAG.getConstant(ShiftElts, dl, MVT::i32));
9260 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9261 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9262 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9264 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9265 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9266 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9269 if (Subtarget.hasP9Altivec()) {
9271 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9274 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
  if (Subtarget.hasVSX() &&
      PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);

    SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
                              DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
  }

  if (Subtarget.hasVSX() &&
      PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
    if (Swap)
      std::swap(V1, V2);
    SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
    SDValue Conv2 =
        DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);

    SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
  }
  if (Subtarget.hasP9Vector()) {
    if (PPC::isXXBRHShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
      SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
    } else if (PPC::isXXBRWShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
    } else if (PPC::isXXBRDShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
      SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
    } else if (PPC::isXXBRQShuffleMask(SVOp)) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
      SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
    }
  }
  if (Subtarget.hasVSX()) {
    if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
      int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);

      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
      SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
                                  DAG.getConstant(SplatIdx, dl, MVT::i32));
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
    }

    // Left shifts of 8 bytes are actually swaps. Convert accordingly.
    if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
      SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
      SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
      return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
    }
  }
  if (Subtarget.hasQPX()) {
    if (VT.getVectorNumElements() != 4)
      return SDValue();

    if (V2.isUndef()) V2 = V1;

    int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
    if (AlignIdx != -1) {
      return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
                         DAG.getConstant(AlignIdx, dl, MVT::i32));
    } else if (SVOp->isSplat()) {
      int SplatIdx = SVOp->getSplatIndex();
      if (SplatIdx >= 4) {
        std::swap(V1, V2);
        SplatIdx -= 4;
      }

      return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
                         DAG.getConstant(SplatIdx, dl, MVT::i32));
    }

    // Lower this into a qvgpci/qvfperm pair.

    // Compute the qvgpci literal
    unsigned idx = 0;
    for (unsigned i = 0; i < 4; ++i) {
      int m = SVOp->getMaskElt(i);
      unsigned mm = m >= 0 ? (unsigned) m : i;
      idx |= mm << (3-i)*3;
    }

    SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
                             DAG.getConstant(idx, dl, MVT::i32));
    return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
  }
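
  // Worked example for the qvgpci literal computed above: the four mask
  // elements are packed into 3-bit fields, most significant first, so the
  // shuffle mask <2,0,1,3> encodes as
  //   idx = (2 << 9) | (0 << 6) | (1 << 3) | 3 == 1035.
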
  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.isUndef()) {
    if (PPC::isSplatShuffleMask(SVOp, 1) ||
        PPC::isSplatShuffleMask(SVOp, 2) ||
        PPC::isSplatShuffleMask(SVOp, 4) ||
        PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
        PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
        PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
        PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
        (Subtarget.hasP8Altivec() && (
          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
  if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
      PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
      PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
      (Subtarget.hasP8Altivec() && (
        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
    return Op;
  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  ArrayRef<int> PermMask = SVOp->getMask();

  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask[i*4+j] < 0)
        continue;   // Undef, ignore it.

      unsigned ByteSource = PermMask[i*4+j];
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  // For now, we skip this for little endian until such time as we have a
  // little-endian perfect shuffle table.
  if (isFourElementShuffle && !isLittleEndian) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost  = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be computed.
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }
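
  // Note on the table lookup above: each PFIndexes[i] is in the range [0, 8]
  // (8 meaning undef), so the four indices form a base-9 number.  For
  // example, PFIndexes == {1, 0, 3, 2} gives
  //   PFTableIndex = 1*729 + 0*81 + 3*9 + 2 == 758.
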
  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.isUndef()) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.

  // For little endian, the order of the input vectors is reversed, and
  // the permutation mask is complemented with respect to 31.  This is
  // necessary to produce proper semantics with the big-endian-biased vperm
  // instruction.
  EVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDValue, 16> ResultMask;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];

    for (unsigned j = 0; j != BytesPerElement; ++j)
      if (isLittleEndian)
        ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
                                             dl, MVT::i32));
      else
        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
                                             MVT::i32));
  }

  SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
  if (isLittleEndian)
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V2, V1, VPermMask);
  else
    return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
                       V1, V2, VPermMask);
}
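
// Worked example for the vperm mask conversion above: for a v4i32 shuffle
// with PermMask[i] == 5 (element 1 of V2) and BytesPerElement == 4, the
// big-endian byte indices are 20..23; on little endian they become
// 31-20 .. 31-23 == 11..8, and V1/V2 are swapped so that vperm's big-endian
// numbering still selects the intended bytes.
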
/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison. If it is, return true and fill in Opc/isDot with
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default:
    return false;
  // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:
    CompareOpc = 966;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p:
    CompareOpc = 198;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequb_p:
    CompareOpc = 6;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequh_p:
    CompareOpc = 70;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequw_p:
    CompareOpc = 134;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpequd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 199;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb_p:
  case Intrinsic::ppc_altivec_vcmpneh_p:
  case Intrinsic::ppc_altivec_vcmpnew_p:
  case Intrinsic::ppc_altivec_vcmpnezb_p:
  case Intrinsic::ppc_altivec_vcmpnezh_p:
  case Intrinsic::ppc_altivec_vcmpnezw_p:
    if (Subtarget.hasP9Altivec()) {
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb_p:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh_p:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew_p:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb_p:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh_p:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw_p:
        CompareOpc = 391;
        break;
      }
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp_p:
    CompareOpc = 454;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p:
    CompareOpc = 710;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p:
    CompareOpc = 774;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p:
    CompareOpc = 838;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p:
    CompareOpc = 902;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 967;
      isDot = true;
    } else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub_p:
    CompareOpc = 518;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p:
    CompareOpc = 582;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p:
    CompareOpc = 646;
    isDot = true;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud_p:
    if (Subtarget.hasP8Altivec()) {
      CompareOpc = 711;
      isDot = true;
    } else
      return false;
    break;

  // VSX predicate comparisons use the same infrastructure
  case Intrinsic::ppc_vsx_xvcmpeqdp_p:
  case Intrinsic::ppc_vsx_xvcmpgedp_p:
  case Intrinsic::ppc_vsx_xvcmpgtdp_p:
  case Intrinsic::ppc_vsx_xvcmpeqsp_p:
  case Intrinsic::ppc_vsx_xvcmpgesp_p:
  case Intrinsic::ppc_vsx_xvcmpgtsp_p:
    if (Subtarget.hasVSX()) {
      switch (IntrinsicID) {
      case Intrinsic::ppc_vsx_xvcmpeqdp_p:
        CompareOpc = 99;
        break;
      case Intrinsic::ppc_vsx_xvcmpgedp_p:
        CompareOpc = 115;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtdp_p:
        CompareOpc = 107;
        break;
      case Intrinsic::ppc_vsx_xvcmpeqsp_p:
        CompareOpc = 67;
        break;
      case Intrinsic::ppc_vsx_xvcmpgesp_p:
        CompareOpc = 83;
        break;
      case Intrinsic::ppc_vsx_xvcmpgtsp_p:
        CompareOpc = 75;
        break;
      }
      isDot = true;
    } else
      return false;
    break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:
    CompareOpc = 966;
    break;
  case Intrinsic::ppc_altivec_vcmpeqfp:
    CompareOpc = 198;
    break;
  case Intrinsic::ppc_altivec_vcmpequb:
    CompareOpc = 6;
    break;
  case Intrinsic::ppc_altivec_vcmpequh:
    CompareOpc = 70;
    break;
  case Intrinsic::ppc_altivec_vcmpequw:
    CompareOpc = 134;
    break;
  case Intrinsic::ppc_altivec_vcmpequd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 199;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpneb:
  case Intrinsic::ppc_altivec_vcmpneh:
  case Intrinsic::ppc_altivec_vcmpnew:
  case Intrinsic::ppc_altivec_vcmpnezb:
  case Intrinsic::ppc_altivec_vcmpnezh:
  case Intrinsic::ppc_altivec_vcmpnezw:
    if (Subtarget.hasP9Altivec())
      switch (IntrinsicID) {
      default:
        llvm_unreachable("Unknown comparison intrinsic.");
      case Intrinsic::ppc_altivec_vcmpneb:
        CompareOpc = 7;
        break;
      case Intrinsic::ppc_altivec_vcmpneh:
        CompareOpc = 71;
        break;
      case Intrinsic::ppc_altivec_vcmpnew:
        CompareOpc = 135;
        break;
      case Intrinsic::ppc_altivec_vcmpnezb:
        CompareOpc = 263;
        break;
      case Intrinsic::ppc_altivec_vcmpnezh:
        CompareOpc = 327;
        break;
      case Intrinsic::ppc_altivec_vcmpnezw:
        CompareOpc = 391;
        break;
      }
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgefp:
    CompareOpc = 454;
    break;
  case Intrinsic::ppc_altivec_vcmpgtfp:
    CompareOpc = 710;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsb:
    CompareOpc = 774;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsh:
    CompareOpc = 838;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsw:
    CompareOpc = 902;
    break;
  case Intrinsic::ppc_altivec_vcmpgtsd:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 967;
    else
      return false;
    break;
  case Intrinsic::ppc_altivec_vcmpgtub:
    CompareOpc = 518;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuh:
    CompareOpc = 582;
    break;
  case Intrinsic::ppc_altivec_vcmpgtuw:
    CompareOpc = 646;
    break;
  case Intrinsic::ppc_altivec_vcmpgtud:
    if (Subtarget.hasP8Altivec())
      CompareOpc = 711;
    else
      return false;
    break;
  }
  return true;
}
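
// Note on getVectorCompareInfo above: the CompareOpc values are the
// extended-opcode fields of the corresponding vcmp* / xvcmp* instructions
// (e.g. 198 for vcmpeqfp), and isDot selects the record ("dot") form that
// also updates CR6.
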
/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
  unsigned IntrinsicID =
    cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  SDLoc dl(Op);

  if (IntrinsicID == Intrinsic::thread_pointer) {
    // Reads the thread pointer register, used for __builtin_thread_pointer.
    if (Subtarget.isPPC64())
      return DAG.getRegister(PPC::X13, MVT::i64);
    return DAG.getRegister(PPC::R2, MVT::i32);
  }

  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
    return SDValue();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                              Op.getOperand(1), Op.getOperand(2),
                              DAG.getConstant(CompareOpc, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, dl, MVT::i32)
  };
  EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
  SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
                              DAG.getRegister(PPC::CR6, MVT::i32),
                              CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                      DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                      DAG.getConstant(1, dl, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, dl, MVT::i32));
  return Flags;
}
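
// Worked example for the CR6 unpacking above: after MFOCRF, the CR6 field
// occupies bits 7..4 of the GPR, so LT sits at bit 7 and EQ at bit 5.  For
// predicate 2 (LT), BitNo == 2 and the SRL amount is 8 - (3 - 2) == 7; the
// AND with 1 then isolates the bit, and no XOR is emitted since InvertBit
// is false.
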
SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
  // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
  // the beginning of the argument list.
  int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
  SDLoc DL(Op);
  switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
  case Intrinsic::ppc_cfence: {
    assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
    assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
    return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
                                      DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                                  Op.getOperand(ArgStart + 1)),
                                      Op.getOperand(0)),
                   0);
  }
  default:
    break;
  }
  return SDValue();
}
SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
  // Check for a DIV with the same operands as this REM.
  for (auto UI : Op.getOperand(1)->uses()) {
    if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
        (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
      if (UI->getOperand(0) == Op.getOperand(0) &&
          UI->getOperand(1) == Op.getOperand(1))
        return SDValue();
  }
  return Op;
}
// Lower scalar BSWAP64 to xxbrd.
SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // MTVSRDD
  Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
                   Op.getOperand(0));
  // XXBRD
  Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
  // MFVSRD
  int VectorIndex = 0;
  if (Subtarget.isLittleEndian())
    VectorIndex = 1;
  Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
                   DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
  return Op;
}
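
// Illustrative example for LowerBSWAP above: for input 0x0123456789ABCDEF,
// the value is splatted into both doublewords of a v2i64, each lane is
// byte-reversed to 0xEFCDAB8967452301, and one lane is moved back to a GPR
// (element 1 on little-endian, element 0 otherwise).
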
// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
// compared to a value that is atomically loaded (atomic loads zero-extend).
SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
         "Expecting an atomic compare-and-swap here.");
  SDLoc dl(Op);
  auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = AtomicNode->getMemoryVT();
  if (MemVT.getSizeInBits() >= 32)
    return Op;

  SDValue CmpOp = Op.getOperand(2);
  // If this is already correctly zero-extended, leave it alone.
  auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
  if (DAG.MaskedValueIsZero(CmpOp, HighBits))
    return Op;

  // Clear the high bits of the compare operand.
  unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
  SDValue NewCmpOp =
    DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
                DAG.getConstant(MaskVal, dl, MVT::i32));

  // Replace the existing compare operand with the properly zero-extended one.
  SmallVector<SDValue, 4> Ops;
  for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
    Ops.push_back(AtomicNode->getOperand(i));
  Ops[2] = NewCmpOp;
  MachineMemOperand *MMO = AtomicNode->getMemOperand();
  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
  auto NodeTy =
    (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
  return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
}
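
// Example for the masking above: with MemVT == MVT::i8,
// MaskVal == (1 << 8) - 1 == 0xFF, so the compare operand is ANDed with 0xFF
// to match the zero-extended value the atomic load produces; for MVT::i16
// the mask is 0xFFFF.
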
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                               MachinePointerInfo());
  // Load it out.
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
}
SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
         "Should only be called for ISD::INSERT_VECTOR_ELT");

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  // We have legal lowering for constant indices but not for variable ones.
  if (!C)
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
    SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
    unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
    unsigned InsertAtElement = C->getZExtValue();
    unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
    if (Subtarget.isLittleEndian()) {
      InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
    }
    return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
                       DAG.getConstant(InsertAtByte, dl, MVT::i32));
  }
  return Op;
}
SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDNode *N = Op.getNode();

  assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
         "Unknown extract_vector_elt type");

  SDValue Value = N->getOperand(0);

  // The first part of this is like the store lowering except that we don't
  // need to track the chain.

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
    DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
    Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue StoreChain = DAG.getEntryNode();
  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
    dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Extract the value requested.
  unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
  Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

  SDValue IntVal =
      DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));

  if (!Subtarget.useCRBits())
    return IntVal;

  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
}
/// Lowering for QPX v4i1 loads
SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  SDValue LoadChain = LN->getChain();
  SDValue BasePtr = LN->getBasePtr();

  if (Op.getValueType() == MVT::v4f64 ||
      Op.getValueType() == MVT::v4f32) {
    EVT MemVT = LN->getMemoryVT();
    unsigned Alignment = LN->getAlignment();

    // If this load is properly aligned, then it is legal.
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Op.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Vals[4], LoadChains[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Load;
      if (ScalarVT != ScalarMemVT)
        Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
                              BasePtr,
                              LN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              LN->getMemOperand()->getFlags(), LN->getAAInfo());
      else
        Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
                           LN->getPointerInfo().getWithOffset(Idx * Stride),
                           MinAlign(Alignment, Idx * Stride),
                           LN->getMemOperand()->getFlags(), LN->getAAInfo());

      if (Idx == 0 && LN->isIndexed()) {
        assert(LN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector load");
        Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
                                  LN->getAddressingMode());
      }

      Vals[Idx] = Load;
      LoadChains[Idx] = Load.getValue(1);

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
    SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);

    if (LN->isIndexed()) {
      SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
      return DAG.getMergeValues(RetOps, dl);
    }

    SDValue RetOps[] = { Value, TF };
    return DAG.getMergeValues(RetOps, dl);
  }

  assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
  assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");

  // To lower v4i1 from a byte array, we load the byte elements of the
  // vector and then reuse the BUILD_VECTOR logic.

  SDValue VectElmts[4], VectElmtChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    VectElmts[i] = DAG.getExtLoad(
        ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
        LN->getPointerInfo().getWithOffset(i), MVT::i8,
        /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
    VectElmtChains[i] = VectElmts[i].getValue(1);
  }

  LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
  SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);

  SDValue RVals[] = { Value, LoadChain };
  return DAG.getMergeValues(RVals, dl);
}
/// Lowering for QPX v4i1 stores
SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
  SDValue StoreChain = SN->getChain();
  SDValue BasePtr = SN->getBasePtr();
  SDValue Value = SN->getValue();

  if (Value.getValueType() == MVT::v4f64 ||
      Value.getValueType() == MVT::v4f32) {
    EVT MemVT = SN->getMemoryVT();
    unsigned Alignment = SN->getAlignment();

    // If this store is properly aligned, then it is legal.
    if (Alignment >= MemVT.getStoreSize())
      return Op;

    EVT ScalarVT = Value.getValueType().getScalarType(),
        ScalarMemVT = MemVT.getScalarType();
    unsigned Stride = ScalarMemVT.getStoreSize();

    SDValue Stores[4];
    for (unsigned Idx = 0; Idx < 4; ++Idx) {
      SDValue Ex = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
          DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Store;
      if (ScalarVT != ScalarMemVT)
        Store =
            DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
                              SN->getPointerInfo().getWithOffset(Idx * Stride),
                              ScalarMemVT, MinAlign(Alignment, Idx * Stride),
                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
      else
        Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
                             SN->getPointerInfo().getWithOffset(Idx * Stride),
                             MinAlign(Alignment, Idx * Stride),
                             SN->getMemOperand()->getFlags(), SN->getAAInfo());

      if (Idx == 0 && SN->isIndexed()) {
        assert(SN->getAddressingMode() == ISD::PRE_INC &&
               "Unknown addressing mode on vector store");
        Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
                                    SN->getAddressingMode());
      }

      BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                            DAG.getConstant(Stride, dl,
                                            BasePtr.getValueType()));
      Stores[Idx] = Store;
    }

    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

    if (SN->isIndexed()) {
      SDValue RetOps[] = { TF, Stores[0].getValue(1) };
      return DAG.getMergeValues(RetOps, dl);
    }

    return TF;
  }

  assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
  assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");

  // The values are now known to be -1 (false) or 1 (true). To convert this
  // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
  // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
  Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

  // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
  // understand how to form the extending load.
  SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

  Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

  // Now convert to an integer and store.
  Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
    DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
    Value);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = MFI.CreateStackObject(16, 16, false);
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDValue Ops[] = {StoreChain,
                   DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
                   Value, FIdx};
  SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);

  StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
    dl, VTs, Ops, MVT::v4i32, PtrInfo);

  // Move data into the byte array.
  SDValue Loads[4], LoadChains[4];
  for (unsigned i = 0; i < 4; ++i) {
    unsigned Offset = 4*i;
    SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

    Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
                           PtrInfo.getWithOffset(Offset));
    LoadChains[i] = Loads[i].getValue(1);
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);

  SDValue Stores[4];
  for (unsigned i = 0; i < 4; ++i) {
    SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);

    Stores[i] = DAG.getTruncStore(
        StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
        MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
        SN->getAAInfo());
  }

  StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);

  return StoreChain;
}
SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  if (Op.getValueType() == MVT::v4i32) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.

    SDValue RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                      LHS, RHS, DAG, dl, MVT::v4i32);

    SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG, dl);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    bool isLittleEndian = Subtarget.isLittleEndian();

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                         LHS, RHS, DAG, dl, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                        LHS, RHS, DAG, dl, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);

    // Merge the results together.  Because vmuleub and vmuloub are
    // instructions with a big-endian bias, we must reverse the
    // element numbering and reverse the meaning of "odd" and "even"
    // when generating little endian code.
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      if (isLittleEndian) {
        Ops[i*2  ] = 2*i;
        Ops[i*2+1] = 2*i+16;
      } else {
        Ops[i*2  ] = 2*i+1;
        Ops[i*2+1] = 2*i+1+16;
      }
    }
    if (isLittleEndian)
      return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
    else
      return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    llvm_unreachable("Unknown mul to lower!");
  }
}
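
// Worked example for the v4i32 path above: writing each 32-bit lane as
// x = xH*2^16 + xL and y = yH*2^16 + yL, then modulo 2^32
//   x*y == xL*yL + ((xL*yH + xH*yL) << 16).
// vmulouh produces xL*yL, and vmsumuhm applied to (x, rotl(y, 16)) sums the
// two cross products xL*yH + xH*yL, which vslw shifts into the high half
// before the final add.
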
SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");

  EVT VT = Op.getValueType();
  assert(VT.isVector() &&
         "Only set vector abs as custom, scalar abs shouldn't reach here!");
  assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
          VT == MVT::v16i8) &&
         "Unexpected vector element type!");
  assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
         "Current subtarget doesn't support smax v2i64!");

  // For vector abs, it can be lowered to:
  // abs x
  // ==>
  // y = -x
  // smax(x, y)

  SDValue X = Op.getOperand(0);
  SDValue Zero = DAG.getConstant(0, dl, VT);
  SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);

  // SMAX patch https://reviews.llvm.org/D47332
  // hasn't landed yet, so use intrinsic first here.
  // TODO: Should use SMAX directly once SMAX patch landed
  Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
  if (VT == MVT::v2i64)
    BifID = Intrinsic::ppc_altivec_vmaxsd;
  else if (VT == MVT::v8i16)
    BifID = Intrinsic::ppc_altivec_vmaxsh;
  else if (VT == MVT::v16i8)
    BifID = Intrinsic::ppc_altivec_vmaxsb;

  return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
}
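
// Example for the smax-based abs above: for x == -5, y == 0 - x == 5 and
// smax(-5, 5) == 5; for x == 5, y == -5 and the result is again 5.  Note
// that INT_MIN negates to itself, so abs(INT_MIN) stays INT_MIN, matching
// the usual two's-complement behavior.
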
// Custom lowering for fpext v2f32 to v2f64
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::FP_EXTEND &&
         "Should only be called for ISD::FP_EXTEND");

  // We only want to custom lower an extend from v2f32 to v2f64.
  if (Op.getValueType() != MVT::v2f64 ||
      Op.getOperand(0).getValueType() != MVT::v2f32)
    return SDValue();

  SDLoc dl(Op);
  SDValue Op0 = Op.getOperand(0);

  switch (Op0.getOpcode()) {
  default:
    return SDValue();
  case ISD::EXTRACT_SUBVECTOR: {
    assert(Op0.getNumOperands() == 2 &&
           isa<ConstantSDNode>(Op0->getOperand(1)) &&
           "Node should have 2 operands with second one being a constant!");

    if (Op0.getOperand(0).getValueType() != MVT::v4f32)
      return SDValue();

    // Custom lower is only done for high or low doubleword.
    int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (Idx % 2 != 0)
      return SDValue();

    // Since input is v4f32, at this point Idx is either 0 or 2.
    // Shift to get the doubleword position we want.
    int DWord = Idx >> 1;

    // High and low word positions are different on little endian.
    if (Subtarget.isLittleEndian())
      DWord ^= 0x1;

    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
                       Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
  }
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FSUB: {
    SDValue NewLoad[2];
    for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
      // Ensure both inputs are loads.
      SDValue LdOp = Op0.getOperand(i);
      if (LdOp.getOpcode() != ISD::LOAD)
        return SDValue();
      // Generate new load node.
      LoadSDNode *LD = cast<LoadSDNode>(LdOp);
      SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
      NewLoad[i] = DAG.getMemIntrinsicNode(
          PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
          LD->getMemoryVT(), LD->getMemOperand());
    }
    SDValue NewOp =
        DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
                    NewLoad[1], Op0.getNode()->getFlags());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op0);
    SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
    SDValue NewLd = DAG.getMemIntrinsicNode(
        PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
        LD->getMemoryVT(), LD->getMemOperand());
    return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
                       DAG.getConstant(0, dl, MVT::i32));
  }
  }
  llvm_unreachable("ERROR: Should return for all cases within switch.");
}
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);

  // Variable argument lowering.
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);

  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::GET_DYNAMIC_AREA_OFFSET:
    return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

  // Exception handling lowering.
  case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);
  case ISD::ABS:                return LowerABS(Op, DAG);
  case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);

  // For counter-based loop handling.
  case ISD::INTRINSIC_W_CHAIN:  return SDValue();

  case ISD::BITCAST:            return LowerBITCAST(Op, DAG);

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);

  case ISD::INTRINSIC_VOID:
    return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::SREM:
  case ISD::UREM:
    return LowerREM(Op, DAG);
  case ISD::BSWAP:
    return LowerBSWAP(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP:
    return LowerATOMIC_CMP_SWAP(Op, DAG);
  }
}
void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));

    Results.push_back(RTB);
    Results.push_back(RTB.getValue(1));
    Results.push_back(RTB.getValue(2));
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
        Intrinsic::loop_decrement)
      break;

    assert(N->getValueType(0) == MVT::i1 &&
           "Unexpected result type for CTR decrement intrinsic");
    EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 N->getValueType(0));
    SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
    SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
                                 N->getOperand(1));

    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
    Results.push_back(NewInt.getValue(1));
    break;
  }
  case ISD::VAARG: {
    if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
      return;

    EVT VT = N->getValueType(0);

    if (VT == MVT::i64) {
      SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);

      Results.push_back(NewNode);
      Results.push_back(NewNode.getValue(1));
    }
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // LowerFP_TO_INT() can only handle f32 and f64.
    if (N->getOperand(0).getValueType() == MVT::ppcf128)
      return;
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  case ISD::TRUNCATE: {
    EVT TrgVT = N->getValueType(0);
    EVT OpVT = N->getOperand(0).getValueType();
    if (TrgVT.isVector() &&
        isOperationCustom(N->getOpcode(), TrgVT) &&
        OpVT.getSizeInBits() <= 128 &&
        isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
      Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
    return;
  }
  case ISD::BITCAST:
    // Don't handle bitcast here.
    return;
  }
}
//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Id);
  return Builder.CreateCall(Func, {});
}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr;
}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
    // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
    // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
    // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
    if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
      return Builder.CreateCall(
          Intrinsic::getDeclaration(
              Builder.GetInsertBlock()->getParent()->getParent(),
              Intrinsic::ppc_cfence, {Inst->getType()}),
          {Inst});
    // FIXME: Can use isync for rmw operation.
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  }
  return nullptr;
}
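
// Rough summary of the mapping implemented above (per the cited cppmem
// table): seq_cst operations get a leading sync; release (or stronger)
// operations get a leading lwsync; acquire (or stronger) loads get a
// trailing lwsync, except 64-bit plain loads, which use the lighter-weight
// cfence (lowered elsewhere to a dependent compare-branch-isync sequence).
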
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  auto LoadMnemonic = PPC::LDARX;
  auto StoreMnemonic = PPC::STDCX;
  switch (AtomicSize) {
  default:
    llvm_unreachable("Unexpected size of atomic entity");
  case 1:
    LoadMnemonic = PPC::LBARX;
    StoreMnemonic = PPC::STBCX;
    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
    break;
  case 2:
    LoadMnemonic = PPC::LHARX;
    StoreMnemonic = PPC::STHCX;
    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
    break;
  case 4:
    LoadMnemonic = PPC::LWARX;
    StoreMnemonic = PPC::STWCX;
    break;
  case 8:
    LoadMnemonic = PPC::LDARX;
    StoreMnemonic = PPC::STDCX;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register ptrA = MI.getOperand(1).getReg();
  Register ptrB = MI.getOperand(2).getReg();
  Register incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
    CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register TmpReg = (!BinOpcode) ? incr :
    RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
                                                   : &PPC::GPRCRegClass);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   add r0, dest, incr
  //   st[wd]cx. r0, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  // For max/min...
  //  loopMBB:
  //   l[wd]arx dest, ptr
  //   cmpl?[wd] incr, dest
  //   bgt exitMBB
  //  loop2MBB:
  //   st[wd]cx. dest, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
    .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  if (CmpOpcode) {
    // Signed comparisons of byte or halfword values must be sign-extended.
    if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
      Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
      BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
              ExtReg).addReg(dest);
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(ExtReg);
    } else
      BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(incr).addReg(dest);

    BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(StoreMnemonic))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  BuildMI(BB, dl, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}
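
// Example of the expansion built above for a 4-byte atomic add
// (BinOpcode == PPC::ADD4, no CmpOpcode):
//   loop:  lwarx  dest, ptrA, ptrB
//          add    tmp, incr, dest
//          stwcx. tmp, ptrA, ptrB
//          bne-   loop
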
MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
    MachineInstr &MI, MachineBasicBlock *BB,
    bool is8bit, // operation
    unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
  // If we support part-word atomic mnemonics, just use them
  if (Subtarget.hasPartwordAtomics())
    return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
                            CmpPred);

  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // In 64 bit mode we have to use 64 bits for addresses, even though the
  // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
  // registers without caring whether they're 32 or 64, but here we're
  // doing actual arithmetic on the addresses.
  bool is64bit = Subtarget.isPPC64();
  bool isLittleEndian = Subtarget.isLittleEndian();
  unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register ptrA = MI.getOperand(1).getReg();
  Register ptrB = MI.getOperand(2).getReg();
  Register incr = MI.getOperand(3).getReg();
  DebugLoc dl = MI.getDebugLoc();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB =
      CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  if (CmpOpcode)
    F->insert(It, loop2MBB);
  F->insert(It, exitMBB);
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  const TargetRegisterClass *RC =
      is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;

  Register PtrReg = RegInfo.createVirtualRegister(RC);
  Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
  Register ShiftReg =
      isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
  Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
  Register MaskReg = RegInfo.createVirtualRegister(GPRC);
  Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
  Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
  Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
  Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
  Register Ptr1Reg;
  Register TmpReg =
      (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // The 4-byte load must be aligned, while a char or short may be
  // anywhere in the word.  Hence all this nasty bookkeeping code.
  //   add ptr1, ptrA, ptrB [copy if ptrA==0]
  //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
  //   xori shift, shift1, 24 [16]
  //   rlwinm ptr, ptr1, 0, 0, 29
  //   slw incr2, incr, shift
  //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
  //   slw mask, mask2, shift
  //  loopMBB:
  //   lwarx tmpDest, ptr
  //   add tmp, tmpDest, incr2
  //   andc tmp2, tmpDest, mask
  //   and tmp3, tmp, mask
  //   or tmp4, tmp3, tmp2
  //   stwcx. tmp4, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  //   srw dest, tmpDest, shift
  if (ptrA != ZeroReg) {
    Ptr1Reg = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA)
        .addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }

  // We need to use a 32-bit subregister here to avoid a register-class
  // mismatch in 64-bit mode.
  BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
      .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
      .addImm(3)
      .addImm(27)
      .addImm(is8bit ? 28 : 27);
  if (!isLittleEndian)
    BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
        .addReg(Shift1Reg)
        .addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
        .addReg(Ptr1Reg)
        .addImm(0)
        .addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
        .addReg(Ptr1Reg)
        .addImm(0)
        .addImm(0)
        .addImm(29);
  BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg)
        .addImm(65535);
  }
  BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
      .addReg(Mask2Reg)
      .addReg(ShiftReg);

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
      .addReg(ZeroReg)
      .addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
        .addReg(Incr2Reg)
        .addReg(TmpDestReg);
  BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
      .addReg(TmpDestReg)
      .addReg(MaskReg);
  BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
  if (CmpOpcode) {
    // For unsigned comparisons, we can directly compare the shifted values.
    // For signed comparisons we shift and sign extend.
    Register SReg = RegInfo.createVirtualRegister(GPRC);
    BuildMI(BB, dl, TII->get(PPC::AND), SReg)
        .addReg(TmpDestReg)
        .addReg(MaskReg);
    unsigned ValueReg = SReg;
    unsigned CmpReg = Incr2Reg;
    if (CmpOpcode == PPC::CMPW) {
      ValueReg = RegInfo.createVirtualRegister(GPRC);
      BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
          .addReg(SReg)
          .addReg(ShiftReg);
      Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
      BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
          .addReg(ValueReg);
      ValueReg = ValueSReg;
      CmpReg = incr;
    }
    BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
        .addReg(CmpReg)
        .addReg(ValueReg);
    BuildMI(BB, dl, TII->get(PPC::BCC))
        .addImm(CmpPred)
        .addReg(PPC::CR0)
        .addMBB(exitMBB);
    BB->addSuccessor(loop2MBB);
    BB->addSuccessor(exitMBB);
    BB = loop2MBB;
  }
  BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
  BuildMI(BB, dl, TII->get(PPC::STWCX))
      .addReg(Tmp4Reg)
      .addReg(ZeroReg)
      .addReg(PtrReg);
  BuildMI(BB, dl, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE)
      .addReg(PPC::CR0)
      .addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;
  BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
      .addReg(TmpDestReg)
      .addReg(ShiftReg);
  return BB;
}
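
// Worked example for the shift bookkeeping above (byte case): for an
// address whose low two bits are 0b01, rlwinm(ptr1, 3, 27, 28) extracts
// those bits shifted left by 3, so shift1 == 8.  On little-endian that is
// already the lane shift; on big-endian the xori with 24 flips it to
// 8 ^ 24 == 16, since byte 1 of a big-endian word sits 16 bits up from the
// least-significant end.
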
llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  SjLjSetup mainMBB
  //  bl mainMBB
  //  v_restore = 1
  //  b sinkMBB
  //
  // mainMBB:
  //  buf[LabelOffset] = LR
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Note that the structure of the jmp_buf used here is not compatible
  // with that used by libc, and is not designed to be. Specifically, it
  // stores only those 'reserved' registers that LLVM does not otherwise
  // understand how to spill. Also, by convention, by the time this
  // intrinsic is called, Clang has already stored the frame address in the
  // first slot of the buffer and stack address in the third. Following the
  // X86 target code, we'll store the jump address in the second slot. We also
  // need to save the TOC pointer (R2) to handle jumps between shared
  // libraries, and that will be stored in the fourth slot. The thread
  // identifier (R13) is not affected.

  // thisMBB:
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t TOCOffset   = 3 * PVT.getStoreSize();
  const int64_t BPOffset    = 4 * PVT.getStoreSize();

  // Prepare the IP in a virtual register.
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  Register LabelReg = MRI.createVirtualRegister(PtrRC);
  Register BufReg = MI.getOperand(1).getReg();

  if (Subtarget.is64BitELFABI()) {
    setUsesTOCBasePtr(*MBB->getParent());
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
              .addReg(PPC::X2)
              .addImm(TOCOffset)
              .addReg(BufReg)
              .cloneMemRefs(MI);
  }

  // Naked functions never have a base pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned BaseReg;
  if (MF->getFunction().hasFnAttribute(Attribute::Naked))
    BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
  else
    BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;

  MIB = BuildMI(*thisMBB, MI, DL,
                TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
            .addReg(BaseReg)
            .addImm(BPOffset)
            .addReg(BufReg)
            .cloneMemRefs(MI);

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
  MIB.addRegMask(TRI->getNoPreservedMask());

  BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
            .addMBB(mainMBB);
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);

  thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
  thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());

  // mainMBB:
  //  mainDstReg = 0
  MIB =
      BuildMI(mainMBB, DL,
              TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);

  // Store IP
  if (Subtarget.isPPC64()) {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  } else {
    MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
              .addReg(LabelReg)
              .addImm(LabelOffset)
              .addReg(BufReg);
  }
  MIB.cloneMemRefs(MI);

  BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(PPC::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(thisMBB);

  MI.eraseFromParent();
  return sinkMBB;
}
11071 MachineBasicBlock *
11072 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11073 MachineBasicBlock *MBB) const {
11074 DebugLoc DL = MI.getDebugLoc();
11075 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11077 MachineFunction *MF = MBB->getParent();
11078 MachineRegisterInfo &MRI = MF->getRegInfo();
11080 MVT PVT = getPointerTy(MF->getDataLayout());
11081 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11082 "Invalid Pointer Size!");
11084 const TargetRegisterClass *RC =
11085 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11086 Register Tmp = MRI.createVirtualRegister(RC);
11087 // Since FP is only updated here but NOT referenced, it's treated as a GPR.
11088 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11089 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11090 unsigned BP =
11091 (PVT == MVT::i64)
11092 ? PPC::X30
11093 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11094 : PPC::R30);
11096 MachineInstrBuilder MIB;
11098 const int64_t LabelOffset = 1 * PVT.getStoreSize();
11099 const int64_t SPOffset = 2 * PVT.getStoreSize();
11100 const int64_t TOCOffset = 3 * PVT.getStoreSize();
11101 const int64_t BPOffset = 4 * PVT.getStoreSize();
11103 Register BufReg = MI.getOperand(0).getReg();
11105 // Reload FP (the jumped-to function may not have had a
11106 // frame pointer, and if so, then its r31 will be restored
11107 // as it was on entry to this function).
11108 if (PVT == MVT::i64) {
11109 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11110 .addImm(0)
11111 .addReg(BufReg);
11112 } else {
11113 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11114 .addImm(0)
11115 .addReg(BufReg);
11116 }
11117 MIB.cloneMemRefs(MI);
11119 // Reload IP
11120 if (PVT == MVT::i64) {
11121 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11122 .addImm(LabelOffset)
11123 .addReg(BufReg);
11124 } else {
11125 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11126 .addImm(LabelOffset)
11127 .addReg(BufReg);
11128 }
11129 MIB.cloneMemRefs(MI);
11131 // Reload SP
11132 if (PVT == MVT::i64) {
11133 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11134 .addImm(SPOffset)
11135 .addReg(BufReg);
11136 } else {
11137 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11138 .addImm(SPOffset)
11139 .addReg(BufReg);
11140 }
11141 MIB.cloneMemRefs(MI);
11143 // Reload BP
11144 if (PVT == MVT::i64) {
11145 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11146 .addImm(BPOffset)
11147 .addReg(BufReg);
11148 } else {
11149 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11150 .addImm(BPOffset)
11151 .addReg(BufReg);
11152 }
11153 MIB.cloneMemRefs(MI);
11155 // Reload TOC
11156 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11157 setUsesTOCBasePtr(*MBB->getParent());
11158 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11159 .addImm(TOCOffset)
11160 .addReg(BufReg)
11161 .cloneMemRefs(MI);
11162 }
11164 // Jump
11165 BuildMI(*MBB, MI, DL,
11166 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11167 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
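// Taken together, the loads above amount to roughly this sequence on a
// 64-bit target (a sketch, assuming PVT == MVT::i64):
// ld r31, 0(buf) ; FP
// ld tmp, 8(buf) ; IP (LabelOffset)
// ld r1, 16(buf) ; SP (SPOffset)
// ld r30, 32(buf) ; BP (BPOffset)
// ld r2, 24(buf) ; TOC (TOCOffset)
// mtctr tmp
// bctr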
11169 MI.eraseFromParent();
11170 return MBB;
11171 }
11173 MachineBasicBlock *
11174 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11175 MachineBasicBlock *BB) const {
11176 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11177 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11178 if (Subtarget.is64BitELFABI() &&
11179 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11180 // Call lowering should have added an r2 operand to indicate a dependence
11181 // on the TOC base pointer value. It can't however, because there is no
11182 // way to mark the dependence as implicit there, and so the stackmap code
11183 // will confuse it with a regular operand. Instead, add the dependence
11184 // here.
11185 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11186 }
11188 return emitPatchPoint(MI, BB);
11189 }
11191 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11192 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11193 return emitEHSjLjSetJmp(MI, BB);
11194 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11195 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11196 return emitEHSjLjLongJmp(MI, BB);
11197 }
11199 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11201 // To "insert" these instructions we actually have to insert their
11202 // control-flow patterns.
11203 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11204 MachineFunction::iterator It = ++BB->getIterator();
11206 MachineFunction *F = BB->getParent();
11208 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11209 MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11210 MI.getOpcode() == PPC::SELECT_I8) {
11211 SmallVector<MachineOperand, 2> Cond;
11212 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11213 MI.getOpcode() == PPC::SELECT_CC_I8)
11214 Cond.push_back(MI.getOperand(4));
11215 else
11216 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11217 Cond.push_back(MI.getOperand(1));
11219 DebugLoc dl = MI.getDebugLoc();
11220 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11221 MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11222 } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11223 MI.getOpcode() == PPC::SELECT_CC_F8 ||
11224 MI.getOpcode() == PPC::SELECT_CC_F16 ||
11225 MI.getOpcode() == PPC::SELECT_CC_QFRC ||
11226 MI.getOpcode() == PPC::SELECT_CC_QSRC ||
11227 MI.getOpcode() == PPC::SELECT_CC_QBRC ||
11228 MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11229 MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11230 MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11231 MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11232 MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11233 MI.getOpcode() == PPC::SELECT_CC_SPE ||
11234 MI.getOpcode() == PPC::SELECT_F4 ||
11235 MI.getOpcode() == PPC::SELECT_F8 ||
11236 MI.getOpcode() == PPC::SELECT_F16 ||
11237 MI.getOpcode() == PPC::SELECT_QFRC ||
11238 MI.getOpcode() == PPC::SELECT_QSRC ||
11239 MI.getOpcode() == PPC::SELECT_QBRC ||
11240 MI.getOpcode() == PPC::SELECT_SPE ||
11241 MI.getOpcode() == PPC::SELECT_SPE4 ||
11242 MI.getOpcode() == PPC::SELECT_VRRC ||
11243 MI.getOpcode() == PPC::SELECT_VSFRC ||
11244 MI.getOpcode() == PPC::SELECT_VSSRC ||
11245 MI.getOpcode() == PPC::SELECT_VSRC) {
11246 // The incoming instruction knows the destination vreg to set, the
11247 // condition code register to branch on, the true/false values to
11248 // select between, and a branch opcode to use.
11250 // thisMBB:
11251 // ...
11252 // TrueVal = ...
11253 // cmpTY ccX, r1, r2
11254 // bCC copy1MBB
11255 // fallthrough --> copy0MBB
11256 MachineBasicBlock *thisMBB = BB;
11257 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11258 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11259 DebugLoc dl = MI.getDebugLoc();
11260 F->insert(It, copy0MBB);
11261 F->insert(It, sinkMBB);
11263 // Transfer the remainder of BB and its successor edges to sinkMBB.
11264 sinkMBB->splice(sinkMBB->begin(), BB,
11265 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11266 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11268 // Next, add the true and fallthrough blocks as its successors.
11269 BB->addSuccessor(copy0MBB);
11270 BB->addSuccessor(sinkMBB);
11272 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11273 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11274 MI.getOpcode() == PPC::SELECT_F16 ||
11275 MI.getOpcode() == PPC::SELECT_SPE4 ||
11276 MI.getOpcode() == PPC::SELECT_SPE ||
11277 MI.getOpcode() == PPC::SELECT_QFRC ||
11278 MI.getOpcode() == PPC::SELECT_QSRC ||
11279 MI.getOpcode() == PPC::SELECT_QBRC ||
11280 MI.getOpcode() == PPC::SELECT_VRRC ||
11281 MI.getOpcode() == PPC::SELECT_VSFRC ||
11282 MI.getOpcode() == PPC::SELECT_VSSRC ||
11283 MI.getOpcode() == PPC::SELECT_VSRC) {
11284 BuildMI(BB, dl, TII->get(PPC::BC))
11285 .addReg(MI.getOperand(1).getReg())
11286 .addMBB(sinkMBB);
11287 } else {
11288 unsigned SelectPred = MI.getOperand(4).getImm();
11289 BuildMI(BB, dl, TII->get(PPC::BCC))
11290 .addImm(SelectPred)
11291 .addReg(MI.getOperand(1).getReg())
11292 .addMBB(sinkMBB);
11293 }
11295 // copy0MBB:
11296 // %FalseValue = ...
11297 // # fallthrough to sinkMBB
11298 BB = copy0MBB;
11300 // Update machine-CFG edges
11301 BB->addSuccessor(sinkMBB);
11303 // sinkMBB:
11304 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11305 // ...
11306 BB = sinkMBB;
11307 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11308 .addReg(MI.getOperand(3).getReg())
11309 .addMBB(copy0MBB)
11310 .addReg(MI.getOperand(2).getReg())
11311 .addMBB(thisMBB);
11312 } else if (MI.getOpcode() == PPC::ReadTB) {
11313 // To read the 64-bit time-base register on a 32-bit target, we read the
11314 // two halves. Should the counter have wrapped while it was being read, we
11315 // need to try again.
11317 // readLoop:
11318 // mfspr Rx,TBU # load from TBU
11319 // mfspr Ry,TB # load from TB
11320 // mfspr Rz,TBU # load from TBU
11321 // cmpw crX,Rx,Rz # check if 'old'='new'
11322 // bne readLoop # branch if they're not equal
11323 // ...
11325 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11326 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11327 DebugLoc dl = MI.getDebugLoc();
11328 F->insert(It, readMBB);
11329 F->insert(It, sinkMBB);
11331 // Transfer the remainder of BB and its successor edges to sinkMBB.
11332 sinkMBB->splice(sinkMBB->begin(), BB,
11333 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11334 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11336 BB->addSuccessor(readMBB);
11338 BB = readMBB;
11339 MachineRegisterInfo &RegInfo = F->getRegInfo();
11340 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11341 Register LoReg = MI.getOperand(0).getReg();
11342 Register HiReg = MI.getOperand(1).getReg();
11344 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11345 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11346 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
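// The immediates are SPR numbers: 269 reads TBU (the upper half of the
// time base) and 268 reads TB/TBL (the lower half) on 32-bit
// implementations.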
11348 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11350 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11351 .addReg(HiReg)
11352 .addReg(ReadAgainReg);
11353 BuildMI(BB, dl, TII->get(PPC::BCC))
11354 .addImm(PPC::PRED_NE)
11355 .addReg(CmpReg)
11356 .addMBB(readMBB);
11358 BB->addSuccessor(readMBB);
11359 BB->addSuccessor(sinkMBB);
11360 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11361 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11362 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11363 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11364 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11365 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11366 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11367 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11369 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11370 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11371 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11372 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11373 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11374 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11375 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11376 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11378 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11379 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11380 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11381 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11382 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11383 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11384 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11385 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11387 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11388 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11389 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11390 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11391 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11392 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11393 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11394 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11396 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11397 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11398 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11399 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11400 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11401 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11402 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11403 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11405 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11406 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11407 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11408 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11409 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11410 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11411 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11412 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11414 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11415 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11416 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11417 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11418 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11419 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11420 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11421 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11423 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11424 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11425 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11426 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11427 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11428 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11429 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11430 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11432 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11433 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11434 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11435 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11436 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11437 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11438 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11439 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11441 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11442 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11443 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11444 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11445 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11446 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11447 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11448 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11450 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11451 BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11452 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11453 BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11454 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11455 BB = EmitAtomicBinary(MI, BB, 4, 0);
11456 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11457 BB = EmitAtomicBinary(MI, BB, 8, 0);
11458 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11459 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11460 (Subtarget.hasPartwordAtomics() &&
11461 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11462 (Subtarget.hasPartwordAtomics() &&
11463 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11464 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11466 auto LoadMnemonic = PPC::LDARX;
11467 auto StoreMnemonic = PPC::STDCX;
11468 switch (MI.getOpcode()) {
11469 default:
11470 llvm_unreachable("Compare and swap of unknown size");
11471 case PPC::ATOMIC_CMP_SWAP_I8:
11472 LoadMnemonic = PPC::LBARX;
11473 StoreMnemonic = PPC::STBCX;
11474 assert(Subtarget.hasPartwordAtomics() && "Partword atomics unsupported");
11475 break;
11476 case PPC::ATOMIC_CMP_SWAP_I16:
11477 LoadMnemonic = PPC::LHARX;
11478 StoreMnemonic = PPC::STHCX;
11479 assert(Subtarget.hasPartwordAtomics() && "Partword atomics unsupported");
11480 break;
11481 case PPC::ATOMIC_CMP_SWAP_I32:
11482 LoadMnemonic = PPC::LWARX;
11483 StoreMnemonic = PPC::STWCX;
11484 break;
11485 case PPC::ATOMIC_CMP_SWAP_I64:
11486 LoadMnemonic = PPC::LDARX;
11487 StoreMnemonic = PPC::STDCX;
11488 break;
11489 }
11490 Register dest = MI.getOperand(0).getReg();
11491 Register ptrA = MI.getOperand(1).getReg();
11492 Register ptrB = MI.getOperand(2).getReg();
11493 Register oldval = MI.getOperand(3).getReg();
11494 Register newval = MI.getOperand(4).getReg();
11495 DebugLoc dl = MI.getDebugLoc();
11497 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11498 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11499 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11500 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11501 F->insert(It, loop1MBB);
11502 F->insert(It, loop2MBB);
11503 F->insert(It, midMBB);
11504 F->insert(It, exitMBB);
11505 exitMBB->splice(exitMBB->begin(), BB,
11506 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11507 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11509 // thisMBB:
11510 // ...
11511 // fallthrough --> loopMBB
11512 BB->addSuccessor(loop1MBB);
11514 // loop1MBB:
11515 // l[bhwd]arx dest, ptr
11516 // cmp[wd] dest, oldval
11517 // bne- midMBB
11518 // loop2MBB:
11519 // st[bhwd]cx. newval, ptr
11520 // bne- loop1MBB
11521 // b exitBB
11522 // midMBB:
11523 // st[bhwd]cx. dest, ptr
11524 // exitBB:
11525 BB = loop1MBB;
11526 BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11527 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11528 .addReg(dest)
11529 .addReg(oldval);
11530 BuildMI(BB, dl, TII->get(PPC::BCC))
11531 .addImm(PPC::PRED_NE)
11532 .addReg(PPC::CR0)
11533 .addMBB(midMBB);
11534 BB->addSuccessor(loop2MBB);
11535 BB->addSuccessor(midMBB);
11537 BB = loop2MBB;
11538 BuildMI(BB, dl, TII->get(StoreMnemonic))
11539 .addReg(newval)
11540 .addReg(ptrA)
11541 .addReg(ptrB);
11542 BuildMI(BB, dl, TII->get(PPC::BCC))
11543 .addImm(PPC::PRED_NE)
11544 .addReg(PPC::CR0)
11545 .addMBB(loop1MBB);
11546 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11547 BB->addSuccessor(loop1MBB);
11548 BB->addSuccessor(exitMBB);
11550 BB = midMBB;
11551 BuildMI(BB, dl, TII->get(StoreMnemonic))
11552 .addReg(dest)
11553 .addReg(ptrA)
11554 .addReg(ptrB);
11555 BB->addSuccessor(exitMBB);
11557 // exitMBB:
11558 // ...
11559 BB = exitMBB;
11560 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11561 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11562 // We must use 64-bit registers for addresses when targeting 64-bit,
11563 // since we're actually doing arithmetic on them. Other registers
11564 // can be 32-bit.
11565 bool is64bit = Subtarget.isPPC64();
11566 bool isLittleEndian = Subtarget.isLittleEndian();
11567 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11569 Register dest = MI.getOperand(0).getReg();
11570 Register ptrA = MI.getOperand(1).getReg();
11571 Register ptrB = MI.getOperand(2).getReg();
11572 Register oldval = MI.getOperand(3).getReg();
11573 Register newval = MI.getOperand(4).getReg();
11574 DebugLoc dl = MI.getDebugLoc();
11576 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11577 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11578 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11579 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11580 F->insert(It, loop1MBB);
11581 F->insert(It, loop2MBB);
11582 F->insert(It, midMBB);
11583 F->insert(It, exitMBB);
11584 exitMBB->splice(exitMBB->begin(), BB,
11585 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11586 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11588 MachineRegisterInfo &RegInfo = F->getRegInfo();
11589 const TargetRegisterClass *RC =
11590 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11591 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11593 Register PtrReg = RegInfo.createVirtualRegister(RC);
11594 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11595 Register ShiftReg =
11596 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11597 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11598 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11599 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11600 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11601 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11602 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11603 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11604 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11605 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11606 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11607 Register Ptr1Reg;
11608 Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11609 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11611 // thisMBB:
11612 // fallthrough --> loopMBB
11613 BB->addSuccessor(loop1MBB);
11615 // The 4-byte load must be aligned, while a char or short may be
11616 // anywhere in the word. Hence all this nasty bookkeeping code.
11617 // add ptr1, ptrA, ptrB [copy if ptrA==0]
11618 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11619 // xori shift, shift1, 24 [16]
11620 // rlwinm ptr, ptr1, 0, 0, 29
11621 // slw newval2, newval, shift
11622 // slw oldval2, oldval, shift
11623 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11624 // slw mask, mask2, shift
11625 // and newval3, newval2, mask
11626 // and oldval3, oldval2, mask
11627 // loop1MBB:
11628 // lwarx tmpDest, ptr
11629 // and tmp, tmpDest, mask
11630 // cmpw tmp, oldval3
11631 // bne- midMBB
11632 // loop2MBB:
11633 // andc tmp2, tmpDest, mask
11634 // or tmp4, tmp2, newval3
11635 // stwcx. tmp4, ptr
11636 // bne- loop1MBB
11637 // b exitBB
11638 // midMBB:
11639 // stwcx. tmpDest, ptr
11640 // exitBB:
11641 // srw dest, tmpDest, shift
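// A worked example (illustrative): an I8 compare-and-swap of the byte at
// offset 1 in an aligned word, on a little-endian target, yields shift = 8
// and mask = 0xFF00. The loop compares (tmpDest & 0xFF00) with
// ((oldval << 8) & 0xFF00) and, on a match, stores
// (tmpDest & ~0xFF00) | ((newval << 8) & 0xFF00); the final srw moves the
// loaded byte back into the low bits of dest.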
11642 if (ptrA != ZeroReg) {
11643 Ptr1Reg = RegInfo.createVirtualRegister(RC);
11644 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11645 .addReg(ptrA)
11646 .addReg(ptrB);
11647 } else {
11648 Ptr1Reg = ptrB;
11649 }
11651 // We need to use a 32-bit subregister here to avoid a register class
11652 // mismatch in 64-bit mode.
11653 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11654 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11655 .addImm(3)
11656 .addImm(27)
11657 .addImm(is8bit ? 28 : 27);
11658 if (!isLittleEndian)
11659 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11660 .addReg(Shift1Reg)
11661 .addImm(is8bit ? 24 : 16);
11662 if (is64bit)
11663 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11664 .addReg(Ptr1Reg)
11665 .addImm(0)
11666 .addImm(61);
11667 else
11668 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11669 .addReg(Ptr1Reg)
11670 .addImm(0)
11671 .addImm(0)
11672 .addImm(29);
11673 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
11674 .addReg(newval)
11675 .addReg(ShiftReg);
11676 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
11677 .addReg(oldval)
11678 .addReg(ShiftReg);
11679 if (is8bit)
11680 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11681 else {
11682 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11683 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11684 .addReg(Mask3Reg)
11685 .addImm(65535);
11686 }
11687 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11688 .addReg(Mask2Reg)
11689 .addReg(ShiftReg);
11690 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
11691 .addReg(NewVal2Reg)
11692 .addReg(MaskReg);
11693 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
11694 .addReg(OldVal2Reg)
11695 .addReg(MaskReg);
11697 BB = loop1MBB;
11698 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11699 .addReg(ZeroReg)
11700 .addReg(PtrReg);
11701 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
11702 .addReg(TmpDestReg)
11703 .addReg(MaskReg);
11704 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
11705 .addReg(TmpReg)
11706 .addReg(OldVal3Reg);
11707 BuildMI(BB, dl, TII->get(PPC::BCC))
11708 .addImm(PPC::PRED_NE)
11709 .addReg(PPC::CR0)
11710 .addMBB(midMBB);
11711 BB->addSuccessor(loop2MBB);
11712 BB->addSuccessor(midMBB);
11714 BB = loop2MBB;
11715 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11716 .addReg(TmpDestReg)
11717 .addReg(MaskReg);
11718 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
11719 .addReg(Tmp2Reg)
11720 .addReg(NewVal3Reg);
11721 BuildMI(BB, dl, TII->get(PPC::STWCX))
11722 .addReg(Tmp4Reg)
11723 .addReg(ZeroReg)
11724 .addReg(PtrReg);
11725 BuildMI(BB, dl, TII->get(PPC::BCC))
11726 .addImm(PPC::PRED_NE)
11727 .addReg(PPC::CR0)
11728 .addMBB(loop1MBB);
11729 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11730 BB->addSuccessor(loop1MBB);
11731 BB->addSuccessor(exitMBB);
11733 BB = midMBB;
11734 BuildMI(BB, dl, TII->get(PPC::STWCX))
11735 .addReg(TmpDestReg)
11736 .addReg(ZeroReg)
11737 .addReg(PtrReg);
11738 BB->addSuccessor(exitMBB);
11740 // exitMBB:
11741 // ...
11742 BB = exitMBB;
11743 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11744 .addReg(TmpDestReg)
11745 .addReg(ShiftReg);
11746 } else if (MI.getOpcode() == PPC::FADDrtz) {
11747 // This pseudo performs an FADD with rounding mode temporarily forced
11748 // to round-to-zero. We emit this via custom inserter since the FPSCR
11749 // is not modeled at the SelectionDAG level.
11750 Register Dest = MI.getOperand(0).getReg();
11751 Register Src1 = MI.getOperand(1).getReg();
11752 Register Src2 = MI.getOperand(2).getReg();
11753 DebugLoc dl = MI.getDebugLoc();
11755 MachineRegisterInfo &RegInfo = F->getRegInfo();
11756 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11758 // Save FPSCR value.
11759 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
11761 // Set rounding mode to round-to-zero.
11762 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
11763 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
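// In the FPSCR, bits 30:31 (RN) select the rounding mode; setting bit 31
// and clearing bit 30 gives RN = 0b01, i.e. round toward zero.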
11765 // Perform addition.
11766 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
11768 // Restore FPSCR value.
11769 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
11770 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11771 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
11772 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11773 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
11774 unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11775 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
11776 ? PPC::ANDI8_rec
11777 : PPC::ANDI_rec;
11778 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11779 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
11781 MachineRegisterInfo &RegInfo = F->getRegInfo();
11782 Register Dest = RegInfo.createVirtualRegister(
11783 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11785 DebugLoc Dl = MI.getDebugLoc();
11786 BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
11787 .addReg(MI.getOperand(1).getReg())
11788 .addImm(1);
11789 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11790 MI.getOperand(0).getReg())
11791 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
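// The recording andi. of the low bit sets CR0: EQ if the bit is zero, GT
// if it is one, so copying CR0EQ or CR0GT yields the desired i1 result
// directly.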
11792 } else if (MI.getOpcode() == PPC::TCHECK_RET) {
11793 DebugLoc Dl = MI.getDebugLoc();
11794 MachineRegisterInfo &RegInfo = F->getRegInfo();
11795 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11796 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
11797 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11798 MI.getOperand(0).getReg())
11799 .addReg(CRReg);
11800 } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
11801 DebugLoc Dl = MI.getDebugLoc();
11802 unsigned Imm = MI.getOperand(1).getImm();
11803 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
11804 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11805 MI.getOperand(0).getReg())
11806 .addReg(PPC::CR0EQ);
11807 } else if (MI.getOpcode() == PPC::SETRNDi) {
11808 DebugLoc dl = MI.getDebugLoc();
11809 Register OldFPSCRReg = MI.getOperand(0).getReg();
11811 // Save FPSCR value.
11812 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11814 // The floating point rounding mode is in bits 62:63 of the FPSCR, and has
11815 // the following settings:
11816 // 00 Round to nearest
11817 // 01 Round to zero
11818 // 10 Round to +inf
11819 // 11 Round to -inf
11821 // When the operand is an immediate, use its two least significant bits
11822 // to set bits 62:63 of the FPSCR.
11823 unsigned Mode = MI.getOperand(1).getImm();
11824 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
11825 .addImm(31);
11827 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
11828 .addImm(30);
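// For example (illustrative): SETRNDi 1 selects round toward zero, so the
// code above emits mtfsb1 31 (Mode & 1 set) and mtfsb0 30 (Mode & 2 clear).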
11829 } else if (MI.getOpcode() == PPC::SETRND) {
11830 DebugLoc dl = MI.getDebugLoc();
11832 // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
11833 // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
11834 // If the target doesn't have DirectMove, we should use the stack to do the
11835 // conversion, because the target doesn't have instructions like mtvsrd
11836 // or mfvsrd to do this conversion directly.
11837 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
11838 if (Subtarget.hasDirectMove()) {
11839 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
11840 .addReg(SrcReg);
11841 } else {
11842 // Use stack to do the register copy.
11843 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
11844 MachineRegisterInfo &RegInfo = F->getRegInfo();
11845 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
11846 if (RC == &PPC::F8RCRegClass) {
11847 // Copy register from F8RCRegClass to G8RCRegClass.
11848 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
11849 "Unsupported RegClass.");
11851 StoreOp = PPC::STFD;
11852 LoadOp = PPC::LD;
11853 } else {
11854 // Copy register from G8RCRegClass to F8RCRegClass.
11855 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
11856 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
11857 "Unsupported RegClass.");
11858 }
11860 MachineFrameInfo &MFI = F->getFrameInfo();
11861 int FrameIdx = MFI.CreateStackObject(8, 8, false);
11863 MachineMemOperand *MMOStore = F->getMachineMemOperand(
11864 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11865 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
11866 MFI.getObjectAlignment(FrameIdx));
11868 // Store the SrcReg into the stack.
11869 BuildMI(*BB, MI, dl, TII->get(StoreOp))
11870 .addReg(SrcReg)
11871 .addImm(0)
11872 .addFrameIndex(FrameIdx)
11873 .addMemOperand(MMOStore);
11875 MachineMemOperand *MMOLoad = F->getMachineMemOperand(
11876 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11877 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
11878 MFI.getObjectAlignment(FrameIdx));
11880 // Load from the stack where SrcReg is stored, and save to DestReg,
11881 // so we have done the RegClass conversion from RegClass::SrcReg to
11882 // RegClass::DestReg.
11883 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
11884 .addImm(0)
11885 .addFrameIndex(FrameIdx)
11886 .addMemOperand(MMOLoad);
11887 }
11888 };
11890 Register OldFPSCRReg = MI.getOperand(0).getReg();
11892 // Save FPSCR value.
11893 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11895 // When the operand is a gprc register, use its two least significant bits
11896 // and the mtfsf instruction to set bits 62:63 of the FPSCR:
11898 // copy OldFPSCRTmpReg, OldFPSCRReg
11899 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
11900 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
11901 // copy NewFPSCRReg, NewFPSCRTmpReg
11902 // mtfsf 255, NewFPSCRReg
11903 MachineOperand SrcOp = MI.getOperand(1);
11904 MachineRegisterInfo &RegInfo = F->getRegInfo();
11905 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11907 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
11909 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11910 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11912 // The first operand of INSERT_SUBREG should be a register which has
11913 // subregisters, we only care about its RegClass, so we should use an
11914 // IMPLICIT_DEF register.
11915 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
11916 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
11917 .addReg(ImDefReg)
11918 .add(SrcOp)
11919 .addImm(1);
11921 Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11922 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
11923 .addReg(OldFPSCRTmpReg)
11924 .addReg(ExtSrcReg)
11925 .addImm(62)
11926 .addImm(1);
11928 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11929 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
11931 // A mask of 255 writes bits 32:63 of NewFPSCRReg into bits 32:63 of
11932 // the FPSCR.
11933 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
11934 .addImm(255)
11935 .addReg(NewFPSCRReg)
11936 .addImm(0)
11937 .addImm(0);
11938 } else {
11939 llvm_unreachable("Unexpected instr type to insert");
11940 }
11942 MI.eraseFromParent(); // The pseudo instruction is gone now.
11943 return BB;
11944 }
11946 //===----------------------------------------------------------------------===//
11947 // Target Optimization Hooks
11948 //===----------------------------------------------------------------------===//
11950 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
11951 // For the estimates, convergence is quadratic, so we essentially double the
11952 // number of digits correct after every iteration. For both FRE and FRSQRTE,
11953 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
11954 // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
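// A quick sanity check of the counts below (illustrative): with 2^-5
// initial accuracy the correct bits double per step, 5 -> 10 -> 20 -> 40,
// so f32's 24-bit significand needs 3 steps and f64's 53-bit significand
// needs 4; with 2^-14 (hasRecipPrec), 14 -> 28 covers f32 in one step and
// 14 -> 28 -> 56 covers f64 in two.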
11955 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
11956 if (VT.getScalarType() == MVT::f64)
11957 RefinementSteps++;
11958 return RefinementSteps;
11959 }
11961 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
11962 int Enabled, int &RefinementSteps,
11963 bool &UseOneConstNR,
11964 bool Reciprocal) const {
11965 EVT VT = Operand.getValueType();
11966 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
11967 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
11968 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
11969 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
11970 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
11971 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
11972 if (RefinementSteps == ReciprocalEstimate::Unspecified)
11973 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
11975 // The Newton-Raphson computation with a single constant does not provide
11976 // enough accuracy on some CPUs.
11977 UseOneConstNR = !Subtarget.needsTwoConstNR();
11978 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
11979 }
11981 return SDValue();
11982 }
11983 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
11984 int Enabled,
11985 int &RefinementSteps) const {
11986 EVT VT = Operand.getValueType();
11987 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
11988 (VT == MVT::f64 && Subtarget.hasFRE()) ||
11989 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
11990 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
11991 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
11992 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
11993 if (RefinementSteps == ReciprocalEstimate::Unspecified)
11994 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
11995 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
11996 }
11998 return SDValue();
11999 }
12000 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12001 // Note: This functionality is used only when unsafe-fp-math is enabled, and
12002 // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12003 // enabled for division), this functionality is redundant with the default
12004 // combiner logic (once the division -> reciprocal/multiply transformation
12005 // has taken place). As a result, this matters more for older cores than for
12006 // newer ones.
12008 // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12009 // reciprocal if there are two or more FDIVs (for embedded cores with only
12010 // one FP pipeline) or three or more FDIVs (for generic OOO cores).
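// For example (illustrative): under unsafe-fp-math, x/d + y/d can become
// r = 1.0/d; x*r + y*r, trading the second division for one reciprocal
// and two multiplies.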
12011 switch (Subtarget.getCPUDirective()) {
12012 default:
12013 return 3;
12014 case PPC::DIR_440:
12015 case PPC::DIR_A2:
12016 case PPC::DIR_E500:
12017 case PPC::DIR_E500mc:
12018 case PPC::DIR_E5500:
12019 return 2;
12020 }
12021 }
12023 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12024 // collapsed, and so we need to look through chains of them.
12025 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12026 int64_t& Offset, SelectionDAG &DAG) {
12027 if (DAG.isBaseWithConstantOffset(Loc)) {
12028 Base = Loc.getOperand(0);
12029 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12031 // The base might itself be a base plus an offset, and if so, accumulate
12032 // that as well.
12033 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12034 }
12035 }
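// For example (illustrative): given Loc = (add (add X, 8), 16), the
// recursion accumulates Offset = 24 with Base = X.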
12037 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12038 unsigned Bytes, int Dist,
12039 SelectionDAG &DAG) {
12040 if (VT.getSizeInBits() / 8 != Bytes)
12041 return false;
12043 SDValue BaseLoc = Base->getBasePtr();
12044 if (Loc.getOpcode() == ISD::FrameIndex) {
12045 if (BaseLoc.getOpcode() != ISD::FrameIndex)
12046 return false;
12047 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12048 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
12049 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12050 int FS = MFI.getObjectSize(FI);
12051 int BFS = MFI.getObjectSize(BFI);
12052 if (FS != BFS || FS != (int)Bytes) return false;
12053 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12054 }
12056 SDValue Base1 = Loc, Base2 = BaseLoc;
12057 int64_t Offset1 = 0, Offset2 = 0;
12058 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12059 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12060 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12061 return true;
12063 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12064 const GlobalValue *GV1 = nullptr;
12065 const GlobalValue *GV2 = nullptr;
12066 Offset1 = 0;
12067 Offset2 = 0;
12068 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12069 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12070 if (isGA1 && isGA2 && GV1 == GV2)
12071 return Offset1 == (Offset2 + Dist*Bytes);
12073 return false;
12074 }
12075 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12076 // not enforce equality of the chain operands.
12077 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12078 unsigned Bytes, int Dist,
12079 SelectionDAG &DAG) {
12080 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12081 EVT VT = LS->getMemoryVT();
12082 SDValue Loc = LS->getBasePtr();
12083 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12084 }
12086 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12087 EVT VT;
12088 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12089 default: return false;
12090 case Intrinsic::ppc_qpx_qvlfd:
12091 case Intrinsic::ppc_qpx_qvlfda:
12092 VT = MVT::v4f64;
12093 break;
12094 case Intrinsic::ppc_qpx_qvlfs:
12095 case Intrinsic::ppc_qpx_qvlfsa:
12096 VT = MVT::v4f32;
12097 break;
12098 case Intrinsic::ppc_qpx_qvlfcd:
12099 case Intrinsic::ppc_qpx_qvlfcda:
12100 VT = MVT::v2f64;
12101 break;
12102 case Intrinsic::ppc_qpx_qvlfcs:
12103 case Intrinsic::ppc_qpx_qvlfcsa:
12104 VT = MVT::v2f32;
12105 break;
12106 case Intrinsic::ppc_qpx_qvlfiwa:
12107 case Intrinsic::ppc_qpx_qvlfiwz:
12108 case Intrinsic::ppc_altivec_lvx:
12109 case Intrinsic::ppc_altivec_lvxl:
12110 case Intrinsic::ppc_vsx_lxvw4x:
12111 case Intrinsic::ppc_vsx_lxvw4x_be:
12112 VT = MVT::v4i32;
12113 break;
12114 case Intrinsic::ppc_vsx_lxvd2x:
12115 case Intrinsic::ppc_vsx_lxvd2x_be:
12116 VT = MVT::v2f64;
12117 break;
12118 case Intrinsic::ppc_altivec_lvebx:
12119 VT = MVT::i8;
12120 break;
12121 case Intrinsic::ppc_altivec_lvehx:
12122 VT = MVT::i16;
12123 break;
12124 case Intrinsic::ppc_altivec_lvewx:
12125 VT = MVT::i32;
12126 break;
12127 }
12129 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12130 }
12132 if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12133 EVT VT;
12134 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12135 default: return false;
12136 case Intrinsic::ppc_qpx_qvstfd:
12137 case Intrinsic::ppc_qpx_qvstfda:
12138 VT = MVT::v4f64;
12139 break;
12140 case Intrinsic::ppc_qpx_qvstfs:
12141 case Intrinsic::ppc_qpx_qvstfsa:
12142 VT = MVT::v4f32;
12143 break;
12144 case Intrinsic::ppc_qpx_qvstfcd:
12145 case Intrinsic::ppc_qpx_qvstfcda:
12146 VT = MVT::v2f64;
12147 break;
12148 case Intrinsic::ppc_qpx_qvstfcs:
12149 case Intrinsic::ppc_qpx_qvstfcsa:
12150 VT = MVT::v2f32;
12151 break;
12152 case Intrinsic::ppc_qpx_qvstfiw:
12153 case Intrinsic::ppc_qpx_qvstfiwa:
12154 case Intrinsic::ppc_altivec_stvx:
12155 case Intrinsic::ppc_altivec_stvxl:
12156 case Intrinsic::ppc_vsx_stxvw4x:
12157 VT = MVT::v4i32;
12158 break;
12159 case Intrinsic::ppc_vsx_stxvd2x:
12160 VT = MVT::v2f64;
12161 break;
12162 case Intrinsic::ppc_vsx_stxvw4x_be:
12163 VT = MVT::v4i32;
12164 break;
12165 case Intrinsic::ppc_vsx_stxvd2x_be:
12166 VT = MVT::v2f64;
12167 break;
12168 case Intrinsic::ppc_altivec_stvebx:
12169 VT = MVT::i8;
12170 break;
12171 case Intrinsic::ppc_altivec_stvehx:
12172 VT = MVT::i16;
12173 break;
12174 case Intrinsic::ppc_altivec_stvewx:
12175 VT = MVT::i32;
12176 break;
12177 }
12179 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12180 }
12182 return false;
12183 }
12185 // Return true if there is a nearby consecutive load to the one provided
12186 // (regardless of alignment). We search up and down the chain, looking through
12187 // token factors and other loads (but nothing else). As a result, a true result
12188 // indicates that it is safe to create a new consecutive load adjacent to the
12189 // one provided.
12190 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12191 SDValue Chain = LD->getChain();
12192 EVT VT = LD->getMemoryVT();
12194 SmallSet<SDNode *, 16> LoadRoots;
12195 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12196 SmallSet<SDNode *, 16> Visited;
12198 // First, search up the chain, branching to follow all token-factor operands.
12199 // If we find a consecutive load, then we're done, otherwise, record all
12200 // nodes just above the top-level loads and token factors.
12201 while (!Queue.empty()) {
12202 SDNode *ChainNext = Queue.pop_back_val();
12203 if (!Visited.insert(ChainNext).second)
12204 continue;
12206 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12207 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12208 return true;
12210 if (!Visited.count(ChainLD->getChain().getNode()))
12211 Queue.push_back(ChainLD->getChain().getNode());
12212 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12213 for (const SDUse &O : ChainNext->ops())
12214 if (!Visited.count(O.getNode()))
12215 Queue.push_back(O.getNode());
12216 } else
12217 LoadRoots.insert(ChainNext);
12218 }
12220 // Second, search down the chain, starting from the top-level nodes recorded
12221 // in the first phase. These top-level nodes are the nodes just above all
12222 // loads and token factors. Starting with their uses, recursively look through
12223 // all loads (just the chain uses) and token factors to find a consecutive
12224 // load.
12225 Visited.clear();
12226 Queue.clear();
12228 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12229 IE = LoadRoots.end(); I != IE; ++I) {
12230 Queue.push_back(*I);
12232 while (!Queue.empty()) {
12233 SDNode *LoadRoot = Queue.pop_back_val();
12234 if (!Visited.insert(LoadRoot).second)
12235 continue;
12237 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12238 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12239 return true;
12241 for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12242 UE = LoadRoot->use_end(); UI != UE; ++UI)
12243 if (((isa<MemSDNode>(*UI) &&
12244 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12245 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12246 Queue.push_back(*UI);
12247 }
12248 }
12250 return false;
12251 }
12253 /// This function is called when we have proved that a SETCC node can be replaced
12254 /// by subtraction (and other supporting instructions) so that the result of
12255 /// the comparison is kept in a GPR instead of a CR. This function is purely
12256 /// for codegen purposes and has some flags to guide the codegen process.
12257 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12258 bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12259 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12261 // Zero extend the operands to the largest legal integer. Originally, they
12262 // must be of a strictly smaller size.
12263 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12264 DAG.getConstant(Size, DL, MVT::i32));
12265 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12266 DAG.getConstant(Size, DL, MVT::i32));
12268 // Swap the operands if needed, depending on the condition code.
12269 if (Swap)
12270 std::swap(Op0, Op1);
12272 // Subtract extended integers.
12273 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12275 // Move the sign bit to the least significant position and zero out the rest.
12276 // Now the least significant bit carries the result of original comparison.
12277 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12278 DAG.getConstant(Size - 1, DL, MVT::i32));
12279 auto Final = Shifted;
12281 // Complement the result if needed, based on the condition code.
12282 if (Complement)
12283 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12284 DAG.getConstant(1, DL, MVT::i64));
12286 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12287 }
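// A worked example (illustrative): for i32 operands with Size = 64,
// x <u y becomes (zext x) - (zext y), whose bit 63 is the borrow; with
// x = 1 and y = 2 the subtraction gives 0xFFFFFFFFFFFFFFFF, and shifting
// right by Size - 1 = 63 leaves 1, the expected result of the comparison.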
12289 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12290 DAGCombinerInfo &DCI) const {
12291 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12293 SelectionDAG &DAG = DCI.DAG;
12294 SDLoc DL(N);
12296 // Size of integers being compared has a critical role in the following
12297 // analysis, so we prefer to do this when all types are legal.
12298 if (!DCI.isAfterLegalizeDAG())
12299 return SDValue();
12301 // If all users of SETCC extend its value to a legal integer type
12302 // then we replace SETCC with a subtraction
12303 for (SDNode::use_iterator UI = N->use_begin(),
12304 UE = N->use_end(); UI != UE; ++UI) {
12305 if (UI->getOpcode() != ISD::ZERO_EXTEND)
12306 return SDValue();
12307 }
12309 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12310 auto OpSize = N->getOperand(0).getValueSizeInBits();
12312 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12314 if (OpSize < Size) {
12315 switch (CC) {
12316 default: break;
12317 case ISD::SETULT:
12318 return generateEquivalentSub(N, Size, false, false, DL, DAG);
12319 case ISD::SETULE:
12320 return generateEquivalentSub(N, Size, true, true, DL, DAG);
12321 case ISD::SETUGT:
12322 return generateEquivalentSub(N, Size, false, true, DL, DAG);
12323 case ISD::SETUGE:
12324 return generateEquivalentSub(N, Size, true, false, DL, DAG);
12325 }
12326 }
12328 return SDValue();
12329 }
12331 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12332 DAGCombinerInfo &DCI) const {
12333 SelectionDAG &DAG = DCI.DAG;
12334 SDLoc dl(N);
12336 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12337 // If we're tracking CR bits, we need to be careful that we don't have:
12338 // trunc(binary-ops(zext(x), zext(y)))
12339 // or
12340 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12341 // such that we're unnecessarily moving things into GPRs when it would be
12342 // better to keep them in CR bits.
12344 // Note that trunc here can be an actual i1 trunc, or can be the effective
12345 // truncation that comes from a setcc or select_cc.
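// For example (illustrative): for i1 values a and b,
// trunc(and(zext(a), zext(b))) is simply and(a, b) once the extensions
// and the truncation cancel, which is the rewrite performed below.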
12346 if (N->getOpcode() == ISD::TRUNCATE &&
12347 N->getValueType(0) != MVT::i1)
12348 return SDValue();
12350 if (N->getOperand(0).getValueType() != MVT::i32 &&
12351 N->getOperand(0).getValueType() != MVT::i64)
12352 return SDValue();
12354 if (N->getOpcode() == ISD::SETCC ||
12355 N->getOpcode() == ISD::SELECT_CC) {
12356 // If we're looking at a comparison, then we need to make sure that the
12357 // high bits (all except for the first) don't affect the result.
12358 ISD::CondCode CC =
12359 cast<CondCodeSDNode>(N->getOperand(
12360 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12361 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12363 if (ISD::isSignedIntSetCC(CC)) {
12364 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12365 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12366 return SDValue();
12367 } else if (ISD::isUnsignedIntSetCC(CC)) {
12368 if (!DAG.MaskedValueIsZero(N->getOperand(0),
12369 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12370 !DAG.MaskedValueIsZero(N->getOperand(1),
12371 APInt::getHighBitsSet(OpBits, OpBits-1)))
12372 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12373 : SDValue());
12374 } else {
12375 // This is neither a signed nor an unsigned comparison, just make sure
12376 // that the high bits are equal.
12377 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12378 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12380 // We don't really care about what is known about the first bit (if
12381 // anything), so clear it in all masks prior to comparing them.
12382 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
12383 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
12385 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
12386 return SDValue();
12387 }
12388 }
12390 // We now know that the higher-order bits are irrelevant, we just need to
12391 // make sure that all of the intermediate operations are bit operations, and
12392 // all inputs are extensions.
12393 if (N->getOperand(0).getOpcode() != ISD::AND &&
12394 N->getOperand(0).getOpcode() != ISD::OR &&
12395 N->getOperand(0).getOpcode() != ISD::XOR &&
12396 N->getOperand(0).getOpcode() != ISD::SELECT &&
12397 N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12398 N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12399 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12400 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12401 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12402 return SDValue();
12404 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12405 N->getOperand(1).getOpcode() != ISD::AND &&
12406 N->getOperand(1).getOpcode() != ISD::OR &&
12407 N->getOperand(1).getOpcode() != ISD::XOR &&
12408 N->getOperand(1).getOpcode() != ISD::SELECT &&
12409 N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12410 N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12411 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12412 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12413 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12414 return SDValue();
12416 SmallVector<SDValue, 4> Inputs;
12417 SmallVector<SDValue, 8> BinOps, PromOps;
12418 SmallPtrSet<SDNode *, 16> Visited;
12420 for (unsigned i = 0; i < 2; ++i) {
12421 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12422 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12423 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12424 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12425 isa<ConstantSDNode>(N->getOperand(i)))
12426 Inputs.push_back(N->getOperand(i));
12427 else
12428 BinOps.push_back(N->getOperand(i));
12430 if (N->getOpcode() == ISD::TRUNCATE)
12431 break;
12432 }
12434 // Visit all inputs, collect all binary operations (and, or, xor and
12435 // select) that are all fed by extensions.
12436 while (!BinOps.empty()) {
12437 SDValue BinOp = BinOps.back();
12438 BinOps.pop_back();
12440 if (!Visited.insert(BinOp.getNode()).second)
12441 continue;
12443 PromOps.push_back(BinOp);
12445 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12446 // The condition of the select is not promoted.
12447 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12448 continue;
12449 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12450 continue;
12452 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12453 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12454 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12455 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12456 isa<ConstantSDNode>(BinOp.getOperand(i))) {
12457 Inputs.push_back(BinOp.getOperand(i));
12458 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12459 BinOp.getOperand(i).getOpcode() == ISD::OR ||
12460 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12461 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12462 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12463 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12464 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12465 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12466 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12467 BinOps.push_back(BinOp.getOperand(i));
12468 } else {
12469 // We have an input that is not an extension or another binary
12470 // operation; we'll abort this transformation.
12471 return SDValue();
12472 }
12473 }
12474 }
12476 // Make sure that this is a self-contained cluster of operations (which
12477 // is not quite the same thing as saying that everything has only one
12478 // use).
12479 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12480 if (isa<ConstantSDNode>(Inputs[i]))
12481 continue;
12483 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12484 UE = Inputs[i].getNode()->use_end();
12485 UI != UE; ++UI) {
12486 SDNode *User = *UI;
12487 if (User != N && !Visited.count(User))
12488 return SDValue();
12490 // Make sure that we're not going to promote the non-output-value
12491 // operand(s) or SELECT or SELECT_CC.
12492 // FIXME: Although we could sometimes handle this, and it does occur in
12493 // practice that one of the condition inputs to the select is also one of
12494 // the outputs, we currently can't deal with this.
12495 if (User->getOpcode() == ISD::SELECT) {
12496 if (User->getOperand(0) == Inputs[i])
12497 return SDValue();
12498 } else if (User->getOpcode() == ISD::SELECT_CC) {
12499 if (User->getOperand(0) == Inputs[i] ||
12500 User->getOperand(1) == Inputs[i])
12501 return SDValue();
12502 }
12503 }
12504 }
12506 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12507 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12508 UE = PromOps[i].getNode()->use_end();
12509 UI != UE; ++UI) {
12510 SDNode *User = *UI;
12511 if (User != N && !Visited.count(User))
12512 return SDValue();
12514 // Make sure that we're not going to promote the non-output-value
12515 // operand(s) or SELECT or SELECT_CC.
12516 // FIXME: Although we could sometimes handle this, and it does occur in
12517 // practice that one of the condition inputs to the select is also one of
12518 // the outputs, we currently can't deal with this.
12519 if (User->getOpcode() == ISD::SELECT) {
12520 if (User->getOperand(0) == PromOps[i])
12521 return SDValue();
12522 } else if (User->getOpcode() == ISD::SELECT_CC) {
12523 if (User->getOperand(0) == PromOps[i] ||
12524 User->getOperand(1) == PromOps[i])
12525 return SDValue();
12526 }
12527 }
12528 }
12530 // Replace all inputs with the extension operand.
12531 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12532 // Constants may have users outside the cluster of to-be-promoted nodes,
12533 // and so we need to replace those as we do the promotions.
12534 if (isa<ConstantSDNode>(Inputs[i]))
12535 continue;
12537 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12538 }
12540 std::list<HandleSDNode> PromOpHandles;
12541 for (auto &PromOp : PromOps)
12542 PromOpHandles.emplace_back(PromOp);
12544 // Replace all operations (these are all the same, but have a different
12545 // (i1) return type). DAG.getNode will validate that the types of
12546 // a binary operator match, so go through the list in reverse so that
12547 // we've likely promoted both operands first. Any intermediate truncations or
12548 // extensions disappear.
12549 while (!PromOpHandles.empty()) {
12550 SDValue PromOp = PromOpHandles.back().getValue();
12551 PromOpHandles.pop_back();
12553 if (PromOp.getOpcode() == ISD::TRUNCATE ||
12554 PromOp.getOpcode() == ISD::SIGN_EXTEND ||
12555 PromOp.getOpcode() == ISD::ZERO_EXTEND ||
12556 PromOp.getOpcode() == ISD::ANY_EXTEND) {
12557 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
12558 PromOp.getOperand(0).getValueType() != MVT::i1) {
12559 // The operand is not yet ready (see comment below).
12560 PromOpHandles.emplace_front(PromOp);
12561 continue;
12562 }
12564 SDValue RepValue = PromOp.getOperand(0);
12565 if (isa<ConstantSDNode>(RepValue))
12566 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
12568 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
12569 continue;
12570 }
12572 unsigned C;
12573 switch (PromOp.getOpcode()) {
12574 default: C = 0; break;
12575 case ISD::SELECT: C = 1; break;
12576 case ISD::SELECT_CC: C = 2; break;
12577 }
12579 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12580 PromOp.getOperand(C).getValueType() != MVT::i1) ||
12581 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12582 PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
12583 // The to-be-promoted operands of this node have not yet been
12584 // promoted (this should be rare because we're going through the
12585 // list backward, but if one of the operands has several users in
12586 // this cluster of to-be-promoted nodes, it is possible).
12587 PromOpHandles.emplace_front(PromOp);
12588 continue;
12589 }
12591 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12592 PromOp.getNode()->op_end());
12594 // If there are any constant inputs, make sure they're replaced now.
12595 for (unsigned i = 0; i < 2; ++i)
12596 if (isa<ConstantSDNode>(Ops[C+i]))
12597 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
12599 DAG.ReplaceAllUsesOfValueWith(PromOp,
12600 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
12601 }
12603 // Now we're left with the initial truncation itself.
12604 if (N->getOpcode() == ISD::TRUNCATE)
12605 return N->getOperand(0);
12607 // Otherwise, this is a comparison. The operands to be compared have just
12608 // changed type (to i1), but everything else is the same.
12609 return SDValue(N, 0);
12610 }
12612 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
12613 DAGCombinerInfo &DCI) const {
12614 SelectionDAG &DAG = DCI.DAG;
12615 SDLoc dl(N);
12617 // If we're tracking CR bits, we need to be careful that we don't have:
12618 // zext(binary-ops(trunc(x), trunc(y)))
12619 // or
12620 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
12621 // such that we're unnecessarily moving things into CR bits that can more
12622 // efficiently stay in GPRs. Note that if we're not certain that the high
12623 // bits are set as required by the final extension, we still may need to do
12624 // some masking to get the proper behavior.
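// For example (illustrative): zext i64 (and (trunc x), (trunc y)) can be
// rebuilt as an i64 and(x, y), with extra masking only when the high bits
// are not already known to satisfy the extension (the ReallyNeedsExt
// logic below).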
12626 // This same functionality is important on PPC64 when dealing with
12627 // 32-to-64-bit extensions; these occur often when 32-bit values are used as
12628 // the return values of functions. Because it is so similar, it is handled
12631 if (N->getValueType(0) != MVT::i32 &&
12632 N->getValueType(0) != MVT::i64)
12633 return SDValue();
12635 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
12636 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
12637 return SDValue();
12639 if (N->getOperand(0).getOpcode() != ISD::AND &&
12640 N->getOperand(0).getOpcode() != ISD::OR &&
12641 N->getOperand(0).getOpcode() != ISD::XOR &&
12642 N->getOperand(0).getOpcode() != ISD::SELECT &&
12643 N->getOperand(0).getOpcode() != ISD::SELECT_CC)
12644 return SDValue();
12646 SmallVector<SDValue, 4> Inputs;
12647 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
12648 SmallPtrSet<SDNode *, 16> Visited;
12650 // Visit all inputs, collect all binary operations (and, or, xor and
12651 // select) that are all fed by truncations.
12652 while (!BinOps.empty()) {
12653 SDValue BinOp = BinOps.back();
12654 BinOps.pop_back();
12656 if (!Visited.insert(BinOp.getNode()).second)
12657 continue;
12659 PromOps.push_back(BinOp);
12661 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12662 // The condition of the select is not promoted.
12663 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12664 continue;
12665 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12666 continue;
12668 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12669 isa<ConstantSDNode>(BinOp.getOperand(i))) {
12670 Inputs.push_back(BinOp.getOperand(i));
12671 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12672 BinOp.getOperand(i).getOpcode() == ISD::OR ||
12673 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12674 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12675 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
12676 BinOps.push_back(BinOp.getOperand(i));
12677 } else {
12678 // We have an input that is not a truncation or another binary
12679 // operation; we'll abort this transformation.
12680 return SDValue();
12681 }
12682 }
12683 }
12685 // The operands of a select that must be truncated when the select is
12686 // promoted because the operand is actually part of the to-be-promoted set.
12687 DenseMap<SDNode *, EVT> SelectTruncOp[2];
12689 // Make sure that this is a self-contained cluster of operations (which
12690 // is not quite the same thing as saying that everything has only one
12691 // use).
12692 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12693 if (isa<ConstantSDNode>(Inputs[i]))
12694 continue;
12696 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12697 UE = Inputs[i].getNode()->use_end();
12698 UI != UE; ++UI) {
12699 SDNode *User = *UI;
12700 if (User != N && !Visited.count(User))
12701 return SDValue();
12703 // If we're going to promote the non-output-value operand(s) or SELECT or
12704 // SELECT_CC, record them for truncation.
12705 if (User->getOpcode() == ISD::SELECT) {
12706 if (User->getOperand(0) == Inputs[i])
12707 SelectTruncOp[0].insert(std::make_pair(User,
12708 User->getOperand(0).getValueType()));
12709 } else if (User->getOpcode() == ISD::SELECT_CC) {
12710 if (User->getOperand(0) == Inputs[i])
12711 SelectTruncOp[0].insert(std::make_pair(User,
12712 User->getOperand(0).getValueType()));
12713 if (User->getOperand(1) == Inputs[i])
12714 SelectTruncOp[1].insert(std::make_pair(User,
12715 User->getOperand(1).getValueType()));
12716 }
12717 }
12718 }
12720 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12721 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12722 UE = PromOps[i].getNode()->use_end();
12723 UI != UE; ++UI) {
12724 SDNode *User = *UI;
12725 if (User != N && !Visited.count(User))
12726 return SDValue();
12728 // If we're going to promote the non-output-value operand(s) of SELECT or
12729 // SELECT_CC, record them for truncation.
12730 if (User->getOpcode() == ISD::SELECT) {
12731 if (User->getOperand(0) == PromOps[i])
12732 SelectTruncOp[0].insert(std::make_pair(User,
12733 User->getOperand(0).getValueType()));
12734 } else if (User->getOpcode() == ISD::SELECT_CC) {
12735 if (User->getOperand(0) == PromOps[i])
12736 SelectTruncOp[0].insert(std::make_pair(User,
12737 User->getOperand(0).getValueType()));
12738 if (User->getOperand(1) == PromOps[i])
12739 SelectTruncOp[1].insert(std::make_pair(User,
12740 User->getOperand(1).getValueType()));
12741 }
12742 }
12743 }
12745 unsigned PromBits = N->getOperand(0).getValueSizeInBits();
12746 bool ReallyNeedsExt = false;
12747 if (N->getOpcode() != ISD::ANY_EXTEND) {
12748 // If all of the inputs are not already sign/zero extended, then
12749 // we'll still need to do that at the end.
12750 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12751 if (isa<ConstantSDNode>(Inputs[i]))
12752 continue;
12754 unsigned OpBits =
12755 Inputs[i].getOperand(0).getValueSizeInBits();
12756 assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
12758 if ((N->getOpcode() == ISD::ZERO_EXTEND &&
12759 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
12760 APInt::getHighBitsSet(OpBits,
12761 OpBits-PromBits))) ||
12762 (N->getOpcode() == ISD::SIGN_EXTEND &&
12763 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
12764 (OpBits-(PromBits-1)))) {
12765 ReallyNeedsExt = true;
12766 break;
12767 }
12768 }
12769 }
12771 // Replace all inputs, either with the truncation operand, or a
12772 // truncation or extension to the final output type.
12773 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12774 // Constant inputs need to be replaced with the to-be-promoted nodes that
12775 // use them because they might have users outside of the cluster of
12776 // to-be-promoted nodes.
12777 if (isa<ConstantSDNode>(Inputs[i]))
12778 continue;
12780 SDValue InSrc = Inputs[i].getOperand(0);
12781 if (Inputs[i].getValueType() == N->getValueType(0))
12782 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
12783 else if (N->getOpcode() == ISD::SIGN_EXTEND)
12784 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12785 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
12786 else if (N->getOpcode() == ISD::ZERO_EXTEND)
12787 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12788 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
12789 else
12790 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12791 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
12792 }
12794 std::list<HandleSDNode> PromOpHandles;
12795 for (auto &PromOp : PromOps)
12796 PromOpHandles.emplace_back(PromOp);
12798 // Replace all operations (these are all the same, but have a different
12799 // (promoted) return type). DAG.getNode will validate that the types of
12800 // a binary operator match, so go through the list in reverse so that
12801 // we've likely promoted both operands first.
12802 while (!PromOpHandles.empty()) {
12803 SDValue PromOp = PromOpHandles.back().getValue();
12804 PromOpHandles.pop_back();
12806 unsigned C;
12807 switch (PromOp.getOpcode()) {
12808 default: C = 0; break;
12809 case ISD::SELECT: C = 1; break;
12810 case ISD::SELECT_CC: C = 2; break;
12811 }
12813 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12814 PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
12815 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12816 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
12817 // The to-be-promoted operands of this node have not yet been
12818 // promoted (this should be rare because we're going through the
12819 // list backward, but if one of the operands has several users in
12820 // this cluster of to-be-promoted nodes, it is possible).
12821 PromOpHandles.emplace_front(PromOp);
12822 continue;
12823 }
12825 // For SELECT and SELECT_CC nodes, we do a similar check for any
12826 // to-be-promoted comparison inputs.
12827 if (PromOp.getOpcode() == ISD::SELECT ||
12828 PromOp.getOpcode() == ISD::SELECT_CC) {
12829 if ((SelectTruncOp[0].count(PromOp.getNode()) &&
12830 PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
12831 (SelectTruncOp[1].count(PromOp.getNode()) &&
12832 PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
12833 PromOpHandles.emplace_front(PromOp);
12834 continue;
12835 }
12836 }
12838 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12839 PromOp.getNode()->op_end());
12841 // If this node has constant inputs, then they'll need to be promoted here.
12842 for (unsigned i = 0; i < 2; ++i) {
12843 if (!isa<ConstantSDNode>(Ops[C+i]))
12844 continue;
12845 if (Ops[C+i].getValueType() == N->getValueType(0))
12846 continue;
12848 if (N->getOpcode() == ISD::SIGN_EXTEND)
12849 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12850 else if (N->getOpcode() == ISD::ZERO_EXTEND)
12851 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12852 else
12853 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12854 }
12856 // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
12857 // truncate them again to the original value type.
12858 if (PromOp.getOpcode() == ISD::SELECT ||
12859 PromOp.getOpcode() == ISD::SELECT_CC) {
12860 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
12861 if (SI0 != SelectTruncOp[0].end())
12862 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
12863 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
12864 if (SI1 != SelectTruncOp[1].end())
12865 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
12866 }
12868 DAG.ReplaceAllUsesOfValueWith(PromOp,
12869 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
12870 }
12872 // Now we're left with the initial extension itself.
12873 if (!ReallyNeedsExt)
12874 return N->getOperand(0);
12876 // To zero extend, just mask off everything except for the first bit (in the
12877 // i1 case).
12878 if (N->getOpcode() == ISD::ZERO_EXTEND)
12879 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
12880 DAG.getConstant(APInt::getLowBitsSet(
12881 N->getValueSizeInBits(0), PromBits),
12882 dl, N->getValueType(0)));
12884 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
12885 "Invalid extension type");
12886 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
12887 SDValue ShiftCst =
12888 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
12889 return DAG.getNode(
12890 ISD::SRA, dl, N->getValueType(0),
12891 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
12892 ShiftCst);
12893 }
12895 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
12896 DAGCombinerInfo &DCI) const {
12897 assert(N->getOpcode() == ISD::SETCC &&
12898 "Should be called with a SETCC node");
12900 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12901 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
12902 SDValue LHS = N->getOperand(0);
12903 SDValue RHS = N->getOperand(1);
12905 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
12906 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
12907 LHS.hasOneUse())
12908 std::swap(LHS, RHS);
12910 // x == 0-y --> x+y == 0
12911 // x != 0-y --> x+y != 0
12912 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
12913 RHS.hasOneUse()) {
12914 SDLoc DL(N);
12915 SelectionDAG &DAG = DCI.DAG;
12916 EVT VT = N->getValueType(0);
12917 EVT OpVT = LHS.getValueType();
12918 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
12919 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
12920 }
12921 }
12923 return DAGCombineTruncBoolExt(N, DCI);
12924 }
12926 // Is this an extending load from an f32 to an f64?
12927 static bool isFPExtLoad(SDValue Op) {
12928 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
12929 return LD->getExtensionType() == ISD::EXTLOAD &&
12930 Op.getValueType() == MVT::f64;
12931 return false;
12932 }
12934 /// Reduces the number of fp-to-int conversion when building a vector.
12936 /// If this vector is built out of floating to integer conversions,
12937 /// transform it to a vector built out of floating point values followed by a
12938 /// single floating to integer conversion of the vector.
12939 /// Namely (build_vector (fptosi $A), (fptosi $B), ...)
12940 /// becomes (fptosi (build_vector ($A, $B, ...)))
12941 SDValue PPCTargetLowering::
12942 combineElementTruncationToVectorTruncation(SDNode *N,
12943 DAGCombinerInfo &DCI) const {
12944 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12945 "Should be called with a BUILD_VECTOR node");
12947 SelectionDAG &DAG = DCI.DAG;
12948 SDLoc dl(N);
12950 SDValue FirstInput = N->getOperand(0);
12951 assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
12952 "The input operand must be an fp-to-int conversion.");
12954 // This combine happens after legalization so the fp_to_[su]i nodes are
12955 // already converted to PPCISD nodes.
12956 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
12957 if (FirstConversion == PPCISD::FCTIDZ ||
12958 FirstConversion == PPCISD::FCTIDUZ ||
12959 FirstConversion == PPCISD::FCTIWZ ||
12960 FirstConversion == PPCISD::FCTIWUZ) {
12961 bool IsSplat = true;
12962 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
12963 FirstConversion == PPCISD::FCTIWUZ;
12964 EVT SrcVT = FirstInput.getOperand(0).getValueType();
12965 SmallVector<SDValue, 4> Ops;
12966 EVT TargetVT = N->getValueType(0);
12967 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
12968 SDValue NextOp = N->getOperand(i);
12969 if (NextOp.getOpcode() != PPCISD::MFVSR)
12970 return SDValue();
12971 unsigned NextConversion = NextOp.getOperand(0).getOpcode();
12972 if (NextConversion != FirstConversion)
12973 return SDValue();
12974 // If we are converting to 32-bit integers, we need to add an FP_ROUND.
12975 // This is not valid if the input was originally double precision. It is
12976 // also not profitable to do unless this is an extending load in which
12977 // case doing this combine will allow us to combine consecutive loads.
12978 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
12979 return SDValue();
12980 if (N->getOperand(i) != FirstInput)
12981 IsSplat = false;
12982 }
12984 // If this is a splat, we leave it as-is since there will be only a single
12985 // fp-to-int conversion followed by a splat of the integer. This is better
12986 // for 32-bit and smaller ints and neutral for 64-bit ints.
12987 if (IsSplat)
12988 return SDValue();
12990 // Now that we know we have the right type of node, get its operands
12991 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
12992 SDValue In = N->getOperand(i).getOperand(0);
12994 // For 32-bit values, we need to add an FP_ROUND node (if we made it
12995 // here, we know that all inputs are extending loads so this is safe).
12996 if (In.isUndef())
12997 Ops.push_back(DAG.getUNDEF(SrcVT));
12998 else if (Is32Bit) {
12999 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13000 MVT::f32, In.getOperand(0),
13001 DAG.getIntPtrConstant(1, dl));
13002 Ops.push_back(Trunc);
13003 }
13004 else
13005 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13006 }
13008 unsigned Opcode;
13009 if (FirstConversion == PPCISD::FCTIDZ ||
13010 FirstConversion == PPCISD::FCTIWZ)
13011 Opcode = ISD::FP_TO_SINT;
13012 else
13013 Opcode = ISD::FP_TO_UINT;
13015 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13016 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13017 return DAG.getNode(Opcode, dl, TargetVT, BV);
13018 }
13019 return SDValue();
13020 }
13022 /// Reduce the number of loads when building a vector.
13024 /// Building a vector out of multiple loads can be converted to a load
13025 /// of the vector type if the loads are consecutive. If the loads are
13026 /// consecutive but in descending order, a shuffle is added at the end
13027 /// to reorder the vector.
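// For example (illustrative, assuming 4-byte elements at consecutive
// addresses):
//   (v4i32 build_vector (load p), (load p+4), (load p+8), (load p+12))
// becomes a single (v4i32 load p), while
//   (v4i32 build_vector (load p+12), (load p+8), (load p+4), (load p))
// becomes (vector_shuffle (v4i32 load p), undef, <3,2,1,0>).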
13028 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13029 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13030 "Should be called with a BUILD_VECTOR node");
13034 // Return early for non-byte-sized types, as they can't be consecutive.
13035 if (!N->getValueType(0).getVectorElementType().isByteSized())
13036 return SDValue();
13038 bool InputsAreConsecutiveLoads = true;
13039 bool InputsAreReverseConsecutive = true;
13040 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13041 SDValue FirstInput = N->getOperand(0);
13042 bool IsRoundOfExtLoad = false;
13044 if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13045 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13046 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13047 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13048 }
13049 // Not a build vector of (possibly fp_rounded) loads.
13050 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13051 N->getNumOperands() == 1)
13052 return SDValue();
13054 for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13055 // If any inputs are fp_round(extload), they all must be.
13056 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13057 return SDValue();
13059 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13060 N->getOperand(i);
13061 if (NextInput.getOpcode() != ISD::LOAD)
13062 return SDValue();
13064 SDValue PreviousInput =
13065 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13066 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13067 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13069 // If any inputs are fp_round(extload), they all must be.
13070 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13071 return SDValue();
13073 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13074 InputsAreConsecutiveLoads = false;
13075 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13076 InputsAreReverseConsecutive = false;
13078 // Exit early if the loads are neither consecutive nor reverse consecutive.
13079 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13080 return SDValue();
13081 }
13083 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13084 "The loads cannot be both consecutive and reverse consecutive.");
13086 SDValue FirstLoadOp =
13087 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13088 SDValue LastLoadOp =
13089 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13090 N->getOperand(N->getNumOperands()-1);
13092 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13093 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13094 if (InputsAreConsecutiveLoads) {
13095 assert(LD1 && "Input needs to be a LoadSDNode.");
13096 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13097 LD1->getBasePtr(), LD1->getPointerInfo(),
13098 LD1->getAlignment());
13099 }
13100 if (InputsAreReverseConsecutive) {
13101 assert(LDL && "Input needs to be a LoadSDNode.");
13102 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13103 LDL->getBasePtr(), LDL->getPointerInfo(),
13104 LDL->getAlignment());
13105 SmallVector<int, 16> Ops;
13106 for (int i = N->getNumOperands() - 1; i >= 0; i--)
13107 Ops.push_back(i);
13109 return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13110 DAG.getUNDEF(N->getValueType(0)), Ops);
13111 }
13112 return SDValue();
13113 }
13115 // This function adds the required vector_shuffle needed to get
13116 // the elements of the vector extract in the correct position
13117 // as specified by the CorrectElems encoding.
13118 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13119 SDValue Input, uint64_t Elems,
13120 uint64_t CorrectElems) {
13121 SDLoc dl(N);
13123 unsigned NumElems = Input.getValueType().getVectorNumElements();
13124 SmallVector<int, 16> ShuffleMask(NumElems, -1);
13126 // Knowing the element indices being extracted from the original
13127 // vector and the order in which they're being inserted, just put
13128 // them at element indices required for the instruction.
13129 for (unsigned i = 0; i < N->getNumOperands(); i++) {
13130 if (DAG.getDataLayout().isLittleEndian())
13131 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13132 else
13133 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13134 CorrectElems = CorrectElems >> 8;
13135 Elems = Elems >> 8;
13136 }
13138 SDValue Shuffle =
13139 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13140 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13142 EVT Ty = N->getValueType(0);
13143 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
13144 return BV;
13145 }
13147 // Look for build vector patterns where input operands come from sign
13148 // extended vector_extract elements of specific indices. If the correct indices
13149 // aren't used, add a vector shuffle to fix up the indices and create a new
13150 // PPCISD:SExtVElems node which selects the vector sign extend instructions
13151 // during instruction selection.
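// For example (illustrative), on a little endian P9 target:
//   (v4i32 build_vector (sext (extractelt v16i8:$v, 0)),
//                       (sext (extractelt $v, 4)),
//                       (sext (extractelt $v, 8)),
//                       (sext (extractelt $v, 12)))
// already uses the indices vextsb2w expects, while other index sets get a
// vector_shuffle first to move the bytes into those positions.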
13152 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13153 // This array encodes the indices that the vector sign extend instructions
13154 // extract from when extending from one type to another for both BE and LE.
13155 // The right nibble of each byte corresponds to the LE indices,
13156 // and the left nibble of each byte corresponds to the BE indices.
13157 // For example: 0x3074B8FC byte->word
13158 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13159 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13160 // For example: 0x000070F8 byte->double word
13161 // For LE: the allowed indices are: 0x0,0x8
13162 // For BE: the allowed indices are: 0x7,0xF
13163 uint64_t TargetElems[] = {
13164 0x3074B8FC, // b->w
13165 0x000070F8, // b->d
13166 0x10325476, // h->w
13167 0x00003074, // h->d
13168 0x00001032, // w->d
13169 };
13171 uint64_t Elems = 0;
13172 int Index;
13173 SDValue Input;
13175 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13176 if (!Op)
13177 return false;
13178 if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13179 Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13180 return false;
13182 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13183 // of the right width.
13184 SDValue Extract = Op.getOperand(0);
13185 if (Extract.getOpcode() == ISD::ANY_EXTEND)
13186 Extract = Extract.getOperand(0);
13187 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13188 return false;
13190 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13191 if (!ExtOp)
13192 return false;
13194 Index = ExtOp->getZExtValue();
13195 if (Input && Input != Extract.getOperand(0))
13196 return false;
13198 if (!Input)
13199 Input = Extract.getOperand(0);
13201 Elems = Elems << 8;
13202 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13203 Elems |= Index;
13205 return true;
13206 };
13208 // If the build vector operands aren't sign extended vector extracts
13209 // of the same input vector, then return.
13210 for (unsigned i = 0; i < N->getNumOperands(); i++) {
13211 if (!isSExtOfVecExtract(N->getOperand(i))) {
13212 return SDValue();
13213 }
13214 }
13216 // If the vector extract indices are not correct, add the appropriate
13217 // vector_shuffle.
13218 int TgtElemArrayIdx;
13219 int InputSize = Input.getValueType().getScalarSizeInBits();
13220 int OutputSize = N->getValueType(0).getScalarSizeInBits();
13221 if (InputSize + OutputSize == 40)
13222 TgtElemArrayIdx = 0;
13223 else if (InputSize + OutputSize == 72)
13224 TgtElemArrayIdx = 1;
13225 else if (InputSize + OutputSize == 48)
13226 TgtElemArrayIdx = 2;
13227 else if (InputSize + OutputSize == 80)
13228 TgtElemArrayIdx = 3;
13229 else if (InputSize + OutputSize == 96)
13230 TgtElemArrayIdx = 4;
13231 else
13232 return SDValue();
13234 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13235 CorrectElems = DAG.getDataLayout().isLittleEndian()
13236 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13237 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13238 if (Elems != CorrectElems) {
13239 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13240 }
13242 // Regular lowering will catch cases where a shuffle is not needed.
13243 return SDValue();
13244 }
13246 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13247 DAGCombinerInfo &DCI) const {
13248 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13249 "Should be called with a BUILD_VECTOR node");
13251 SelectionDAG &DAG = DCI.DAG;
13252 SDLoc dl(N);
13254 if (!Subtarget.hasVSX())
13255 return SDValue();
13257 // The target independent DAG combiner will leave a build_vector of
13258 // float-to-int conversions intact. We can generate MUCH better code for
13259 // a float-to-int conversion of a vector of floats.
13260 SDValue FirstInput = N->getOperand(0);
13261 if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13262 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13263 if (Reduced)
13264 return Reduced;
13265 }
13267 // If we're building a vector out of consecutive loads, just load that
13268 // vector type.
13269 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13270 if (Reduced)
13271 return Reduced;
13273 // If we're building a vector out of extended elements from another vector
13274 // we have P9 vector integer extend instructions. The code assumes legal
13275 // input types (i.e. it can't handle things like v4i16) so do not run before
13276 // legalization.
13277 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13278 Reduced = combineBVOfVecSExt(N, DAG);
13279 if (Reduced)
13280 return Reduced;
13281 }
13284 if (N->getValueType(0) != MVT::v2f64)
13285 return SDValue();
13287 // Looking for:
13288 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13289 if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13290 FirstInput.getOpcode() != ISD::UINT_TO_FP)
13291 return SDValue();
13292 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13293 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13294 return SDValue();
13295 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13296 return SDValue();
13298 SDValue Ext1 = FirstInput.getOperand(0);
13299 SDValue Ext2 = N->getOperand(1).getOperand(0);
13300 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13301 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13302 return SDValue();
13304 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13305 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13306 if (!Ext1Op || !Ext2Op)
13307 return SDValue();
13308 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13309 Ext1.getOperand(0) != Ext2.getOperand(0))
13310 return SDValue();
13312 int FirstElem = Ext1Op->getZExtValue();
13313 int SecondElem = Ext2Op->getZExtValue();
13314 int SubvecIdx;
13315 if (FirstElem == 0 && SecondElem == 1)
13316 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13317 else if (FirstElem == 2 && SecondElem == 3)
13318 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13319 else
13320 return SDValue();
13322 SDValue SrcVec = Ext1.getOperand(0);
13323 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13324 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13325 return DAG.getNode(NodeType, dl, MVT::v2f64,
13326 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13327 }
13329 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13330 DAGCombinerInfo &DCI) const {
13331 assert((N->getOpcode() == ISD::SINT_TO_FP ||
13332 N->getOpcode() == ISD::UINT_TO_FP) &&
13333 "Need an int -> FP conversion node here");
13335 if (useSoftFloat() || !Subtarget.has64BitSupport())
13336 return SDValue();
13338 SelectionDAG &DAG = DCI.DAG;
13339 SDLoc dl(N);
13340 SDValue Op(N, 0);
13342 // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13343 // from the hardware.
13344 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13345 return SDValue();
13346 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13347 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13348 return SDValue();
13350 SDValue FirstOperand(Op.getOperand(0));
13351 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13352 (FirstOperand.getValueType() == MVT::i8 ||
13353 FirstOperand.getValueType() == MVT::i16);
13354 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13355 bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13356 bool DstDouble = Op.getValueType() == MVT::f64;
13357 unsigned ConvOp = Signed ?
13358 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
13359 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13360 SDValue WidthConst =
13361 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13362 dl, false);
13363 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13364 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13365 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13366 DAG.getVTList(MVT::f64, MVT::Other),
13367 Ops, MVT::i8, LDN->getMemOperand());
13369 // For signed conversion, we need to sign-extend the value in the VSR
13370 if (Signed) {
13371 SDValue ExtOps[] = { Ld, WidthConst };
13372 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13373 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13374 } else
13375 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13376 }
13379 // For i32 intermediate values, unfortunately, the conversion functions
13380 // leave the upper 32 bits of the value undefined. Within the set of
13381 // scalar instructions, we have no method for zero- or sign-extending the
13382 // value. Thus, we cannot handle i32 intermediate values here.
13383 if (Op.getOperand(0).getValueType() == MVT::i32)
13384 return SDValue();
13386 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13387 "UINT_TO_FP is supported only with FPCVT");
13389 // If we have FCFIDS, then use it when converting to single-precision.
13390 // Otherwise, convert to double-precision and then round.
13391 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13392 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13393 : PPCISD::FCFIDS)
13394 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13395 : PPCISD::FCFID);
13396 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13397 ? MVT::f32
13398 : MVT::f64;
13400 // If we're converting from a float, to an int, and back to a float again,
13401 // then we don't need the store/load pair at all.
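// For example (illustrative):
//   (f64 (sint_to_fp (fp_to_sint f64:$A)))
// can be selected as fctidz followed by fcfid on $A directly, with no
// intermediate store/load of the i64 value.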
13402 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13403 Subtarget.hasFPCVT()) ||
13404 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13405 SDValue Src = Op.getOperand(0).getOperand(0);
13406 if (Src.getValueType() == MVT::f32) {
13407 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13408 DCI.AddToWorklist(Src.getNode());
13409 } else if (Src.getValueType() != MVT::f64) {
13410 // Make sure that we don't pick up a ppc_fp128 source value.
13411 return SDValue();
13412 }
13414 unsigned FCTOp =
13415 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13416 PPCISD::FCTIDUZ;
13418 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13419 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13421 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13422 FP = DAG.getNode(ISD::FP_ROUND, dl,
13423 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13424 DCI.AddToWorklist(FP.getNode());
13425 }
13427 return FP;
13428 }
13430 return SDValue();
13431 }
13433 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13434 // builtins) into loads with swaps.
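// For example (illustrative), on a little endian subtarget:
//   (v4i32 load addr)
// becomes
//   (bitcast (XXSWAPD (LXVD2X addr)))
// where the load is done as v2f64 and the swap restores the element order.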
13435 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13436 DAGCombinerInfo &DCI) const {
13437 SelectionDAG &DAG = DCI.DAG;
13438 SDLoc dl(N);
13439 SDValue Chain;
13440 SDValue Base;
13441 MachineMemOperand *MMO;
13443 switch (N->getOpcode()) {
13444 default:
13445 llvm_unreachable("Unexpected opcode for little endian VSX load");
13446 case ISD::LOAD: {
13447 LoadSDNode *LD = cast<LoadSDNode>(N);
13448 Chain = LD->getChain();
13449 Base = LD->getBasePtr();
13450 MMO = LD->getMemOperand();
13451 // If the MMO suggests this isn't a load of a full vector, leave
13452 // things alone. For a built-in, we have to make the change for
13453 // correctness, so if there is a size problem that will be a bug.
13454 if (MMO->getSize() < 16)
13455 return SDValue();
13456 break;
13457 }
13458 case ISD::INTRINSIC_W_CHAIN: {
13459 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13460 Chain = Intrin->getChain();
13461 // Similarly to the store case below, Intrin->getBasePtr() doesn't get
13462 // us what we want. Get operand 2 instead.
13463 Base = Intrin->getOperand(2);
13464 MMO = Intrin->getMemOperand();
13465 break;
13466 }
13467 }
13469 MVT VecTy = N->getValueType(0).getSimpleVT();
13471 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
13472 // aligned and the type is a vector with elements up to 4 bytes
13473 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
13474 && VecTy.getScalarSizeInBits() <= 32 ) {
13475 return SDValue();
13476 }
13478 SDValue LoadOps[] = { Chain, Base };
13479 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
13480 DAG.getVTList(MVT::v2f64, MVT::Other),
13481 LoadOps, MVT::v2f64, MMO);
13483 DCI.AddToWorklist(Load.getNode());
13484 Chain = Load.getValue(1);
13485 SDValue Swap = DAG.getNode(
13486 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
13487 DCI.AddToWorklist(Swap.getNode());
13489 // Add a bitcast if the resulting load type doesn't match v2f64.
13490 if (VecTy != MVT::v2f64) {
13491 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
13492 DCI.AddToWorklist(N.getNode());
13493 // Package {bitcast value, swap's chain} to match Load's shape.
13494 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
13495 N, Swap.getValue(1));
13496 }
13498 return Swap;
13499 }
13501 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
13502 // builtins) into stores with swaps.
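// For example (illustrative), on a little endian subtarget:
//   (store v4i32:$v, addr)
// becomes
//   (STXVD2X (XXSWAPD (bitcast $v to v2f64)), addr)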
13503 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13504 DAGCombinerInfo &DCI) const {
13505 SelectionDAG &DAG = DCI.DAG;
13506 SDLoc dl(N);
13507 SDValue Chain;
13508 SDValue Base;
13509 unsigned SrcOpnd;
13510 MachineMemOperand *MMO;
13512 switch (N->getOpcode()) {
13513 default:
13514 llvm_unreachable("Unexpected opcode for little endian VSX store");
13515 case ISD::STORE: {
13516 StoreSDNode *ST = cast<StoreSDNode>(N);
13517 Chain = ST->getChain();
13518 Base = ST->getBasePtr();
13519 MMO = ST->getMemOperand();
13520 SrcOpnd = 1;
13521 // If the MMO suggests this isn't a store of a full vector, leave
13522 // things alone. For a built-in, we have to make the change for
13523 // correctness, so if there is a size problem that will be a bug.
13524 if (MMO->getSize() < 16)
13525 return SDValue();
13526 break;
13527 }
13528 case ISD::INTRINSIC_VOID: {
13529 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13530 Chain = Intrin->getChain();
13531 // Intrin->getBasePtr() oddly does not get what we want.
13532 Base = Intrin->getOperand(3);
13533 MMO = Intrin->getMemOperand();
13534 SrcOpnd = 2;
13535 break;
13536 }
13537 }
13539 SDValue Src = N->getOperand(SrcOpnd);
13540 MVT VecTy = Src.getValueType().getSimpleVT();
13542 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
13543 // aligned and the type is a vector with elements up to 4 bytes
13544 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
13545 && VecTy.getScalarSizeInBits() <= 32 ) {
13546 return SDValue();
13547 }
13549 // All stores are done as v2f64 and possible bit cast.
13550 if (VecTy != MVT::v2f64) {
13551 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13552 DCI.AddToWorklist(Src.getNode());
13553 }
13555 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13556 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13557 DCI.AddToWorklist(Swap.getNode());
13558 Chain = Swap.getValue(1);
13559 SDValue StoreOps[] = { Chain, Swap, Base };
13560 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
13561 DAG.getVTList(MVT::Other),
13562 StoreOps, VecTy, MMO);
13563 DCI.AddToWorklist(Store.getNode());
13564 return Store;
13565 }
13567 // Handle DAG combine for STORE (FP_TO_INT F).
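// For example (illustrative):
//   (store (i32 fp_to_sint f64:$F), addr)
// can be emitted as a convert-in-VSR followed by ST_VSR_SCAL_INT, storing
// the integer straight from the VSR instead of moving it through a GPR.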
13568 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
13569 DAGCombinerInfo &DCI) const {
13571 SelectionDAG &DAG = DCI.DAG;
13572 SDLoc dl(N);
13573 unsigned Opcode = N->getOperand(1).getOpcode();
13575 assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
13576 && "Not a FP_TO_INT Instruction!");
13578 SDValue Val = N->getOperand(1).getOperand(0);
13579 EVT Op1VT = N->getOperand(1).getValueType();
13580 EVT ResVT = Val.getValueType();
13582 // Floating point types smaller than 32 bits are not legal on Power.
13583 if (ResVT.getScalarSizeInBits() < 32)
13584 return SDValue();
13586 // Only perform combine for conversion to i64/i32 or power9 i16/i8.
13587 bool ValidTypeForStoreFltAsInt =
13588 (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
13589 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
13591 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Altivec() ||
13592 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
13593 return SDValue();
13595 // Extend f32 values to f64
13596 if (ResVT.getScalarSizeInBits() == 32) {
13597 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
13598 DCI.AddToWorklist(Val.getNode());
13599 }
13601 // Set signed or unsigned conversion opcode.
13602 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
13603 PPCISD::FP_TO_SINT_IN_VSR :
13604 PPCISD::FP_TO_UINT_IN_VSR;
13606 Val = DAG.getNode(ConvOpcode,
13607 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
13608 DCI.AddToWorklist(Val.getNode());
13610 // Set number of bytes being converted.
13611 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
13612 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
13613 DAG.getIntPtrConstant(ByteSize, dl, false),
13614 DAG.getValueType(Op1VT) };
13616 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
13617 DAG.getVTList(MVT::Other), Ops,
13618 cast<StoreSDNode>(N)->getMemoryVT(),
13619 cast<StoreSDNode>(N)->getMemOperand());
13621 DCI.AddToWorklist(Val.getNode());
13622 return Val;
13623 }
13625 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
13626 LSBaseSDNode *LSBase,
13627 DAGCombinerInfo &DCI) const {
13628 assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
13629 "Not a reverse memop pattern!");
13631 auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
13632 auto Mask = SVN->getMask();
13633 int i = 0;
13634 auto I = Mask.rbegin();
13635 auto E = Mask.rend();
13637 for (; I != E; ++I) {
13638 if (*I != i)
13639 return false;
13640 i++;
13641 }
13642 return true;
13643 };
13645 SelectionDAG &DAG = DCI.DAG;
13646 EVT VT = SVN->getValueType(0);
13648 if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13649 return SDValue();
13651 // Before P9, we have PPCVSXSwapRemoval pass to hack the element order.
13652 // See comment in PPCVSXSwapRemoval.cpp.
13653 // It conflicts with the PPCVSXSwapRemoval optimization, so we don't do it.
13654 if (!Subtarget.hasP9Vector())
13655 return SDValue();
13657 if (!IsElementReverse(SVN))
13658 return SDValue();
13660 if (LSBase->getOpcode() == ISD::LOAD) {
13661 SDLoc dl(LSBase);
13662 SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
13663 return DAG.getMemIntrinsicNode(
13664 PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
13665 LSBase->getMemoryVT(), LSBase->getMemOperand());
13666 }
13668 if (LSBase->getOpcode() == ISD::STORE) {
13669 SDLoc dl(LSBase);
13670 SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
13671 LSBase->getBasePtr()};
13672 return DAG.getMemIntrinsicNode(
13673 PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
13674 LSBase->getMemoryVT(), LSBase->getMemOperand());
13675 }
13677 llvm_unreachable("Expected a load or store node here");
13678 }
13680 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
13681 DAGCombinerInfo &DCI) const {
13682 SelectionDAG &DAG = DCI.DAG;
13683 SDLoc dl(N);
13684 switch (N->getOpcode()) {
13685 default: break;
13686 case ISD::ADD:
13687 return combineADD(N, DCI);
13688 case ISD::SHL:
13689 return combineSHL(N, DCI);
13690 case ISD::SRA:
13691 return combineSRA(N, DCI);
13692 case ISD::SRL:
13693 return combineSRL(N, DCI);
13694 case ISD::MUL:
13695 return combineMUL(N, DCI);
13696 case PPCISD::SHL:
13697 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
13698 return N->getOperand(0);
13699 break;
13700 case PPCISD::SRL:
13701 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
13702 return N->getOperand(0);
13703 break;
13704 case PPCISD::SRA:
13705 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
13706 if (C->isNullValue() || // 0 >>s V -> 0.
13707 C->isAllOnesValue()) // -1 >>s V -> -1.
13708 return N->getOperand(0);
13709 }
13710 break;
13711 case ISD::SIGN_EXTEND:
13712 case ISD::ZERO_EXTEND:
13713 case ISD::ANY_EXTEND:
13714 return DAGCombineExtBoolTrunc(N, DCI);
13715 case ISD::TRUNCATE:
13716 return combineTRUNCATE(N, DCI);
13717 case ISD::SETCC:
13718 if (SDValue CSCC = combineSetCC(N, DCI))
13719 return CSCC;
13720 LLVM_FALLTHROUGH;
13721 case ISD::SELECT_CC:
13722 return DAGCombineTruncBoolExt(N, DCI);
13723 case ISD::SINT_TO_FP:
13724 case ISD::UINT_TO_FP:
13725 return combineFPToIntToFP(N, DCI);
13726 case ISD::VECTOR_SHUFFLE:
13727 if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
13728 LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
13729 return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
13730 }
13731 break;
13732 case ISD::STORE: {
13734 EVT Op1VT = N->getOperand(1).getValueType();
13735 unsigned Opcode = N->getOperand(1).getOpcode();
13737 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
13738 SDValue Val = combineStoreFPToInt(N, DCI);
13739 if (Val)
13740 return Val;
13741 }
13743 if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
13744 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
13745 SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
13746 if (Val)
13747 return Val;
13748 }
13750 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
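// For example (illustrative):
//   (store (bswap i32:$v), addr)
// becomes a single STBRX node, i.e. a stwbrx instruction.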
13751 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
13752 N->getOperand(1).getNode()->hasOneUse() &&
13753 (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
13754 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
13756 // STBRX can only handle simple types and it makes no sense to store less
13757 // than two bytes in byte-reversed order.
13758 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
13759 if (mVT.isExtended() || mVT.getSizeInBits() < 16)
13760 break;
13762 SDValue BSwapOp = N->getOperand(1).getOperand(0);
13763 // Do an any-extend to 32-bits if this is a half-word input.
13764 if (BSwapOp.getValueType() == MVT::i16)
13765 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
13767 // If the type of BSWAP operand is wider than stored memory width
13768 // it needs to be shifted to the right side before STBRX.
13769 if (Op1VT.bitsGT(mVT)) {
13770 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
13771 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
13772 DAG.getConstant(Shift, dl, MVT::i32));
13773 // Need to truncate if this is a bswap of i64 stored as i32/i16.
13774 if (Op1VT == MVT::i64)
13775 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
13776 }
13778 SDValue Ops[] = {
13779 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
13780 };
13781 return
13782 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
13783 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
13784 cast<StoreSDNode>(N)->getMemOperand());
13785 }
13787 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
13788 // So it can increase the chance of CSE constant construction.
13789 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
13790 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
13791 // Need to sign-extend to 64 bits to handle negative values.
13792 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
13793 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
13794 MemVT.getSizeInBits());
13795 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
13797 // DAG.getTruncStore() can't be used here because it doesn't accept
13798 // the general (base + offset) addressing mode.
13799 // So we use UpdateNodeOperands and setTruncatingStore instead.
13800 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
13801 N->getOperand(3));
13802 cast<StoreSDNode>(N)->setTruncatingStore(true);
13803 return SDValue(N, 0);
13804 }
13806 // For little endian, VSX stores require generating xxswapd/lxvd2x.
13807 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
13808 if (Op1VT.isSimple()) {
13809 MVT StoreVT = Op1VT.getSimpleVT();
13810 if (Subtarget.needsSwapsForVSXMemOps() &&
13811 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
13812 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
13813 return expandVSXStoreForLE(N, DCI);
13814 }
13815 break;
13816 }
13817 case ISD::LOAD: {
13818 LoadSDNode *LD = cast<LoadSDNode>(N);
13819 EVT VT = LD->getValueType(0);
13821 // For little endian, VSX loads require generating lxvd2x/xxswapd.
13822 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
13823 if (VT.isSimple()) {
13824 MVT LoadVT = VT.getSimpleVT();
13825 if (Subtarget.needsSwapsForVSXMemOps() &&
13826 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
13827 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
13828 return expandVSXLoadForLE(N, DCI);
13829 }
13831 // We sometimes end up with a 64-bit integer load, from which we extract
13832 // two single-precision floating-point numbers. This happens with
13833 // std::complex<float>, and other similar structures, because of the way we
13834 // canonicalize structure copies. However, if we lack direct moves,
13835 // then the final bitcasts from the extracted integer values to the
13836 // floating-point numbers turn into store/load pairs. Even with direct moves,
13837 // just loading the two floating-point numbers is likely better.
13838 auto ReplaceTwoFloatLoad = [&]() {
13839 if (VT != MVT::i64)
13840 return false;
13842 if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
13843 LD->isVolatile())
13844 return false;
13846 // We're looking for a sequence like this:
13847 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
13848 // t16: i64 = srl t13, Constant:i32<32>
13849 // t17: i32 = truncate t16
13850 // t18: f32 = bitcast t17
13851 // t19: i32 = truncate t13
13852 // t20: f32 = bitcast t19
13854 if (!LD->hasNUsesOfValue(2, 0))
13855 return false;
13857 auto UI = LD->use_begin();
13858 while (UI.getUse().getResNo() != 0) ++UI;
13859 SDNode *Trunc = *UI++;
13860 while (UI.getUse().getResNo() != 0) ++UI;
13861 SDNode *RightShift = *UI;
13862 if (Trunc->getOpcode() != ISD::TRUNCATE)
13863 std::swap(Trunc, RightShift);
13865 if (Trunc->getOpcode() != ISD::TRUNCATE ||
13866 Trunc->getValueType(0) != MVT::i32 ||
13867 !Trunc->hasOneUse())
13868 return false;
13869 if (RightShift->getOpcode() != ISD::SRL ||
13870 !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
13871 RightShift->getConstantOperandVal(1) != 32 ||
13872 !RightShift->hasOneUse())
13873 return false;
13875 SDNode *Trunc2 = *RightShift->use_begin();
13876 if (Trunc2->getOpcode() != ISD::TRUNCATE ||
13877 Trunc2->getValueType(0) != MVT::i32 ||
13878 !Trunc2->hasOneUse())
13879 return false;
13881 SDNode *Bitcast = *Trunc->use_begin();
13882 SDNode *Bitcast2 = *Trunc2->use_begin();
13884 if (Bitcast->getOpcode() != ISD::BITCAST ||
13885 Bitcast->getValueType(0) != MVT::f32)
13886 return false;
13887 if (Bitcast2->getOpcode() != ISD::BITCAST ||
13888 Bitcast2->getValueType(0) != MVT::f32)
13889 return false;
13891 if (Subtarget.isLittleEndian())
13892 std::swap(Bitcast, Bitcast2);
13894 // Bitcast has the second float (in memory-layout order) and Bitcast2
13895 // has the first one.
13897 SDValue BasePtr = LD->getBasePtr();
13898 if (LD->isIndexed()) {
13899 assert(LD->getAddressingMode() == ISD::PRE_INC &&
13900 "Non-pre-inc AM on PPC?");
13902 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
13907 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
13908 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
13909 LD->getPointerInfo(), LD->getAlignment(),
13910 MMOFlags, LD->getAAInfo());
13911 SDValue AddPtr =
13912 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
13913 BasePtr, DAG.getIntPtrConstant(4, dl));
13914 SDValue FloatLoad2 = DAG.getLoad(
13915 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
13916 LD->getPointerInfo().getWithOffset(4),
13917 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
13919 if (LD->isIndexed()) {
13920 // Note that DAGCombine should re-form any pre-increment load(s) from
13921 // what is produced here if that makes sense.
13922 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
13923 }
13925 DCI.CombineTo(Bitcast2, FloatLoad);
13926 DCI.CombineTo(Bitcast, FloatLoad2);
13928 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
13929 SDValue(FloatLoad2.getNode(), 1));
13930 return true;
13931 };
13933 if (ReplaceTwoFloatLoad())
13934 return SDValue(N, 0);
13936 EVT MemVT = LD->getMemoryVT();
13937 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
13938 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
13939 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
13940 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
13941 if (LD->isUnindexed() && VT.isVector() &&
13942 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
13943 // P8 and later hardware should just use LOAD.
13944 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
13945 VT == MVT::v4i32 || VT == MVT::v4f32)) ||
13946 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
13947 LD->getAlignment() >= ScalarABIAlignment)) &&
13948 LD->getAlignment() < ABIAlignment) {
13949 // This is a type-legal unaligned Altivec or QPX load.
13950 SDValue Chain = LD->getChain();
13951 SDValue Ptr = LD->getBasePtr();
13952 bool isLittleEndian = Subtarget.isLittleEndian();
13954 // This implements the loading of unaligned vectors as described in
13955 // the venerable Apple Velocity Engine overview. Specifically:
13956 // https://developer.apple.com/hardwaredrivers/ve/alignment.html
13957 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
13959 // The general idea is to expand a sequence of one or more unaligned
13960 // loads into an alignment-based permutation-control instruction (lvsl
13961 // or lvsr), a series of regular vector loads (which always truncate
13962 // their input address to an aligned address), and a series of
13963 // permutations. The results of these permutations are the requested
13964 // loaded values. The trick is that the last "extra" load is not taken
13965 // from the address you might suspect (sizeof(vector) bytes after the
13966 // last requested load), but rather sizeof(vector) - 1 bytes after the
13967 // last requested vector. The point of this is to avoid a page fault if
13968 // the base address happened to be aligned. This works because if the
13969 // base address is aligned, then adding less than a full vector length
13970 // will cause the last vector in the sequence to be (re)loaded.
13971 // Otherwise, the next vector will be fetched as you might suspect was
13972 // the one requested.
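// As an illustrative example: loading a 16-byte vector from the unaligned
// address 0x1007 performs lvx from 0x1000 and from 0x1016 (truncated to
// 0x1010), then vperm with the lvsl/lvsr control. Had the address been the
// aligned 0x1000, the "extra" load from 0x100F truncates back to 0x1000,
// so no byte past the requested data is touched.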
13974 // We might be able to reuse the permutation generation from
13975 // a different base address offset from this one by an aligned amount.
13976 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
13977 // optimization later.
13978 Intrinsic::ID Intr, IntrLD, IntrPerm;
13979 MVT PermCntlTy, PermTy, LDTy;
13980 if (Subtarget.hasAltivec()) {
13981 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
13982 Intrinsic::ppc_altivec_lvsl;
13983 IntrLD = Intrinsic::ppc_altivec_lvx;
13984 IntrPerm = Intrinsic::ppc_altivec_vperm;
13985 PermCntlTy = MVT::v16i8;
13986 PermTy = MVT::v4i32;
13987 LDTy = MVT::v4i32;
13988 } else {
13989 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
13990 Intrinsic::ppc_qpx_qvlpcls;
13991 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
13992 Intrinsic::ppc_qpx_qvlfs;
13993 IntrPerm = Intrinsic::ppc_qpx_qvfperm;
13994 PermCntlTy = MVT::v4f64;
13995 PermTy = MVT::v4f64;
13996 LDTy = MemVT.getSimpleVT();
13997 }
13999 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14001 // Create the new MMO for the new base load. It is like the original MMO,
14002 // but represents an area in memory almost twice the vector size centered
14003 // on the original address. If the address is unaligned, we might start
14004 // reading up to (sizeof(vector)-1) bytes below the address of the
14005 // original unaligned load.
14006 MachineFunction &MF = DAG.getMachineFunction();
14007 MachineMemOperand *BaseMMO =
14008 MF.getMachineMemOperand(LD->getMemOperand(),
14009 -(long)MemVT.getStoreSize()+1,
14010 2*MemVT.getStoreSize()-1);
14012 // Create the new base load.
14013 SDValue LDXIntID =
14014 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14015 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14016 SDValue BaseLoad =
14017 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14018 DAG.getVTList(PermTy, MVT::Other),
14019 BaseLoadOps, LDTy, BaseMMO);
14021 // Note that the value of IncOffset (which is provided to the next
14022 // load's pointer info offset value, and thus used to calculate the
14023 // alignment), and the value of IncValue (which is actually used to
14024 // increment the pointer value) are different! This is because we
14025 // require the next load to appear to be aligned, even though it
14026 // is actually offset from the base pointer by a lesser amount.
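// For example (illustrative): for a 16-byte vector, IncOffset is 16 so the
// second load's PointerInfo claims an aligned offset, while IncValue may be
// reduced to 15 below so that the pointer increment itself stays within the
// bytes the original access could legally touch.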
14027 int IncOffset = VT.getSizeInBits() / 8;
14028 int IncValue = IncOffset;
14030 // Walk (both up and down) the chain looking for another load at the real
14031 // (aligned) offset (the alignment of the other load does not matter in
14032 // this case). If found, then do not use the offset reduction trick, as
14033 // that will prevent the loads from being later combined (as they would
14034 // otherwise be duplicates).
14035 if (!findConsecutiveLoad(LD, DAG))
14036 --IncValue;
14038 SDValue Increment =
14039 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14040 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14042 MachineMemOperand *ExtraMMO =
14043 MF.getMachineMemOperand(LD->getMemOperand(),
14044 1, 2*MemVT.getStoreSize()-1);
14045 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14046 SDValue ExtraLoad =
14047 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14048 DAG.getVTList(PermTy, MVT::Other),
14049 ExtraLoadOps, LDTy, ExtraMMO);
14051 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14052 BaseLoad.getValue(1), ExtraLoad.getValue(1));
14054 // Because vperm has a big-endian bias, we must reverse the order
14055 // of the input vectors and complement the permute control vector
14056 // when generating little endian code. We have already handled the
14057 // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14058 // and ExtraLoad here.
14059 SDValue Perm;
14060 if (isLittleEndian)
14061 Perm = BuildIntrinsicOp(IntrPerm,
14062 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14064 Perm = BuildIntrinsicOp(IntrPerm,
14065 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14067 if (VT != PermTy)
14068 Perm = Subtarget.hasAltivec() ?
14069 DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
14070 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
14071 DAG.getTargetConstant(1, dl, MVT::i64));
14072 // second argument is 1 because this rounding
14073 // is always exact.
14075 // The output of the permutation is our loaded result, the TokenFactor is
14076 // the new chain.
14077 DCI.CombineTo(N, Perm, TF);
14078 return SDValue(N, 0);
14079 }
14080 }
14081 break;
14082 case ISD::INTRINSIC_WO_CHAIN: {
14083 bool isLittleEndian = Subtarget.isLittleEndian();
14084 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14085 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14086 : Intrinsic::ppc_altivec_lvsl);
14087 if ((IID == Intr ||
14088 IID == Intrinsic::ppc_qpx_qvlpcld ||
14089 IID == Intrinsic::ppc_qpx_qvlpcls) &&
14090 N->getOperand(1)->getOpcode() == ISD::ADD) {
14091 SDValue Add = N->getOperand(1);
14093 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
14094 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
14096 if (DAG.MaskedValueIsZero(Add->getOperand(1),
14097 APInt::getAllOnesValue(Bits /* alignment */)
14098 .zext(Add.getScalarValueSizeInBits()))) {
14099 SDNode *BasePtr = Add->getOperand(0).getNode();
14100 for (SDNode::use_iterator UI = BasePtr->use_begin(),
14101 UE = BasePtr->use_end();
14102 UI != UE; ++UI) {
14103 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14104 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
14105 // We've found another LVSL/LVSR, and this address is an aligned
14106 // multiple of that one. The results will be the same, so use the
14107 // one we've just found instead.
14109 return SDValue(*UI, 0);
14110 }
14111 }
14112 }
14114 if (isa<ConstantSDNode>(Add->getOperand(1))) {
14115 SDNode *BasePtr = Add->getOperand(0).getNode();
14116 for (SDNode::use_iterator UI = BasePtr->use_begin(),
14117 UE = BasePtr->use_end(); UI != UE; ++UI) {
14118 if (UI->getOpcode() == ISD::ADD &&
14119 isa<ConstantSDNode>(UI->getOperand(1)) &&
14120 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14121 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14122 (1ULL << Bits) == 0) {
14123 SDNode *OtherAdd = *UI;
14124 for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14125 VE = OtherAdd->use_end(); VI != VE; ++VI) {
14126 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14127 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14128 return SDValue(*VI, 0);
14129 }
14130 }
14131 }
14132 }
14133 }
14134 }
14136 // Combine vmaxsw/h/b(a, a's negation) to abs(a)
14137 // Expose the vabsduw/h/b opportunity for downstream passes.
14138 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14139 (IID == Intrinsic::ppc_altivec_vmaxsw ||
14140 IID == Intrinsic::ppc_altivec_vmaxsh ||
14141 IID == Intrinsic::ppc_altivec_vmaxsb)) {
14142 SDValue V1 = N->getOperand(1);
14143 SDValue V2 = N->getOperand(2);
14144 if ((V1.getSimpleValueType() == MVT::v4i32 ||
14145 V1.getSimpleValueType() == MVT::v8i16 ||
14146 V1.getSimpleValueType() == MVT::v16i8) &&
14147 V1.getSimpleValueType() == V2.getSimpleValueType()) {
14148 // (0-a, a)
14149 if (V1.getOpcode() == ISD::SUB &&
14150 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14151 V1.getOperand(1) == V2) {
14152 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14153 }
14154 // (a, 0-a)
14155 if (V2.getOpcode() == ISD::SUB &&
14156 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14157 V2.getOperand(1) == V1) {
14158 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14159 }
14160 // (x-y, y-x)
14161 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14162 V1.getOperand(0) == V2.getOperand(1) &&
14163 V1.getOperand(1) == V2.getOperand(0)) {
14164 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14165 }
14166 }
14167 }
14169 break;
14170 }
14171 case ISD::INTRINSIC_W_CHAIN:
14172 // For little endian, VSX loads require generating lxvd2x/xxswapd.
14173 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14174 if (Subtarget.needsSwapsForVSXMemOps()) {
14175 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14176 default:
14177 break;
14178 case Intrinsic::ppc_vsx_lxvw4x:
14179 case Intrinsic::ppc_vsx_lxvd2x:
14180 return expandVSXLoadForLE(N, DCI);
14181 }
14182 }
14183 break;
14184 case ISD::INTRINSIC_VOID:
14185 // For little endian, VSX stores require generating xxswapd/stxvd2x.
14186 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14187 if (Subtarget.needsSwapsForVSXMemOps()) {
14188 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14189 default:
14190 break;
14191 case Intrinsic::ppc_vsx_stxvw4x:
14192 case Intrinsic::ppc_vsx_stxvd2x:
14193 return expandVSXStoreForLE(N, DCI);
14194 }
14195 }
14196 break;
14197 case ISD::BSWAP:
14198 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
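// For example (illustrative):
//   (i32 bswap (load addr))
// becomes an LBRX node (lwbrx), with a truncate appended for the i16 case.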
14199 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14200 N->getOperand(0).hasOneUse() &&
14201 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14202 (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14203 N->getValueType(0) == MVT::i64))) {
14204 SDValue Load = N->getOperand(0);
14205 LoadSDNode *LD = cast<LoadSDNode>(Load);
14206 // Create the byte-swapping load.
14207 SDValue Ops[] = {
14208 LD->getChain(), // Chain
14209 LD->getBasePtr(), // Ptr
14210 DAG.getValueType(N->getValueType(0)) // VT
14211 };
14212 SDValue BSLoad =
14213 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14214 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14215 MVT::i64 : MVT::i32, MVT::Other),
14216 Ops, LD->getMemoryVT(), LD->getMemOperand());
14218 // If this is an i16 load, insert the truncate.
14219 SDValue ResVal = BSLoad;
14220 if (N->getValueType(0) == MVT::i16)
14221 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14223 // First, combine the bswap away. This makes the value produced by the
14224 // load dead.
14225 DCI.CombineTo(N, ResVal);
14227 // Next, combine the load away, we give it a bogus result value but a real
14228 // chain result. The result value is dead because the bswap is dead.
14229 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14231 // Return N so it doesn't get rechecked!
14232 return SDValue(N, 0);
14233 }
14234 break;
14235 case PPCISD::VCMP:
14236 // If a VCMPo node already exists with exactly the same operands as this
14237 // node, use its result instead of this node (VCMPo computes both a CR6 and
14238 // a normal output).
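// For example (illustrative): if both (VCMP $a, $b) and (VCMPo $a, $b)
// exist with identical operands, the plain VCMP can simply reuse the
// VCMPo's vector result.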
14240 if (!N->getOperand(0).hasOneUse() &&
14241 !N->getOperand(1).hasOneUse() &&
14242 !N->getOperand(2).hasOneUse()) {
14244 // Scan all of the users of the LHS, looking for VCMPo's that match.
14245 SDNode *VCMPoNode = nullptr;
14247 SDNode *LHSN = N->getOperand(0).getNode();
14248 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14249 UI != E; ++UI)
14250 if (UI->getOpcode() == PPCISD::VCMPo &&
14251 UI->getOperand(1) == N->getOperand(1) &&
14252 UI->getOperand(2) == N->getOperand(2) &&
14253 UI->getOperand(0) == N->getOperand(0)) {
14254 VCMPoNode = *UI;
14255 break;
14256 }
14258 // If there is no VCMPo node, or if the flag value has a single use, don't
14259 // transform this.
14260 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
14261 break;
14263 // Look at the (necessarily single) use of the flag value. If it has a
14264 // chain, this transformation is more complex. Note that multiple things
14265 // could use the value result, which we should ignore.
14266 SDNode *FlagUser = nullptr;
14267 for (SDNode::use_iterator UI = VCMPoNode->use_begin();
14268 FlagUser == nullptr; ++UI) {
14269 assert(UI != VCMPoNode->use_end() && "Didn't find user!");
14270 SDNode *User = *UI;
14271 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
14272 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
14273 FlagUser = User;
14274 break;
14275 }
14276 }
14277 }
14279 // If the user is a MFOCRF instruction, we know this is safe.
14280 // Otherwise we give up for right now.
14281 if (FlagUser->getOpcode() == PPCISD::MFOCRF)
14282 return SDValue(VCMPoNode, 0);
14283 }
14284 break;
14285 case ISD::BRCOND: {
14286 SDValue Cond = N->getOperand(1);
14287 SDValue Target = N->getOperand(2);
14289 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
14290 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
14291 Intrinsic::loop_decrement) {
14293 // We now need to make the intrinsic dead (it cannot be instruction
14294 // selected).
14295 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
14296 assert(Cond.getNode()->hasOneUse() &&
14297 "Counter decrement has more than one use");
14299 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
14300 N->getOperand(0), Target);
14301 }
14302 }
14303 break;
14304 case ISD::BR_CC: {
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, pass through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
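
  // For example, branching on @llvm.ppc.altivec.vcmpequw.p selects to the
  // record form of the vector compare (vcmpequw.), which sets CR6, followed
  // by a conditional branch on the relevant CR6 bit, avoiding an mfocrf to
  // move the predicate result into a GPR.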
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  case ISD::VSELECT:
    return combineVSelect(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}
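
// For example, an i32 (sdiv %x, 4) selects via PPCISD::SRA_ADDZE to the
// classic two-instruction sequence (registers illustrative):
//   srawi r4, r3, 2   ; sets CA if %x is negative and nonzero bits shift out
//   addze r3, r4      ; add the carry back in, rounding toward zero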

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR_FUTURE: {
    if (!ML)
      break;

    if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment,
      // so that we can decrease cache misses and branch-prediction misses.
      // Actual alignment of the loop will depend on the hotness check and
      // other logic in alignBlocks.
      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
        return Align(32);
    }

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}
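
// For example, under the heuristic above a loop whose instructions total
// 28 bytes falls in the (16, 32] range and is aligned to 32 bytes, so the
// entire loop body fits in one 32-byte aligned fetch block.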

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // just hold 64-bit integer data.
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;
  else if (StringRef(constraint) == "ww" && type->isFloatTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::GPRCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
        if (VT == MVT::v4f64 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QFRCRegClass);
        if (VT == MVT::v4f32 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QSRCRegClass);
      }
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  // If we name a VSX register, we can't defer to the base class because it
  // will not recognize the correct register (their names will be VSL{0-31}
  // and V{0-31} so they won't match). So we match them here.
  if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
    int VSNum = atoi(Constraint.data() + 3);
    assert(VSNum >= 0 && VSNum <= 63 &&
           "Attempted to access a vsr out of range");
    if (VSNum < 32)
      return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
    return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
  }
  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register class.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
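
// For example, in
//   asm("addi %0, %1, %2" : "=r"(dst) : "r"(src), "I"(17));
// the 'I' operand is checked with isInt<16> above, because addi encodes only
// a signed 16-bit immediate; an out-of-range constant is not matched here.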

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r:
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}
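
// For example, r+i forms with a 16-bit displacement such as "lwz r3, 8(r4)"
// and indexed r+r forms such as "lwzx r3, r4, r5" are legal, while r+r+i or
// a scaled index (base + 4*index) have no PPC encoding and are rejected.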

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool IsDarwinABI = Subtarget.isDarwinABI();

  bool is64Bit = isPPC64 && VT == LLT::scalar(64);
  if (!is64Bit && VT != LLT::scalar(32))
    report_fatal_error("Invalid register global variable type");

  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                     .Case("r2", (IsDarwinABI || isPPC64) ? Register() : PPC::R2)
                     .Case("r13", (!isPPC64 && IsDarwinABI) ? Register() :
                                    (is64Bit ? PPC::X13 : PPC::R13))
                     .Default(Register());

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
  if (Subtarget.is32BitELFABI())
    return true;

  // AIX accesses everything indirectly through the TOC, which is similar to
  // the GOT.
  if (Subtarget.isAIXABI())
    return true;

  CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // If it is the small or large code model, module locals are accessed
  // indirectly by loading their address from .toc/.got.
  if (CModel == CodeModel::Small || CModel == CodeModel::Large)
    return true;

  // JumpTable and BlockAddress are accessed as got-indirect.
  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
    return true;

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
    return Subtarget.isGVIndirectSymbol(G->getGlobal());

  return false;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, the destination alignment can satisfy any constraint.
/// Similarly, if SrcAlign is zero there is no need to check it against an
/// alignment requirement, probably because the source does not need to be
/// loaded. If 'IsMemset' is true, this is expanding a memset; if 'ZeroMemset'
/// is also true, it is a memset of zero. 'MemcpyStrSrc' indicates whether the
/// memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}
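
// These ranges match what the ISA encodes directly: cmpwi/cmpdi take a
// signed 16-bit immediate, cmplwi/cmpldi an unsigned one, and addi a signed
// 16-bit addend, so immediates passing these checks fold into a single
// instruction rather than being materialized into a register first.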

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.isFloatingPoint() && !Subtarget.allowsUnalignedFPAccess())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return (EnableQuadPrecision && Subtarget.hasP9Vector());
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any
  // call site. Hence we include LR in the scratch registers, which are in
  // turn added as implicit-defs for stackmaps and patchpoints. The same
  // reasoning applies to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
  EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves.

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions; it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();

  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend, return
    // false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::f128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}
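
// For example, for v4i32 the AltiVec shift instructions (vslw/vsrw/vsraw)
// already use each element's shift amount modulo 32, so a (and y, 31) mask
// on the amount is redundant; emitting the PPCISD node directly drops it.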

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 ||
      CN1 == nullptr || N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswli, but the shift amount
  // could be i64.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}
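
// For example, on ISA 3.0 (POWER9) this turns
//   (i64 shl (sext i32 %x to i64), 3)
// into a single extswsli (extend sign word and shift left immediate) instead
// of an extsw followed by a separate shift.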

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the expression (addi Z, -C) can be simplified to Z.
// Requirement: -C is in [-32768, 32767], and X and Z are MVT::i64 types.
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}
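
// For example, (add %x, (zext (setne i64 %z, 5))) becomes roughly
// (registers illustrative):
//   addi  r5, r4, -5   ; %z - 5
//   addic r5, r5, -1   ; CA = 1 iff (%z - 5) != 0
//   addze r3, r3       ; %x + CA
// so the i1 result of the compare is never materialized in a GPR.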

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128-bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// The reason this is required is because we do not have a legal i128 type,
// and so we want to prevent having to store the f128 and then reload part
// of it.
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}
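
// For example, (i64 trunc (i128 bitcast (f128 %v))) becomes an
// extract_vector_elt from (v2i64 bitcast %v), keeping the value in a vector
// register instead of storing the f128 to the stack and reloading half of it.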

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for legal types.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8.
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2

      // The cycle ratios of the related operations are shown in the table
      // above. Because mul is 5 (scalar) / 7 (vector) and add/sub/shl are all
      // 2 for both scalar and vector types, the two-instruction patterns
      // (add/sub + shl, cost 4) are always profitable; but the
      // three-instruction pattern
      // (mul x, -(2^N + 1)) => -(add (shl x, N), x), with sub + add + shl
      // costing 6, is only profitable for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
  } else {
    return SDValue();
  }
}
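
// For example, (mul %x, 5) matches the 2^N + 1 pattern above and becomes
// (add (shl %x, 2), %x), i.e. (registers illustrative):
//   slwi r4, r3, 2
//   add  r3, r4, r3
// which is cheaper than mullw on the directives accepted by IsProfitable.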

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed,
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis., we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}
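
// For example, (icmp eq (and %x, 255), 0) can become a single record-form
// instruction plus a branch (registers illustrative):
//   andi. r3, r3, 255   ; also compares the result against zero into CR0
//   beq   cr0, <target>
// so sinking the mask next to its compare-with-zero is profitable.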

// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
// Transform (abs (sub a, b)) to (vabsd a b 1) if a and b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even for signed integers, if it's known to be positive (as a signed
    // integer) due to zero-extended inputs.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw.
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}

// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD is only available for types v4i32/v8i16/v16i8.
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // Proceed only if at least one operand has a single use, so the transform
  // saves at least one dependent computation.
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparisons here.
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}