//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
    cl::desc("enable quad precision float support on ppc"), cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

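// Forward declarations of file-local helpers defined later in this file.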
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to the customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9. On P9 we may
  // use a hardware instruction to compute the remainder. The instructions are
  // not legalized directly because in the cases where the result of both the
  // remainder and the division is required, it is more efficient to compute
  // the remainder from the result of the division rather than use the
  // remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

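  // SPE has no fused multiply-add, so FMA is expanded into a separate
  // multiply and add; with a classic FPU, FMA selects to fmadd/fmadds.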
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

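  // FLT_ROUNDS_ is custom lowered to read the rounding mode from the FPSCR.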
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have SELECT.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

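  // Expand BR_JT: jump tables are lowered through an explicit table load
  // followed by an indirect branch instead.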
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  } else {
    setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
    setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
    setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
    setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
    setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
    setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
    setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
  }
  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

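      // No vector type has legal truncating stores to, or extending loads
      // from, any other vector type; mark every such pair Expand.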
      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }

      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact exception,
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
        // doing it here.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

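        // Pre-P8 targets have no doubleword vector compare instructions, so
        // v2i64 SETCC must be custom lowered.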
        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops for PowerPC.
        setOperationAction(ISD::FSIN, MVT::f128, Expand);
        setOperationAction(ISD::FCOS, MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

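  // 64-bit targets can read the time base directly; 32-bit targets need a
  // custom loop that rereads the upper half around the lower-half read to
  // get a consistent 64-bit value.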
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

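  // Scalar boolean results (e.g. from SETCC) are materialized as 0 or 1.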
  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

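  // r1 is the stack pointer on PPC; in 64-bit mode it is the 64-bit X1.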
  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
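    // powi has no libm equivalent; __powikf2 is the compiler-rt routine.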
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(Align(16));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR_FUTURE:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // Darwin passes everything on a 4-byte boundary.
  if (Subtarget.isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest are passed on an 8-byte boundary on PPC64 and 4-byte on PPC32.
  unsigned Align = Subtarget.isPPC64() ? 8 : 4;
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16);
  return Align;
}

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER: break;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
    return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
    return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE: return "PPCISD::FRE";
  case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
  case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL: return "PPCISD::VECSHL";
  case PPCISD::CMPB: return "PPCISD::CMPB";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL: return "PPCISD::CALL";
  case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL: return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
  case PPCISD::MFVSR: return "PPCISD::MFVSR";
  case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDI_rec_1_EQ_BIT:
    return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT:
    return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
  case PPCISD::STXSIX: return "PPCISD::STXSIX";
  case PPCISD::VEXTS: return "PPCISD::VEXTS";
  case PPCISD::SExtVElems: return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT:
    return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ: return "PPCISD::BDNZ";
  case PPCISD::BDZ: return "PPCISD::BDZ";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET: return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
1453 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
1454 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
1455 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1456 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1457 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
1458 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
1459 case PPCISD::SC: return "PPCISD::SC";
1460 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
1461 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
1462 case PPCISD::RFEBB: return "PPCISD::RFEBB";
1463 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
1464 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
1465 case PPCISD::VABSD: return "PPCISD::VABSD";
1466 case PPCISD::QVFPERM: return "PPCISD::QVFPERM";
1467 case PPCISD::QVGPCI: return "PPCISD::QVGPCI";
1468 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI";
1469 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI";
1470 case PPCISD::QBFLT: return "PPCISD::QBFLT";
1471 case PPCISD::QVLFSb: return "PPCISD::QVLFSb";
1472 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
1473 case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
1474 case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
1475 case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
1476 case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
1477 case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
  }
  return nullptr;
}
EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}
1494 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}
1499 //===----------------------------------------------------------------------===//
1500 // Node matching predicates, for use by the tblgen matching code.
1501 //===----------------------------------------------------------------------===//
1503 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1504 static bool isFloatingPointZero(SDValue Op) {
1505 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1506 return CFP->getValueAPF().isZero();
1507 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1508 // Maybe this has already been legalized into the constant pool?
1509 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1510 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }

  return false;
}
1516 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
1517 /// true if Op is undef or if it matches the specified value.
1518 static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}
1522 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1523 /// VPKUHUM instruction.
1524 /// The ShuffleKind distinguishes between big-endian operations with
1525 /// two different inputs (0), either-endian operations with two identical
1526 /// inputs (1), and little-endian operations with two different inputs (2).
1527 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
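/// For instance (illustrative), the big-endian two-input form (ShuffleKind 0)
/// matches the v16i8 mask <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>,
/// i.e. the odd bytes of the 32-byte concatenation of the two inputs.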
1528 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1529 SelectionDAG &DAG) {
1530 bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}
1553 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1554 /// VPKUWUM instruction.
1555 /// The ShuffleKind distinguishes between big-endian operations with
1556 /// two different inputs (0), either-endian operations with two identical
1557 /// inputs (1), and little-endian operations with two different inputs (2).
1558 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
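/// For instance (illustrative), the big-endian two-input form (ShuffleKind 0)
/// matches the v16i8 mask <2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31>,
/// i.e. the low halfword of each word of the concatenated inputs.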
1559 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1560 SelectionDAG &DAG) {
1561 bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}
1588 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1589 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1590 /// current subtarget.
1592 /// The ShuffleKind distinguishes between big-endian operations with
1593 /// two different inputs (0), either-endian operations with two identical
1594 /// inputs (1), and little-endian operations with two different inputs (2).
1595 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1596 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1597 SelectionDAG &DAG) {
1598 const PPCSubtarget& Subtarget =
1599 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

1603 bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i   ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1 ), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2 ), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3 ), i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8 ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9 ), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}
1638 /// isVMerge - Common function, used to match vmrg* shuffles.
1640 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1641 unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
1644 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1645 "Unsupported merge size!");
1647 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units
1648 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit
1649 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1650 LHSStart+j+i*UnitSize) ||
1651 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
1658 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1659 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1660 /// The ShuffleKind distinguishes between big-endian merges with two
1661 /// different inputs (0), either-endian merges with two identical inputs (1),
1662 /// and little-endian merges with two different inputs (2). For the latter,
1663 /// the input operands are swapped (see PPCInstrAltivec.td).
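/// For instance (illustrative), a big-endian vmrglb of two different inputs
/// (ShuffleKind 0) corresponds to isVMerge(N, 1, 8, 24) and matches the mask
/// <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>.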
1664 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1665 unsigned ShuffleKind, SelectionDAG &DAG) {
1666 if (DAG.getDataLayout().isLittleEndian()) {
1667 if (ShuffleKind == 1) // unary
1668 return isVMerge(N, UnitSize, 0, 0);
1669 else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
1674 if (ShuffleKind == 1) // unary
1675 return isVMerge(N, UnitSize, 8, 8);
1676 else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}
1683 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1684 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1685 /// The ShuffleKind distinguishes between big-endian merges with two
1686 /// different inputs (0), either-endian merges with two identical inputs (1),
1687 /// and little-endian merges with two different inputs (2). For the latter,
1688 /// the input operands are swapped (see PPCInstrAltivec.td).
1689 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1690 unsigned ShuffleKind, SelectionDAG &DAG) {
1691 if (DAG.getDataLayout().isLittleEndian()) {
1692 if (ShuffleKind == 1) // unary
1693 return isVMerge(N, UnitSize, 8, 8);
1694 else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
1699 if (ShuffleKind == 1) // unary
1700 return isVMerge(N, UnitSize, 0, 0);
1701 else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}
/**
 * Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   For little endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   For big endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
1720 * A detailed description of the vector element ordering for little endian and
1721 * big endian can be found at
1722 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1723 * Targeting your applications - what little endian and big endian IBM XL C/C++
1724 * compiler differences mean to you
1726 * The mask to the shuffle vector instruction specifies the indices of the
1727 * elements from the two input vectors to place in the result. The elements are
1728 * numbered in array-access order, starting with the first vector. These vectors
 * are always of type v16i8, thus each vector will contain 16 byte-sized
 * elements. More info on the shuffle vector can be found in the
1731 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1732 * Language Reference.
1734 * The RHSStartValue indicates whether the same input vectors are used (unary)
1735 * or two different input vectors are used, based on the following:
1736 * - If the instruction uses the same vector for both inputs, the range of the
 *   indices will be 0 to 15. In this case, the RHSStart value passed should
 *   be 0.
1739 * - If the instruction has two different vectors then the range of the
1740 * indices will be 0 to 31. In this case, the RHSStart value passed should
1741 * be 16 (indices 0-15 specify elements in the first vector while indices 16
1742 * to 31 specify elements in the second vector).
1744 * \param[in] N The shuffle vector SD Node to analyze
1745 * \param[in] IndexOffset Specifies whether to look for even or odd elements
1746 * \param[in] RHSStartValue Specifies the starting index for the righthand input
1747 * vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
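// For instance (illustrative), a big-endian vmrgew of two different inputs
// corresponds to isVMerge(N, /*IndexOffset=*/0, /*RHSStartValue=*/16) and
// matches the mask <0,1,2,3,16,17,18,19,8,9,10,11,24,25,26,27>: the even
// words of the two inputs, interleaved.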
1750 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1751 unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

1755 for (unsigned i = 0; i < 2; ++i)
1756 for (unsigned j = 0; j < 4; ++j)
1757 if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1758 i*RHSStartValue+j+IndexOffset) ||
1759 !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;

  return true;
}
/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
1767 * vmrgow instructions.
1769 * \param[in] N The shuffle vector SD Node to analyze
1770 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1771 * \param[in] ShuffleKind Identify the type of merge:
1772 * - 0 = big-endian merge with two different inputs;
1773 * - 1 = either-endian merge with two identical inputs;
1774 * - 2 = little-endian merge with two different inputs (inputs are swapped for
1775 * little-endian merges).
1776 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask represents an even or odd word merge
 */
1779 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1780 unsigned ShuffleKind, SelectionDAG &DAG) {
1781 if (DAG.getDataLayout().isLittleEndian()) {
1782 unsigned indexOffset = CheckEven ? 4 : 0;
1783 if (ShuffleKind == 1) // Unary
1784 return isVMerge(N, indexOffset, 0);
1785 else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
1792 if (ShuffleKind == 1) // Unary
1793 return isVMerge(N, indexOffset, 0);
1794 else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}
1802 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1803 /// amount, otherwise return -1.
1804 /// The ShuffleKind distinguishes between big-endian operations with two
1805 /// different inputs (0), either-endian operations with two identical inputs
1806 /// (1), and little-endian operations with two different inputs (2). For the
1807 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
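/// For instance (illustrative), for ShuffleKind 0 on a big-endian target the
/// mask <3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18> returns a shift amount of
/// 3: the result is the 32-byte concatenation of the inputs shifted left by
/// three bytes.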
1808 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1809 SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

1813 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1815 // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

1820 if (i == 16) return -1; // all undef.
1822 // Otherwise, check to see if the rest of the elements are consecutively
1823 // numbered from this value.
1824 unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

1828 bool isLE = DAG.getDataLayout().isLittleEndian();
1830 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1831 // Check the rest of the elements to see if they are consecutive.
1832 for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
1835 } else if (ShuffleKind == 1) {
1836 // Check the rest of the elements to see if they are consecutive.
1837 for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}
1849 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
1850 /// specifies a splat of a single element that is suitable for input to
1851 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
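/// For instance (illustrative), <5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5> is a splat
/// of byte element 5 (EltSize 1), and <4,5,6,7,4,5,6,7,4,5,6,7,4,5,6,7> is a
/// splat of word element 1 (EltSize 4).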
1852 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
1853 assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
1854 EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
1856 // The consecutive indices need to specify an element, not part of two
1857 // different elements. So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

1861 // This is a splat operation if each element of the permute is the same, and
1862 // if the value doesn't reference the second vector.
1863 unsigned ElementBase = N->getMaskElt(0);
1865 // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

1869 // Check that the indices are consecutive, in the case of a multi-byte element
1870 // splatted with a v16i8 mask.
1871 for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

1875 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1876 if (N->getMaskElt(i) < 0) continue;
1877 for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }

  return true;
}
1884 /// Check that the mask is shuffling N byte elements. Within each N byte
1885 /// element of the mask, the indices could be either in increasing or
1886 /// decreasing order as long as they are consecutive.
1887 /// \param[in] N the shuffle vector SD Node to analyze
1888 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
1889 /// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the delta between indices within an N-byte element:
///   1 if the mask indices are in increasing order, -1 if decreasing.
1892 /// \return true iff the mask is shuffling N byte elements.
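/// For instance (illustrative), <4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11> is a
/// valid mask for Width = 4 with StepLen = 1, while
/// <3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12> is valid for StepLen = -1.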
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

1899 unsigned NumOfElem = 16 / Width;
1900 unsigned MaskVal[16]; // Width is never greater than 16
1901 for (unsigned i = 0; i < NumOfElem; ++i) {
1902 MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}
1920 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1921 unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

1925 // Now we look at mask elements 0,4,8,12
1926 unsigned M0 = N->getMaskElt(0) / 4;
1927 unsigned M1 = N->getMaskElt(4) / 4;
1928 unsigned M2 = N->getMaskElt(8) / 4;
1929 unsigned M3 = N->getMaskElt(12) / 4;
1930 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1931 unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1933 // Below, let H and L be arbitrary elements of the shuffle mask
1934 // where H is in the range [4,7] and L is in the range [0,3].
1935 // H, 1, 2, 3 or L, 5, 6, 7
1936 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1937 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1938 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
1943 // 0, H, 2, 3 or 4, L, 6, 7
1944 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1945 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1946 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
1951 // 0, 1, H, 3 or 4, 5, L, 7
1952 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1953 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1954 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
1959 // 0, 1, 2, H or 4, 5, 6, L
1960 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1961 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1962 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }
1968 // If both vector operands for the shuffle are the same vector, the mask will
1969 // contain only elements from the first one and the second one will be undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
1973 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1974 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
1978 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
1982 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
1986 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}
1995 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
1996 bool &Swap, bool IsLE) {
1997 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
1998 // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

2002 // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2003 unsigned M0 = N->getMaskElt(0) / 4;
2004 unsigned M1 = N->getMaskElt(4) / 4;
2005 unsigned M2 = N->getMaskElt(8) / 4;
2006 unsigned M3 = N->getMaskElt(12) / 4;
2008 // If both vector operands for the shuffle are the same vector, the mask will
2009 // contain only elements from the first one and the second one will be undef.
2010 if (N->getOperand(1).isUndef()) {
2011 assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

2020 // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2026 // Input vectors don't need to be swapped if the leading element
2027 // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
2031 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2032 // Input vectors need to be swapped if the leading element
2033 // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }
    return true;
  } else { // BE
2041 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2042 // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
2046 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2047 // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }
    return true;
  }
}
2057 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2058 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

2063 for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}
2070 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}
2074 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}
2078 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}
2082 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}
2086 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2087 /// if the inputs to the instruction should be swapped and set \p DM to the
2088 /// value for the immediate.
2089 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2090 /// AND element 0 of the result comes from the first input (LE) or second input
2091 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
/// mask.
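/// For instance (illustrative), on a big-endian target the mask
/// <0,1,2,3,4,5,6,7,24,25,26,27,28,29,30,31> selects doubleword 0 of the
/// first input and doubleword 1 of the second, yielding DM = 1 and
/// Swap = false.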
2094 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2095 bool &Swap, bool IsLE) {
2096 assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2098 // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

2102 unsigned M0 = N->getMaskElt(0) / 8;
2103 unsigned M1 = N->getMaskElt(8) / 8;
2104 assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2106 // If both vector operands for the shuffle are the same vector, the mask will
2107 // contain only elements from the first one and the second one will be undef.
2108 if (N->getOperand(1).isUndef()) {
2109 if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

2127 // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

2140 // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}
2147 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2148 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2149 /// elements are counted from the left of the vector register).
2150 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2151 SelectionDAG &DAG) {
2152 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2153 assert(isSplatShuffleMask(SVOp, EltSize));
2154 if (DAG.getDataLayout().isLittleEndian())
2155 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}
2160 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2161 /// by using a vspltis[bhw] instruction of the specified element size, return
2162 /// the constant being splatted. The ByteSize field indicates the number of
2163 /// bytes of each element [124] -> [bhw].
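/// For instance (illustrative), a v8i16 build_vector with every element equal
/// to 3 is matched with ByteSize == 2 and yields the target constant 3, which
/// can then be materialized as "vspltish 3".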
2164 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2165 SDValue OpVal(nullptr, 0);
2167 // If ByteSize of the splat is bigger than the element size of the
2168 // build_vector, then we have a case where we are checking for a splat where
2169 // multiple elements of the buildvector are folded together into a single
// logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2171 unsigned EltSize = 16/N->getNumOperands();
2172 if (EltSize < ByteSize) {
2173 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
2174 SDValue UniquedVals[4];
2175 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2177 // See if all of the elements in the buildvector agree across.
2178 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2179 if (N->getOperand(i).isUndef()) continue;
2180 // If the element isn't a constant, bail fully out.
2181 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2183 if (!UniquedVals[i&(Multiple-1)].getNode())
2184 UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2185 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue(); // no match.
    }

2189 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2190 // either constant or undef values that are identical for each chunk. See
2191 // if these chunks can form into a larger vspltis*.
2193 // Check to see if all of the leading entries are either 0 or -1. If
2194 // neither, then this won't fit into the immediate field.
2195 bool LeadingZero = true;
2196 bool LeadingOnes = true;
2197 for (unsigned i = 0; i != Multiple-1; ++i) {
2198 if (!UniquedVals[i].getNode()) continue; // Must have been undefs.
2200 LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }

    // Finally, check the least significant entry.
    if (LeadingZero) {
2205 if (!UniquedVals[Multiple-1].getNode())
2206 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
2207 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2208 if (Val < 16) // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
2212 if (!UniquedVals[Multiple-1].getNode())
2213 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

2222 // Check to see if this buildvec has a single non-undef value in its elements.
2223 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2224 if (N->getOperand(i).isUndef()) continue;
2225 if (!OpVal.getNode())
2226 OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

2231 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.
  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
2235 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2236 Value = CN->getZExtValue();
2237 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2238 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

2242 // If the splat value is larger than the element value, then we can never do
2243 // this splat. The only case that we could fit the replicated bits into our
2244 // immediate field for would be zero, and we prefer to use vxor for it.
2245 if (ValSizeInBytes < ByteSize) return SDValue();
2247 // If the element value is larger than the splat value, check if it consists
2248 // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

2252 // Properly sign extend the value.
2253 int MaskVal = SignExtend32(Value, ByteSize * 8);
2255 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2256 if (MaskVal == 0) return SDValue();
2258 // Finally, if this value fits in a 5 bit sext field, return it
2259 if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);

  return SDValue();
}
2264 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2265 /// amount, otherwise return -1.
2266 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2267 EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

2271 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2273 // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

2278 if (i == 4) return -1; // all undef.
2280 // Otherwise, check to see if the rest of the elements are consecutively
2281 // numbered from this value.
2282 unsigned ShiftAmt = SVOp->getMaskElt(i);
2283 if (ShiftAmt < i) return -1;
2286 // Check the rest of the elements to see if they are consecutive.
2287 for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}
2294 //===----------------------------------------------------------------------===//
2295 // Addressing Mode Selection
2296 //===----------------------------------------------------------------------===//
2298 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2299 /// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value. If so, this returns true and the
/// immediate.
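/// For instance (illustrative), an i64 constant of -32768 survives the
/// round-trip through int16_t and is accepted, while 32768 (0x8000) does not
/// and is rejected.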
2302 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

2306 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2307 if (N->getValueType(0) == MVT::i32)
2308 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
2312 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}
2317 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2318 /// be represented as an indexed [r+r] operation.
2319 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
       UI != E; ++UI) {
2324 if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2325 if (Memop->getMemoryVT() == MVT::f64) {
2326 Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}
2335 /// SelectAddressRegReg - Given the specified addressed, check to see if it
2336 /// can be represented as an indexed [r+r] operation. Returns false if it
2337 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2338 /// non-zero and N can be represented by a base register plus a signed 16-bit
2339 /// displacement, make a more precise judgement by checking (displacement % \p
2340 /// EncodingAlignment).
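// For instance (illustrative), (add %X, %Y) of two arbitrary registers
// selects Base = %X and Index = %Y, whereas (add %X, 16) returns false here
// so that the more compact [r+imm] form can be selected instead (unless 16
// violates the requested EncodingAlignment).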
2341 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
2342 SDValue &Index, SelectionDAG &DAG,
2343 unsigned EncodingAlignment) const {
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    // SPE f64 load/store can only handle an 8-bit offset, not the usual
    // 16-bit one, so try to match an EVX [r+r] form for it first.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
2350 if (isIntS16Immediate(N.getOperand(1), imm) &&
2351 (!EncodingAlignment || !(imm % EncodingAlignment)))
2352 return false; // r+i
2353 if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2354 return false; // r+i
2356 Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true; // [r+r]
2359 } else if (N.getOpcode() == ISD::OR) {
2360 if (isIntS16Immediate(N.getOperand(1), imm) &&
2361 (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i can be folded.
2364 // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
2367 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2369 if (LHSKnown.Zero.getBoolValue()) {
2370 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
2373 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2374 Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}
2384 // If we happen to be doing an i64 load or store into a stack slot that has
2385 // less than a 4-byte alignment, then the frame-index elimination may need to
2386 // use an indexed load or store instruction (because the offset may not be a
2387 // multiple of 4). The extra register needed to hold the offset comes from the
2388 // register scavenger, and it is possible that the scavenger will need to use
2389 // an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
2392 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

2397 // NOTE: We'll exclude negative FIs here, which come from argument
2398 // lowering, because there are no known test cases triggering this problem
2399 // using packed structures (or similar). We can remove this exclusion if
2400 // we find such a test case. The reason why this is so test-case driven is
2401 // because this entire 'fixup' is only to prevent crashes (from the
2402 // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 %c, i64* %b
2406 // then the store should really be marked as 'align 1', but is not. If it
2407 // were marked as 'align 1' then the indexed form would have been
2408 // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

2413 MachineFunction &MF = DAG.getMachineFunction();
2414 MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

2420 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}
2424 /// Returns true if the address N can be represented by a base register plus
2425 /// a signed 16-bit displacement [r+imm], and if it is not better
2426 /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
2427 /// displacements that are multiples of that value.
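/// For instance (illustrative), with EncodingAlignment = 4 (a DS-form
/// access), (add %X, 12) yields Disp = 12 and Base = %X, while (add %X, 10)
/// is rejected here because 10 is not a multiple of 4.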
2428 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base, SelectionDAG &DAG,
                                            unsigned EncodingAlignment) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);

2434 // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
    return false;

2438 if (N.getOpcode() == ISD::ADD) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
2441 (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
2442 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2443 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2444 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2445 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
2449 return true; // [r+i]
2450 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2451 // Match LOAD (ADD (X, Lo(G))).
2452 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2453 && "Cannot handle constant offsets yet!");
2454 Disp = N.getOperand(1).getOperand(0); // The global address.
2455 assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2456 Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2457 Disp.getOpcode() == ISD::TargetConstantPool ||
2458 Disp.getOpcode() == ISD::TargetJumpTable);
2459 Base = N.getOperand(0);
      return true; // [&g+r]
    }
2462 } else if (N.getOpcode() == ISD::OR) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
2465 (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
2466 // If this is an or of disjoint bitfields, we can codegen this as an add
2467 // (for better address arithmetic) if the LHS and RHS of the OR are
2468 // provably disjoint.
2469 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2471 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
2474 if (FrameIndexSDNode *FI =
2475 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2476 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2477 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
2485 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2486 // Loading from a constant address.
    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    int16_t Imm;
2491 if (isIntS16Immediate(CN, Imm) &&
2492 (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) {
2493 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2494 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }
2499 // Handle 32-bit sext immediates with LIS + addr mode.
2500 if ((CN->getValueType(0) == MVT::i32 ||
2501 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2502 (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
2503 int Addr = (int)CN->getZExtValue();
2505 // Otherwise, break this down into an LIS + disp.
2506 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
2510 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

2516 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2517 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2518 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}
2525 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
2526 /// represented as an indexed [r+r] operation.
2527 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
2530 // Check to see if we can easily represent this as an [r+r] address. This
2531 // will fail if it thinks that the address is more profitably represented as
2532 // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

2536 // If the address is the result of an add, we will utilize the fact that the
2537 // address calculation includes an implicit add. However, we can reduce
2538 // register pressure if we do not materialize a constant just for use as the
2539 // index register. We only get rid of the add if it is not an add of a
2540 // value and a 16-bit signed constant and both have a single use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
2543 (!isIntS16Immediate(N.getOperand(1), imm) ||
2544 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2545 Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

2550 // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}
2557 /// Returns true if we should use a direct load into vector instruction
2558 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2559 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
  // If there are any uses other than scalar-to-vector, then we should keep it
  // as a scalar load -> direct move pattern to prevent multiple loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

2568 EVT MemVT = LD->getMemoryVT();
2569 if (!MemVT.isSimple())
  switch(MemVT.getSimpleVT().SimpleTy) {
    case MVT::i64:
      break;
    case MVT::i32:
      if (!ST.hasP8Vector())
        return false;
      break;
    case MVT::i16:
    case MVT::i8:
      if (!ST.hasP9Vector())
        return false;
      break;
    default:
      return false;
  }
2587 SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR)
      return false;

  return true;
}
2600 /// getPreIndexedAddressParts - returns true by value, base pointer and
2601 /// offset pointer and addressing mode by reference if the node's address
2602 /// can be legally represented as pre-indexed load / store address.
2603 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
2606 SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;

  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
2613 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2614 Ptr = LD->getBasePtr();
2615 VT = LD->getMemoryVT();
2616 Alignment = LD->getAlignment();
2617 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2618 Ptr = ST->getBasePtr();
2619 VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;
2625 // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2626 // instructions because we can fold these into a more efficient instruction
2627 // instead, (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }
2632 // PowerPC doesn't have preinc load/store instructions for vectors (except
2633 // for QPX, which does have preinc r+r forms).
2634 if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

2643 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2644 // Common code will reject creating a pre-inc form if the base pointer
2645 // is a frame index, or if N is a store and the base pointer is either
2646 // the same as or a predecessor of the value being stored. Check for
2647 // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
2653 SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }
    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

2665 // LDU/STU can only handle immediates that are a multiple of 4.
2666 if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
      return false;
  }

2678 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2679 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
2680 // sext i32 to i64 when addr mode is r+i.
2681 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2682 LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}
2691 //===----------------------------------------------------------------------===//
2692 // LowerOperation implementation
2693 //===----------------------------------------------------------------------===//
2695 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
2696 /// and LoOpFlags to the target MO flags.
2697 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2698 unsigned &HiOpFlags, unsigned &LoOpFlags,
2699 const GlobalValue *GV = nullptr) {
2700 HiOpFlags = PPCII::MO_HA;
2701 LoOpFlags = PPCII::MO_LO;
  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

2709 // If this is a reference to a global value that requires a non-lazy-ptr, make
2710 // sure that instruction lowering adds it.
2711 if (GV && Subtarget.hasLazyResolverStub(GV)) {
2712 HiOpFlags |= PPCII::MO_NLP_FLAG;
2713 LoOpFlags |= PPCII::MO_NLP_FLAG;
2715 if (GV->hasHiddenVisibility()) {
2716 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }
}
2722 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
2725 EVT PtrVT = HiPart.getValueType();
2726 SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2728 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2729 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
2733 Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2734 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2736 // Generate non-pic code that has direct accesses to the constant pool.
2737 // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}
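// For instance (illustrative, 32-bit non-PIC ELF), the address of a global G
// is materialized roughly as:
//   lis  r3, G@ha      ; PPCISD::Hi
//   addi r3, r3, G@l   ; the ADD of Hi and PPCISD::Lo
// where @ha pre-compensates for the sign extension of the low 16 bits.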
2741 static void setUsesTOCBasePtr(MachineFunction &MF) {
2742 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}
2746 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}
2750 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue GA) const {
  const bool Is64Bit = Subtarget.isPPC64();
2753 EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2754 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2755 : Subtarget.isAIXABI()
2756 ? DAG.getRegister(PPC::R2, VT)
2757 : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2758 SDValue Ops[] = { GA, Reg };
2759 return DAG.getMemIntrinsicNode(
2760 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2761 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
      MachineMemOperand::MOLoad);
}
2765 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2766 SelectionDAG &DAG) const {
2767 EVT PtrVT = Op.getValueType();
2768 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2769 const Constant *C = CP->getConstVal();
2771 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2772 // The actual address of the GlobalValue is stored in the TOC.
2773 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2774 setUsesTOCBasePtr(DAG);
2775 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

2779 unsigned MOHiFlag, MOLoFlag;
2780 bool IsPIC = isPositionIndependent();
2781 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2783 if (IsPIC && Subtarget.isSVR4ABI()) {
2784 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
2785 PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}
2796 // For 64-bit PowerPC, prefer the more compact relative encodings.
2797 // This trades 32 bits per jump table entry for one or two instructions
2798 // on the jump site.
2799 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2800 if (isJumpTableRelative())
2801 return MachineJumpTableInfo::EK_LabelDifference32;
  return TargetLowering::getJumpTableEncoding();
}
2806 bool PPCTargetLowering::isJumpTableRelative() const {
  if (UseAbsoluteJumpTables)
    return false;
  if (Subtarget.isPPC64() || Subtarget.isAIXABI())
    return true;
  return TargetLowering::isJumpTableRelative();
}
2814 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2815 SelectionDAG &DAG) const {
2816 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2817 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2819 switch (getTargetMachine().getCodeModel()) {
2820 case CodeModel::Small:
2821 case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
  default:
    return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  }
}
const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {
2833 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2834 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2836 switch (getTargetMachine().getCodeModel()) {
2837 case CodeModel::Small:
2838 case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
  default:
    return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
  }
}
2845 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2846 EVT PtrVT = Op.getValueType();
2847 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2849 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2850 // The actual address of the GlobalValue is stored in the TOC.
2851 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2852 setUsesTOCBasePtr(DAG);
2853 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
    return getTOCEntry(DAG, SDLoc(JT), GA);
  }

2857 unsigned MOHiFlag, MOLoFlag;
2858 bool IsPIC = isPositionIndependent();
2859 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2861 if (IsPIC && Subtarget.isSVR4ABI()) {
2862 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2863 PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(GA), GA);
  }

2867 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
2868 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
  return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
}
2872 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
2873 SelectionDAG &DAG) const {
2874 EVT PtrVT = Op.getValueType();
2875 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
2876 const BlockAddress *BA = BASDN->getBlockAddress();
2878 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2879 // The actual BlockAddress is stored in the TOC.
2880 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2881 setUsesTOCBasePtr(DAG);
2882 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
    return getTOCEntry(DAG, SDLoc(BASDN), GA);
  }

2886 // 32-bit position-independent ELF stores the BlockAddress in the .got.
2887 if (Subtarget.is32BitELFABI() && isPositionIndependent())
    return getTOCEntry(
        DAG, SDLoc(BASDN),
        DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
2892 unsigned MOHiFlag, MOLoFlag;
2893 bool IsPIC = isPositionIndependent();
2894 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2895 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
2896 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
  return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
}
2900 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2901 SelectionDAG &DAG) const {
2902 // FIXME: TLS addresses currently use medium model code sequences,
2903 // which is the most useful form. Eventually support for small and
2904 // large models could be added if users need it, at the cost of
2905 // additional complexity.
2906 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2907 if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc dl(GA);
2911 const GlobalValue *GV = GA->getGlobal();
2912 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2913 bool is64bit = Subtarget.isPPC64();
2914 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2915 PICLevel::Level picLevel = M->getPICLevel();
2917 const TargetMachine &TM = getTargetMachine();
2918 TLSModel::Model Model = TM.getTLSModel(GV);
2920 if (Model == TLSModel::LocalExec) {
2921 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2922 PPCII::MO_TPREL_HA);
2923 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
2924 PPCII::MO_TPREL_LO);
2925 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
2926 : DAG.getRegister(PPC::R2, MVT::i32);
2928 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
    return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
  }

2932 if (Model == TLSModel::InitialExec) {
2933 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                PPCII::MO_TLS);
    SDValue GOTPtr;
    if (is64bit) {
2938 setUsesTOCBasePtr(DAG);
2939 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2940 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
                           PtrVT, GOTReg, TGA);
    } else {
2943 if (!TM.isPositionIndependent())
2944 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
2945 else if (picLevel == PICLevel::SmallPIC)
2946 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
      else
        GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
    }
2950 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
2951 PtrVT, TGA, GOTPtr);
    return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
  }

2955 if (Model == TLSModel::GeneralDynamic) {
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
    SDValue GOTPtr;
    if (is64bit) {
2959 setUsesTOCBasePtr(DAG);
2960 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2961 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
2962 GOTReg, TGA);
2963 } else {
2964 if (picLevel == PICLevel::SmallPIC)
2965 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2966 else
2967 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2968 }
2969 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
2970 GOTPtr, TGA, TGA);
2971 }
2973 if (Model == TLSModel::LocalDynamic) {
2974 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
2975 SDValue GOTPtr;
2976 if (is64bit) {
2977 setUsesTOCBasePtr(DAG);
2978 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
2979 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
2980 GOTReg, TGA);
2981 } else {
2982 if (picLevel == PICLevel::SmallPIC)
2983 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
2984 else
2985 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
2986 }
2987 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
2988 PtrVT, GOTPtr, TGA, TGA);
2989 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
2990 PtrVT, TLSAddr, TGA);
2991 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
2992 }
2994 llvm_unreachable("Unknown TLS model!");
2995 }
2997 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
2998 SelectionDAG &DAG) const {
2999 EVT PtrVT = Op.getValueType();
3000 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3001 SDLoc DL(GSDN);
3002 const GlobalValue *GV = GSDN->getGlobal();
3004 // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3005 // The actual address of the GlobalValue is stored in the TOC.
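// (Illustrative medium-model access for such a TOC entry, not normative:
//    addis 3, 2, sym@toc@ha
//    ld    3, sym@toc@l(3)
//  with the small code model this is instead a single load off the TOC.)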
3006 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3007 setUsesTOCBasePtr(DAG);
3008 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3009 return getTOCEntry(DAG, DL, GA);
3010 }
3012 unsigned MOHiFlag, MOLoFlag;
3013 bool IsPIC = isPositionIndependent();
3014 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3016 if (IsPIC && Subtarget.isSVR4ABI()) {
3017 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3018 GSDN->getOffset(),
3019 PPCII::MO_PIC_FLAG);
3020 return getTOCEntry(DAG, DL, GA);
3021 }
3023 SDValue GAHi =
3024 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3025 SDValue GALo =
3026 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3028 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3030 // If the global reference is actually to a non-lazy-pointer, we have to do an
3031 // extra load to get the address of the global.
3032 if (MOHiFlag & PPCII::MO_NLP_FLAG)
3033 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3035 return Ptr;
3036 }
3037 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3038 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3039 SDLoc dl(Op);
3041 if (Op.getValueType() == MVT::v2i64) {
3042 // When the operands themselves are v2i64 values, we need to do something
3043 // special because VSX has no underlying comparison operations for these.
3044 if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3045 // Equality can be handled by casting to the legal type for Altivec
3046 // comparisons, everything else needs to be expanded.
3047 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3048 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3049 DAG.getSetCC(dl, MVT::v4i32,
3050 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3051 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3052 CC));
3053 }
3055 return SDValue();
3056 }
3058 // We handle most of these in the usual way.
3059 return Op;
3060 }
3062 // If we're comparing for equality to zero, expose the fact that this is
3063 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3064 // fold the new nodes.
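// (Sketch: for i32, (seteq x, 0) can become (srl (ctlz x), 5), since cntlzw
// yields 32 exactly when x is zero.)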
3065 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3066 return V;
3068 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3069 // Leave comparisons against 0 and -1 alone for now, since they're usually
3070 // optimized. FIXME: revisit this when we can custom lower all setcc
3071 // optimizations.
3072 if (C->isAllOnesValue() || C->isNullValue())
3073 return SDValue();
3074 }
3076 // If we have an integer seteq/setne, turn it into a compare against zero
3077 // by xor'ing the rhs with the lhs, which is faster than setting a
3078 // condition register, reading it back out, and masking the correct bit. The
3079 // normal approach here uses sub to do this instead of xor. Using xor exposes
3080 // the result to other bit-twiddling opportunities.
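// (Sketch of the rewrite performed below: (seteq a, b) -> (seteq (xor a, b), 0).)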
3081 EVT LHSVT = Op.getOperand(0).getValueType();
3082 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3083 EVT VT = Op.getValueType();
3084 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3085 Op.getOperand(1));
3086 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3087 }
3089 return SDValue();
3090 }
3091 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3092 SDNode *Node = Op.getNode();
3093 EVT VT = Node->getValueType(0);
3094 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3095 SDValue InChain = Node->getOperand(0);
3096 SDValue VAListPtr = Node->getOperand(1);
3097 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3098 SDLoc dl(Node);
3100 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3103 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3104 VAListPtr, MachinePointerInfo(SV), MVT::i8);
3105 InChain = GprIndex.getValue(1);
3107 if (VT == MVT::i64) {
3108 // Check if GprIndex is even
3109 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3110 DAG.getConstant(1, dl, MVT::i32));
3111 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3112 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3113 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3114 DAG.getConstant(1, dl, MVT::i32));
3115 // Align GprIndex to be even if it isn't
3116 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3117 GprIndex);
3118 }
3120 // fpr index is 1 byte after gpr
3121 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3122 DAG.getConstant(1, dl, MVT::i32));
3125 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3126 FprPtr, MachinePointerInfo(SV), MVT::i8);
3127 InChain = FprIndex.getValue(1);
3129 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3130 DAG.getConstant(8, dl, MVT::i32));
3132 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3133 DAG.getConstant(4, dl, MVT::i32));
3136 SDValue OverflowArea =
3137 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3138 InChain = OverflowArea.getValue(1);
3140 SDValue RegSaveArea =
3141 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3142 InChain = RegSaveArea.getValue(1);
3144 // select overflow_area if index > 8
3145 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3146 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3148 // adjustment constant gpr_index * 4/8
3149 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3150 VT.isInteger() ? GprIndex : FprIndex,
3151 DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3152 MVT::i32));
3154 // OurReg = RegSaveArea + RegConstant
3155 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3156 RegConstant);
3158 // Floating types are 32 bytes into RegSaveArea
3159 if (VT.isFloatingPoint())
3160 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3161 DAG.getConstant(32, dl, MVT::i32));
3163 // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3164 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3165 VT.isInteger() ? GprIndex : FprIndex,
3166 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3167 MVT::i32));
3169 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3170 VT.isInteger() ? VAListPtr : FprPtr,
3171 MachinePointerInfo(SV), MVT::i8);
3173 // determine if we should load from reg_save_area or overflow_area
3174 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3176 // increase overflow_area by 4/8 if gpr/fpr > 8
3177 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3178 DAG.getConstant(VT.isInteger() ? 4 : 8,
3179 dl, MVT::i32));
3181 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3182 OverflowAreaPlusN);
3184 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3185 MachinePointerInfo(), MVT::i32);
3187 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3188 }
3190 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3191 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3193 // We have to copy the entire va_list struct:
3194 // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3195 return DAG.getMemcpy(Op.getOperand(0), Op,
3196 Op.getOperand(1), Op.getOperand(2),
3197 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
3198 false, MachinePointerInfo(), MachinePointerInfo());
3199 }
3201 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3202 SelectionDAG &DAG) const {
3203 if (Subtarget.isAIXABI())
3204 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3206 return Op.getOperand(0);
3207 }
3209 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3210 SelectionDAG &DAG) const {
3211 if (Subtarget.isAIXABI())
3212 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3214 SDValue Chain = Op.getOperand(0);
3215 SDValue Trmp = Op.getOperand(1); // trampoline
3216 SDValue FPtr = Op.getOperand(2); // nested function
3217 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3219 SDLoc dl(Op);
3220 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3221 bool isPPC64 = (PtrVT == MVT::i64);
3222 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3224 TargetLowering::ArgListTy Args;
3225 TargetLowering::ArgListEntry Entry;
3227 Entry.Ty = IntPtrTy;
3228 Entry.Node = Trmp; Args.push_back(Entry);
3230 // TrampSize == (isPPC64 ? 48 : 40);
3231 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3232 isPPC64 ? MVT::i64 : MVT::i32);
3233 Args.push_back(Entry);
3235 Entry.Node = FPtr; Args.push_back(Entry);
3236 Entry.Node = Nest; Args.push_back(Entry);
3238 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3239 TargetLowering::CallLoweringInfo CLI(DAG);
3240 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3241 CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3242 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3244 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3245 return CallResult.second;
3246 }
3248 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3249 MachineFunction &MF = DAG.getMachineFunction();
3250 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3251 EVT PtrVT = getPointerTy(MF.getDataLayout());
3253 SDLoc dl(Op);
3255 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
3256 // vastart just stores the address of the VarArgsFrameIndex slot into the
3257 // memory location argument.
3258 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3259 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3260 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3261 MachinePointerInfo(SV));
3262 }
3264 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3265 // We suppose the given va_list is already allocated.
3267 // typedef struct {
3268 //  char gpr;     /* index into the array of 8 GPRs
3269 //                 * stored in the register save area
3270 //                 * gpr=0 corresponds to r3,
3271 //                 * gpr=1 to r4, etc.
3272 //                 */
3273 //  char fpr;     /* index into the array of 8 FPRs
3274 //                 * stored in the register save area
3275 //                 * fpr=0 corresponds to f1,
3276 //                 * fpr=1 to f2, etc.
3277 //                 */
3278 //  char *overflow_arg_area;
3279 //                /* location on stack that holds
3280 //                 * the next overflow argument
3281 //                 */
3282 //  char *reg_save_area;
3283 //                /* where r3:r10 and f1:f8 (if saved)
3284 //                 * are stored
3285 //                 */
3286 // } va_list[1];
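// In terms of byte offsets, as the code below assumes: gpr at 0, fpr at 1,
// two bytes of padding, overflow_arg_area at 4, reg_save_area at 8.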
3288 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3289 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3290 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3291 PtrVT);
3292 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3293 PtrVT);
3295 uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3296 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3298 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3299 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3301 uint64_t FPROffset = 1;
3302 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3304 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3306 // Store first byte : number of int regs
3307 SDValue firstStore =
3308 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3309 MachinePointerInfo(SV), MVT::i8);
3310 uint64_t nextOffset = FPROffset;
3311 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3312 ConstFPROffset);
3314 // Store second byte : number of float regs
3315 SDValue secondStore =
3316 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3317 MachinePointerInfo(SV, nextOffset), MVT::i8);
3318 nextOffset += StackOffset;
3319 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3321 // Store second word : arguments given on stack
3322 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3323 MachinePointerInfo(SV, nextOffset));
3324 nextOffset += FrameOffset;
3325 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3327 // Store third word : arguments given in registers
3328 return DAG.getStore(thirdStore, dl, FR, nextPtr,
3329 MachinePointerInfo(SV, nextOffset));
3330 }
3332 /// FPR - The set of FP registers that should be allocated for arguments
3333 /// on Darwin and AIX.
3334 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
3335 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
3336 PPC::F11, PPC::F12, PPC::F13};
3338 /// QFPR - The set of QPX registers that should be allocated for arguments.
3339 static const MCPhysReg QFPR[] = {
3340 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
3341 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3343 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3344 /// the stack.
3345 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3346 unsigned PtrByteSize) {
3347 unsigned ArgSize = ArgVT.getStoreSize();
3348 if (Flags.isByVal())
3349 ArgSize = Flags.getByValSize();
3351 // Round up to multiples of the pointer size, except for array members,
3352 // which are always packed.
3353 if (!Flags.isInConsecutiveRegs())
3354 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3356 return ArgSize;
3357 }
3359 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3360 /// on the stack.
3361 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3362 ISD::ArgFlagsTy Flags,
3363 unsigned PtrByteSize) {
3364 unsigned Align = PtrByteSize;
3366 // Altivec parameters are padded to a 16 byte boundary.
3367 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3368 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3369 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3370 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3371 Align = 16;
3372 // QPX vector types stored in double-precision are padded to a 32 byte
3373 // boundary.
3374 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3375 Align = 32;
3377 // ByVal parameters are aligned as requested.
3378 if (Flags.isByVal()) {
3379 unsigned BVAlign = Flags.getByValAlign();
3380 if (BVAlign > PtrByteSize) {
3381 if (BVAlign % PtrByteSize != 0)
3382 report_fatal_error(
3383 "ByVal alignment is not a multiple of the pointer size");
3385 Align = BVAlign;
3386 }
3387 }
3389 // Array members are always packed to their original alignment.
3390 if (Flags.isInConsecutiveRegs()) {
3391 // If the array member was split into multiple registers, the first
3392 // needs to be aligned to the size of the full type. (Except for
3393 // ppcf128, which is only aligned as its f64 components.)
3394 if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3395 Align = OrigVT.getStoreSize();
3396 else
3397 Align = ArgVT.getStoreSize();
3398 }
3400 return Align;
3401 }
3403 /// CalculateStackSlotUsed - Return whether this argument will use its
3404 /// stack slot (instead of being passed in registers). ArgOffset,
3405 /// AvailableFPRs, and AvailableVRs must hold the current argument
3406 /// position, and will be updated to account for this argument.
3407 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3408 ISD::ArgFlagsTy Flags,
3409 unsigned PtrByteSize,
3410 unsigned LinkageSize,
3411 unsigned ParamAreaSize,
3412 unsigned &ArgOffset,
3413 unsigned &AvailableFPRs,
3414 unsigned &AvailableVRs, bool HasQPX) {
3415 bool UseMemory = false;
3417 // Respect alignment of argument on the stack.
3418 unsigned Align =
3419 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3420 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3421 // If there's no space left in the argument save area, we must
3422 // use memory (this check also catches zero-sized arguments).
3423 if (ArgOffset >= LinkageSize + ParamAreaSize)
3424 UseMemory = true;
3426 // Allocate argument on the stack.
3427 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3428 if (Flags.isInConsecutiveRegsLast())
3429 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3430 // If we overran the argument save area, we must use memory
3431 // (this check catches arguments passed partially in memory)
3432 if (ArgOffset > LinkageSize + ParamAreaSize)
3433 UseMemory = true;
3435 // However, if the argument is actually passed in an FPR or a VR,
3436 // we don't use memory after all.
3437 if (!Flags.isByVal()) {
3438 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3439 // QPX registers overlap with the scalar FP registers.
3440 (HasQPX && (ArgVT == MVT::v4f32 ||
3441 ArgVT == MVT::v4f64 ||
3442 ArgVT == MVT::v4i1)))
3443 if (AvailableFPRs > 0) {
3444 --AvailableFPRs;
3445 return false;
3446 }
3447 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3448 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3449 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3450 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3451 if (AvailableVRs > 0) {
3452 --AvailableVRs;
3453 return false;
3454 }
3455 }
3457 return UseMemory;
3458 }
3460 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3461 /// ensure minimum alignment required for target.
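/// (For example, NumBytes = 100 with a 16-byte stack alignment rounds up to 112.)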
3462 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3463 unsigned NumBytes) {
3464 unsigned TargetAlign = Lowering->getStackAlignment();
3465 unsigned AlignMask = TargetAlign - 1;
3466 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
3468 return NumBytes;
3469 }
3470 SDValue PPCTargetLowering::LowerFormalArguments(
3471 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3472 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3473 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3474 if (Subtarget.isAIXABI())
3475 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3476 InVals);
3477 if (Subtarget.is64BitELFABI())
3478 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3479 InVals);
3480 if (Subtarget.is32BitELFABI())
3481 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3482 InVals);
3484 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
3485 InVals);
3486 }
3488 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3489 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3490 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3491 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3493 // 32-bit SVR4 ABI Stack Frame Layout:
3494 // +-----------------------------------+
3495 // +--> | Back chain |
3496 // | +-----------------------------------+
3497 // | | Floating-point register save area |
3498 // | +-----------------------------------+
3499 // | | General register save area |
3500 // | +-----------------------------------+
3501 // | | CR save word |
3502 // | +-----------------------------------+
3503 // | | VRSAVE save word |
3504 // | +-----------------------------------+
3505 // | | Alignment padding |
3506 // | +-----------------------------------+
3507 // | | Vector register save area |
3508 // | +-----------------------------------+
3509 // | | Local variable space |
3510 // | +-----------------------------------+
3511 // | | Parameter list area |
3512 // | +-----------------------------------+
3513 // | | LR save word |
3514 // | +-----------------------------------+
3515 // SP--> +--- | Back chain |
3516 // +-----------------------------------+
3518 // Specifications:
3519 // System V Application Binary Interface PowerPC Processor Supplement
3520 // AltiVec Technology Programming Interface Manual
3522 MachineFunction &MF = DAG.getMachineFunction();
3523 MachineFrameInfo &MFI = MF.getFrameInfo();
3524 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3526 EVT PtrVT = getPointerTy(MF.getDataLayout());
3527 // Potential tail calls could cause overwriting of argument stack slots.
3528 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3529 (CallConv == CallingConv::Fast));
3530 unsigned PtrByteSize = 4;
3532 // Assign locations to all of the incoming arguments.
3533 SmallVector<CCValAssign, 16> ArgLocs;
3534 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3535 *DAG.getContext());
3537 // Reserve space for the linkage area on the stack.
3538 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3539 CCInfo.AllocateStack(LinkageSize, PtrByteSize);
3541 CCInfo.PreAnalyzeFormalArguments(Ins);
3543 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3544 CCInfo.clearWasPPCF128();
3546 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3547 CCValAssign &VA = ArgLocs[i];
3549 // Arguments stored in registers.
3550 if (VA.isRegLoc()) {
3551 const TargetRegisterClass *RC;
3552 EVT ValVT = VA.getValVT();
3554 switch (ValVT.getSimpleVT().SimpleTy) {
3555 default:
3556 llvm_unreachable("ValVT not supported by formal arguments Lowering");
3557 case MVT::i1:
3558 case MVT::i32:
3559 RC = &PPC::GPRCRegClass;
3560 break;
3561 case MVT::f32:
3562 if (Subtarget.hasP8Vector())
3563 RC = &PPC::VSSRCRegClass;
3564 else if (Subtarget.hasSPE())
3565 RC = &PPC::GPRCRegClass;
3566 else
3567 RC = &PPC::F4RCRegClass;
3568 break;
3569 case MVT::f64:
3570 if (Subtarget.hasVSX())
3571 RC = &PPC::VSFRCRegClass;
3572 else if (Subtarget.hasSPE())
3573 // SPE passes doubles in GPR pairs.
3574 RC = &PPC::GPRCRegClass;
3575 else
3576 RC = &PPC::F8RCRegClass;
3577 break;
3578 case MVT::v16i8:
3579 case MVT::v8i16:
3580 case MVT::v4i32:
3581 RC = &PPC::VRRCRegClass;
3582 break;
3583 case MVT::v4f32:
3584 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3585 break;
3586 case MVT::v2f64:
3587 case MVT::v2i64:
3588 RC = &PPC::VRRCRegClass;
3589 break;
3590 case MVT::v4f64:
3591 RC = &PPC::QFRCRegClass;
3592 break;
3593 case MVT::v4i1:
3594 RC = &PPC::QBRCRegClass;
3595 break;
3596 }
3598 SDValue ArgValue;
3599 // Transform the arguments stored in physical registers into
3600 // virtual ones.
3601 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3602 assert(i + 1 < e && "No second half of double precision argument");
3603 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3604 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3605 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3606 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3607 if (!Subtarget.isLittleEndian())
3608 std::swap (ArgValueLo, ArgValueHi);
3609 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3610 ArgValueHi);
3611 } else {
3612 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3613 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3614 ValVT == MVT::i1 ? MVT::i32 : ValVT);
3615 if (ValVT == MVT::i1)
3616 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3617 }
3619 InVals.push_back(ArgValue);
3620 } else {
3621 // Argument stored in memory.
3622 assert(VA.isMemLoc());
3624 // Get the extended size of the argument type in stack
3625 unsigned ArgSize = VA.getLocVT().getStoreSize();
3626 // Get the actual size of the argument type
3627 unsigned ObjSize = VA.getValVT().getStoreSize();
3628 unsigned ArgOffset = VA.getLocMemOffset();
3629 // Stack objects in PPC32 are right justified.
3630 ArgOffset += ArgSize - ObjSize;
3631 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3633 // Create load nodes to retrieve arguments from the stack.
3634 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3635 InVals.push_back(
3636 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3637 }
3638 }
3640 // Assign locations to all of the incoming aggregate by value arguments.
3641 // Aggregates passed by value are stored in the local variable space of the
3642 // caller's stack frame, right above the parameter list area.
3643 SmallVector<CCValAssign, 16> ByValArgLocs;
3644 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3645 ByValArgLocs, *DAG.getContext());
3647 // Reserve stack space for the allocations in CCInfo.
3648 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3650 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3652 // Area that is at least reserved in the caller of this function.
3653 unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3654 MinReservedArea = std::max(MinReservedArea, LinkageSize);
3656 // Set the size that is at least reserved in caller of this function. Tail
3657 // call optimized function's reserved stack space needs to be aligned so that
3658 // taking the difference between two stack areas will result in an aligned
3659 // stack frame.
3660 MinReservedArea =
3661 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3662 FuncInfo->setMinReservedArea(MinReservedArea);
3664 SmallVector<SDValue, 8> MemOps;
3666 // If the function takes variable number of arguments, make a frame index for
3667 // the start of the first vararg value... for expansion of llvm.va_start.
3668 if (isVarArg) {
3669 static const MCPhysReg GPArgRegs[] = {
3670 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3671 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3672 };
3673 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3675 static const MCPhysReg FPArgRegs[] = {
3676 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3677 PPC::F8
3678 };
3679 unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3681 if (useSoftFloat() || hasSPE())
3682 NumFPArgRegs = 0;
3684 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3685 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3687 // Make room for NumGPArgRegs and NumFPArgRegs.
3688 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3689 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3691 FuncInfo->setVarArgsStackOffset(
3692 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3693 CCInfo.getNextStackOffset(), true));
3695 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false));
3696 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3698 // The fixed integer arguments of a variadic function are stored to the
3699 // VarArgsFrameIndex on the stack so that they may be loaded by
3700 // dereferencing the result of va_next.
3701 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3702 // Get an existing live-in vreg, or add a new one.
3703 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
3704 if (!VReg)
3705 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3707 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3708 SDValue Store =
3709 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3710 MemOps.push_back(Store);
3711 // Increment the address by four for the next argument to store
3712 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
3713 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3714 }
3716 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
3717 // is set.
3718 // The double arguments are stored to the VarArgsFrameIndex
3719 // on the stack.
3720 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3721 // Get an existing live-in vreg, or add a new one.
3722 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
3723 if (!VReg)
3724 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3726 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
3727 SDValue Store =
3728 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
3729 MemOps.push_back(Store);
3730 // Increment the address by eight for the next argument to store
3731 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
3732 PtrVT);
3733 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
3734 }
3735 }
3737 if (!MemOps.empty())
3738 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3740 return Chain;
3741 }
3743 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3744 // value to MVT::i64 and then truncate to the correct register size.
3745 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
3746 EVT ObjectVT, SelectionDAG &DAG,
3747 SDValue ArgVal,
3748 const SDLoc &dl) const {
3749 if (Flags.isSExt())
3750 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
3751 DAG.getValueType(ObjectVT));
3752 else if (Flags.isZExt())
3753 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
3754 DAG.getValueType(ObjectVT));
3756 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
3757 }
3759 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3760 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3761 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3762 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3763 // TODO: add description of PPC stack frame format, or at least some docs.
3765 bool isELFv2ABI = Subtarget.isELFv2ABI();
3766 bool isLittleEndian = Subtarget.isLittleEndian();
3767 MachineFunction &MF = DAG.getMachineFunction();
3768 MachineFrameInfo &MFI = MF.getFrameInfo();
3769 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3771 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
3772 "fastcc not supported on varargs functions");
3774 EVT PtrVT = getPointerTy(MF.getDataLayout());
3775 // Potential tail calls could cause overwriting of argument stack slots.
3776 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3777 (CallConv == CallingConv::Fast));
3778 unsigned PtrByteSize = 8;
3779 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3781 static const MCPhysReg GPR[] = {
3782 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3783 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3784 };
3785 static const MCPhysReg VR[] = {
3786 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
3787 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3788 };
3790 const unsigned Num_GPR_Regs = array_lengthof(GPR);
3791 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
3792 const unsigned Num_VR_Regs = array_lengthof(VR);
3793 const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3795 // Do a first pass over the arguments to determine whether the ABI
3796 // guarantees that our caller has allocated the parameter save area
3797 // on its stack frame. In the ELFv1 ABI, this is always the case;
3798 // in the ELFv2 ABI, it is true if this is a vararg function or if
3799 // any parameter is located in a stack slot.
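// (Illustration, not new ABI text: under ELFv2 a non-variadic function whose
// arguments all land in the first eight GPRs needs no parameter save area,
// while a ninth integer argument would force one to exist.)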
3801 bool HasParameterArea = !isELFv2ABI || isVarArg;
3802 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3803 unsigned NumBytes = LinkageSize;
3804 unsigned AvailableFPRs = Num_FPR_Regs;
3805 unsigned AvailableVRs = Num_VR_Regs;
3806 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3807 if (Ins[i].Flags.isNest())
3808 continue;
3810 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
3811 PtrByteSize, LinkageSize, ParamAreaSize,
3812 NumBytes, AvailableFPRs, AvailableVRs,
3813 Subtarget.hasQPX()))
3814 HasParameterArea = true;
3815 }
3817 // Add DAG nodes to load the arguments or copy them out of registers. On
3818 // entry to a function on PPC, the arguments start after the linkage area,
3819 // although the first ones are often in registers.
3821 unsigned ArgOffset = LinkageSize;
3822 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3823 unsigned &QFPR_idx = FPR_idx;
3824 SmallVector<SDValue, 8> MemOps;
3825 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
3826 unsigned CurArgIdx = 0;
3827 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
3828 SDValue ArgVal;
3829 bool needsLoad = false;
3830 EVT ObjectVT = Ins[ArgNo].VT;
3831 EVT OrigVT = Ins[ArgNo].ArgVT;
3832 unsigned ObjSize = ObjectVT.getStoreSize();
3833 unsigned ArgSize = ObjSize;
3834 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
3835 if (Ins[ArgNo].isOrigArg()) {
3836 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3837 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3838 }
3839 // We re-align the argument offset for each argument, except when using the
3840 // fast calling convention, when we need to make sure we do that only when
3841 // we'll actually use a stack slot.
3842 unsigned CurArgOffset, Align;
3843 auto ComputeArgOffset = [&]() {
3844 /* Respect alignment of argument on the stack. */
3845 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
3846 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3847 CurArgOffset = ArgOffset;
3848 };
3850 if (CallConv != CallingConv::Fast) {
3851 ComputeArgOffset();
3853 /* Compute GPR index associated with argument offset. */
3854 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3855 GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
3856 }
3858 // FIXME the codegen can be much improved in some cases.
3859 // We do not have to keep everything in memory.
3860 if (Flags.isByVal()) {
3861 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
3863 if (CallConv == CallingConv::Fast)
3864 ComputeArgOffset();
3866 // ObjSize is the true size, ArgSize rounded up to multiple of registers.
3867 ObjSize = Flags.getByValSize();
3868 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3869 // Empty aggregate parameters do not take up registers. Examples:
3870 // struct { } a;
3871 // union { } b;
3872 // int c[0];
3873 // etc. However, we have to provide a place-holder in InVals, so
3874 // pretend we have an 8-byte item at the current address for that
3875 // purpose.
3876 if (!ObjSize) {
3877 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
3878 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3879 InVals.push_back(FIN);
3880 continue;
3881 }
3883 // Create a stack object covering all stack doublewords occupied
3884 // by the argument. If the argument is (fully or partially) on
3885 // the stack, or if the argument is fully in registers but the
3886 // caller has allocated the parameter save anyway, we can refer
3887 // directly to the caller's stack frame. Otherwise, create a
3888 // local copy in our own frame.
3889 int FI;
3890 if (HasParameterArea ||
3891 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3892 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
3893 else
3894 FI = MFI.CreateStackObject(ArgSize, Align, false);
3895 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3897 // Handle aggregates smaller than 8 bytes.
3898 if (ObjSize < PtrByteSize) {
3899 // The value of the object is its address, which differs from the
3900 // address of the enclosing doubleword on big-endian systems.
3901 SDValue Arg = FIN;
3902 if (!isLittleEndian) {
3903 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
3904 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
3905 }
3906 InVals.push_back(Arg);
3908 if (GPR_idx != Num_GPR_Regs) {
3909 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3910 FuncInfo->addLiveInAttr(VReg, Flags);
3911 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3913 SDValue Store;
3914 if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3915 EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3916 (ObjSize == 2 ? MVT::i16 : MVT::i32));
3917 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
3918 MachinePointerInfo(&*FuncArg), ObjType);
3919 } else {
3920 // For sizes that don't fit a truncating store (3, 5, 6, 7),
3921 // store the whole register as-is to the parameter save area
3922 // slot.
3923 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3924 MachinePointerInfo(&*FuncArg));
3925 }
3927 MemOps.push_back(Store);
3928 }
3929 // Whether we copied from a register or not, advance the offset
3930 // into the parameter save area by a full doubleword.
3931 ArgOffset += PtrByteSize;
3932 continue;
3933 }
3935 // The value of the object is its address, which is the address of
3936 // its first stack doubleword.
3937 InVals.push_back(FIN);
3939 // Store whatever pieces of the object are in registers to memory.
3940 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3941 if (GPR_idx == Num_GPR_Regs)
3942 break;
3944 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3945 FuncInfo->addLiveInAttr(VReg, Flags);
3946 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
3947 SDValue Addr = FIN;
3948 if (j) {
3949 SDValue Off = DAG.getConstant(j, dl, PtrVT);
3950 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
3951 }
3952 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
3953 MachinePointerInfo(&*FuncArg, j));
3954 MemOps.push_back(Store);
3955 ++GPR_idx;
3956 }
3957 ArgOffset += ArgSize;
3959 continue;
3960 }
3961 switch (ObjectVT.getSimpleVT().SimpleTy) {
3962 default: llvm_unreachable("Unhandled argument type!");
3963 case MVT::i1:
3964 case MVT::i32:
3965 case MVT::i64:
3966 if (Flags.isNest()) {
3967 // The 'nest' parameter, if any, is passed in R11.
3968 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3969 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3971 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3972 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3974 break;
3975 }
3977 // These can be scalar arguments or elements of an integer array type
3978 // passed directly. Clang may use those instead of "byval" aggregate
3979 // types to avoid forcing arguments to memory unnecessarily.
3980 if (GPR_idx != Num_GPR_Regs) {
3981 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3982 FuncInfo->addLiveInAttr(VReg, Flags);
3983 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
3985 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
3986 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
3987 // value to MVT::i64 and then truncate to the correct register size.
3988 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3989 } else {
3990 if (CallConv == CallingConv::Fast)
3991 ComputeArgOffset();
3993 needsLoad = true;
3994 ArgSize = PtrByteSize;
3995 }
3996 if (CallConv != CallingConv::Fast || needsLoad)
3997 ArgOffset += 8;
3998 break;
4000 case MVT::f32:
4001 case MVT::f64:
4002 // These can be scalar arguments or elements of a float array type
4003 // passed directly. The latter are used to implement ELFv2 homogenous
4004 // float aggregates.
4005 if (FPR_idx != Num_FPR_Regs) {
4006 unsigned VReg;
4008 if (ObjectVT == MVT::f32)
4009 VReg = MF.addLiveIn(FPR[FPR_idx],
4010 Subtarget.hasP8Vector()
4011 ? &PPC::VSSRCRegClass
4012 : &PPC::F4RCRegClass);
4013 else
4014 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4015 ? &PPC::VSFRCRegClass
4016 : &PPC::F8RCRegClass);
4018 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4019 ++FPR_idx;
4020 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4021 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4022 // once we support fp <-> gpr moves.
4024 // This can only ever happen in the presence of f32 array types,
4025 // since otherwise we never run out of FPRs before running out
4026 // of GPRs.
4027 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4028 FuncInfo->addLiveInAttr(VReg, Flags);
4029 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4031 if (ObjectVT == MVT::f32) {
4032 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4033 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4034 DAG.getConstant(32, dl, MVT::i32));
4035 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4036 }
4038 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4039 } else {
4040 if (CallConv == CallingConv::Fast)
4041 ComputeArgOffset();
4043 needsLoad = true;
4044 }
4046 // When passing an array of floats, the array occupies consecutive
4047 // space in the argument area; only round up to the next doubleword
4048 // at the end of the array. Otherwise, each float takes 8 bytes.
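// (Illustration: a homogeneous float[3] member occupies 12 bytes of argument
// space and is only rounded up to 16 after its last element.)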
4049 if (CallConv != CallingConv::Fast || needsLoad) {
4050 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4051 ArgOffset += ArgSize;
4052 if (Flags.isInConsecutiveRegsLast())
4053 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4054 }
4055 break;
4056 case MVT::v4f32:
4057 case MVT::v4i32:
4058 case MVT::v8i16:
4059 case MVT::v16i8:
4060 case MVT::v2f64:
4061 case MVT::v2i64:
4062 case MVT::v1i128:
4063 case MVT::f128:
4064 if (!Subtarget.hasQPX()) {
4065 // These can be scalar arguments or elements of a vector array type
4066 // passed directly. The latter are used to implement ELFv2 homogenous
4067 // vector aggregates.
4068 if (VR_idx != Num_VR_Regs) {
4069 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4070 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4071 ++VR_idx;
4072 } else {
4073 if (CallConv == CallingConv::Fast)
4074 ComputeArgOffset();
4075 needsLoad = true;
4076 }
4077 if (CallConv != CallingConv::Fast || needsLoad)
4078 ArgOffset += 16;
4079 break;
4080 } // not QPX
4082 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
4083 "Invalid QPX parameter type");
4088 // QPX vectors are treated like their scalar floating-point subregisters
4089 // (except that they're larger).
4090 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
4091 if (QFPR_idx != Num_QFPR_Regs) {
4092 const TargetRegisterClass *RC;
4093 switch (ObjectVT.getSimpleVT().SimpleTy) {
4094 case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
4095 case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
4096 default: RC = &PPC::QBRCRegClass; break;
4097 }
4099 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
4100 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4101 ++QFPR_idx;
4102 } else {
4103 if (CallConv == CallingConv::Fast)
4104 ComputeArgOffset();
4105 needsLoad = true;
4106 }
4107 if (CallConv != CallingConv::Fast || needsLoad)
4108 ArgOffset += Sz;
4109 break;
4110 }
4112 // We need to load the argument to a virtual register if we determined
4113 // above that we ran out of physical registers of the appropriate type.
4114 if (needsLoad) {
4115 if (ObjSize < ArgSize && !isLittleEndian)
4116 CurArgOffset += ArgSize - ObjSize;
4117 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4118 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4119 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4120 }
4122 InVals.push_back(ArgVal);
4123 }
4125 // Area that is at least reserved in the caller of this function.
4126 unsigned MinReservedArea;
4127 if (HasParameterArea)
4128 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4129 else
4130 MinReservedArea = LinkageSize;
4132 // Set the size that is at least reserved in caller of this function. Tail
4133 // call optimized functions' reserved stack space needs to be aligned so that
4134 // taking the difference between two stack areas will result in an aligned
4135 // stack frame.
4136 MinReservedArea =
4137 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4138 FuncInfo->setMinReservedArea(MinReservedArea);
4140 // If the function takes variable number of arguments, make a frame index for
4141 // the start of the first vararg value... for expansion of llvm.va_start.
4142 if (isVarArg) {
4143 int Depth = ArgOffset;
4145 FuncInfo->setVarArgsFrameIndex(
4146 MFI.CreateFixedObject(PtrByteSize, Depth, true));
4147 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4149 // If this function is vararg, store any remaining integer argument regs
4150 // to their spots on the stack so that they may be loaded by dereferencing
4151 // the result of va_next.
4152 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4153 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4154 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4155 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4156 SDValue Store =
4157 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4158 MemOps.push_back(Store);
4159 // Increment the address by four for the next argument to store
4160 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4161 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4162 }
4163 }
4165 if (!MemOps.empty())
4166 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4168 return Chain;
4169 }
4171 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
4172 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4173 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4174 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4175 // TODO: add description of PPC stack frame format, or at least some docs.
4177 MachineFunction &MF = DAG.getMachineFunction();
4178 MachineFrameInfo &MFI = MF.getFrameInfo();
4179 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4181 EVT PtrVT = getPointerTy(MF.getDataLayout());
4182 bool isPPC64 = PtrVT == MVT::i64;
4183 // Potential tail calls could cause overwriting of argument stack slots.
4184 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4185 (CallConv == CallingConv::Fast));
4186 unsigned PtrByteSize = isPPC64 ? 8 : 4;
4187 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4188 unsigned ArgOffset = LinkageSize;
4189 // Area that is at least reserved in caller of this function.
4190 unsigned MinReservedArea = ArgOffset;
4192 static const MCPhysReg GPR_32[] = { // 32-bit registers.
4193 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4194 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4195 };
4196 static const MCPhysReg GPR_64[] = { // 64-bit registers.
4197 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4198 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4199 };
4200 static const MCPhysReg VR[] = {
4201 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4202 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4203 };
4205 const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
4206 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4207 const unsigned Num_VR_Regs = array_lengthof( VR);
4209 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4211 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
4213 // In 32-bit non-varargs functions, the stack space for vectors is after the
4214 // stack space for non-vectors. We do not use this space unless we have
4215 // too many vectors to fit in registers, something that only occurs in
4216 // constructed examples:), but we have to walk the arglist to figure
4217 // that out...for the pathological case, compute VecArgOffset as the
4218 // start of the vector parameter area. Computing VecArgOffset is the
4219 // entire point of the following loop.
4220 unsigned VecArgOffset = ArgOffset;
4221 if (!isVarArg && !isPPC64) {
4222 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
4223 ++ArgNo) {
4224 EVT ObjectVT = Ins[ArgNo].VT;
4225 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4227 if (Flags.isByVal()) {
4228 // ObjSize is the true size, ArgSize rounded up to multiple of regs.
4229 unsigned ObjSize = Flags.getByValSize();
4230 unsigned ArgSize =
4231 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4232 VecArgOffset += ArgSize;
4233 continue;
4234 }
4236 switch(ObjectVT.getSimpleVT().SimpleTy) {
4237 default: llvm_unreachable("Unhandled argument type!");
4238 case MVT::i1:
4239 case MVT::i32:
4240 case MVT::f32:
4241 VecArgOffset += 4;
4242 break;
4243 case MVT::i64: // PPC64
4244 case MVT::f64:
4245 // FIXME: We are guaranteed to be !isPPC64 at this point.
4246 // Does MVT::i64 apply?
4247 VecArgOffset += 8;
4248 break;
4249 case MVT::v4f32:
4250 case MVT::v4i32:
4251 case MVT::v8i16:
4252 case MVT::v16i8:
4253 // Nothing to do, we're only looking at Nonvector args here.
4254 break;
4255 }
4256 }
4258 // We've found where the vector parameter area in memory is. Skip the
4259 // first 12 parameters; these don't use that memory.
4260 VecArgOffset = ((VecArgOffset+15)/16)*16;
4261 VecArgOffset += 12*16;
4262 }
4263 // Add DAG nodes to load the arguments or copy them out of registers. On
4264 // entry to a function on PPC, the arguments start after the linkage area,
4265 // although the first ones are often in registers.
4267 SmallVector<SDValue, 8> MemOps;
4268 unsigned nAltivecParamsAtEnd = 0;
4269 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4270 unsigned CurArgIdx = 0;
4271 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4272 SDValue ArgVal;
4273 bool needsLoad = false;
4274 EVT ObjectVT = Ins[ArgNo].VT;
4275 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
4276 unsigned ArgSize = ObjSize;
4277 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4278 if (Ins[ArgNo].isOrigArg()) {
4279 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4280 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4281 }
4282 unsigned CurArgOffset = ArgOffset;
4284 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
4285 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
4286 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
4287 if (isVarArg || isPPC64) {
4288 MinReservedArea = ((MinReservedArea+15)/16)*16;
4289 MinReservedArea += CalculateStackSlotSize(ObjectVT,
4290 Flags,
4291 PtrByteSize);
4292 } else nAltivecParamsAtEnd++;
4293 } else
4294 // Calculate min reserved area.
4295 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
4296 Flags,
4297 PtrByteSize);
4299 // FIXME the codegen can be much improved in some cases.
4300 // We do not have to keep everything in memory.
4301 if (Flags.isByVal()) {
4302 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4304 // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4305 ObjSize = Flags.getByValSize();
4306 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4307 // Objects of size 1 and 2 are right justified, everything else is
4308 // left justified. This means the memory address is adjusted forwards.
4309 if (ObjSize==1 || ObjSize==2) {
4310 CurArgOffset = CurArgOffset + (4 - ObjSize);
4311 }
4312 // The value of the object is its address.
4313 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
4314 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4315 InVals.push_back(FIN);
4316 if (ObjSize==1 || ObjSize==2) {
4317 if (GPR_idx != Num_GPR_Regs) {
4318 unsigned VReg;
4319 if (isPPC64)
4320 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4321 else
4322 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4323 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4324 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
4325 SDValue Store =
4326 DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
4327 MachinePointerInfo(&*FuncArg), ObjType);
4328 MemOps.push_back(Store);
4330 ++GPR_idx;
4331 }
4332 ArgOffset += PtrByteSize;
4334 continue;
4335 }
4336 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4337 // Store whatever pieces of the object are in registers
4338 // to memory. ArgOffset will be the address of the beginning
4339 // of the aggregate.
4340 if (GPR_idx != Num_GPR_Regs) {
4341 unsigned VReg;
4342 if (isPPC64)
4343 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4344 else
4345 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4346 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4347 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4348 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4349 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4350 MachinePointerInfo(&*FuncArg, j));
4351 MemOps.push_back(Store);
4352 ++GPR_idx;
4353 ArgOffset += PtrByteSize;
4354 } else {
4355 ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
4356 break;
4357 }
4358 }
4359 continue;
4360 }
4362 switch (ObjectVT.getSimpleVT().SimpleTy) {
4363 default: llvm_unreachable("Unhandled argument type!");
4364 case MVT::i1:
4365 case MVT::i32:
4366 if (!isPPC64) {
4367 if (GPR_idx != Num_GPR_Regs) {
4368 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4369 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4371 if (ObjectVT == MVT::i1)
4372 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
4374 ++GPR_idx;
4375 } else {
4376 needsLoad = true;
4377 ArgSize = PtrByteSize;
4378 }
4379 // All int arguments reserve stack space in the Darwin ABI.
4380 ArgOffset += PtrByteSize;
4381 break;
4382 }
4383 LLVM_FALLTHROUGH;
4384 case MVT::i64: // PPC64
4385 if (GPR_idx != Num_GPR_Regs) {
4386 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4387 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4389 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4390 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4391 // value to MVT::i64 and then truncate to the correct register size.
4392 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4394 ++GPR_idx;
4395 } else {
4396 needsLoad = true;
4397 ArgSize = PtrByteSize;
4398 }
4399 // All int arguments reserve stack space in the Darwin ABI.
4400 ArgOffset += 8;
4401 break;
4403 case MVT::f32:
4404 case MVT::f64:
4405 // Every 4 bytes of argument space consumes one of the GPRs available for
4406 // argument passing.
4407 if (GPR_idx != Num_GPR_Regs) {
4408 ++GPR_idx;
4409 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
4410 ++GPR_idx;
4411 }
4412 if (FPR_idx != Num_FPR_Regs) {
4413 unsigned VReg;
4415 if (ObjectVT == MVT::f32)
4416 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
4417 else
4418 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
4420 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4421 ++FPR_idx;
4422 } else {
4423 needsLoad = true;
4424 }
4426 // All FP arguments reserve stack space in the Darwin ABI.
4427 ArgOffset += isPPC64 ? 8 : ObjSize;
4428 break;
4429 case MVT::v4f32:
4430 case MVT::v4i32:
4431 case MVT::v8i16:
4432 case MVT::v16i8:
4433 // Note that vector arguments in registers don't reserve stack space,
4434 // except in varargs functions.
4435 if (VR_idx != Num_VR_Regs) {
4436 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4437 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4438 if (isVarArg) {
4439 while ((ArgOffset % 16) != 0) {
4440 ArgOffset += PtrByteSize;
4441 if (GPR_idx != Num_GPR_Regs)
4442 GPR_idx++;
4443 }
4444 ArgOffset += 16;
4445 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
4446 }
4447 ++VR_idx;
4448 } else {
4449 if (!isVarArg && !isPPC64) {
4450 // Vectors go after all the nonvectors.
4451 CurArgOffset = VecArgOffset;
4452 VecArgOffset += 16;
4453 } else {
4454 // Vectors are aligned.
4455 ArgOffset = ((ArgOffset+15)/16)*16;
4456 CurArgOffset = ArgOffset;
4457 ArgOffset += 16;
4458 }
4459 needsLoad = true;
4460 }
4461 break;
4462 }
4464 // We need to load the argument to a virtual register if we determined above
4465 // that we ran out of physical registers of the appropriate type.
4466 if (needsLoad) {
4467 int FI = MFI.CreateFixedObject(ObjSize,
4468 CurArgOffset + (ArgSize - ObjSize),
4469 isImmutable);
4470 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4471 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4472 }
4474 InVals.push_back(ArgVal);
4475 }
4477 // Allow for Altivec parameters at the end, if needed.
4478 if (nAltivecParamsAtEnd) {
4479 MinReservedArea = ((MinReservedArea+15)/16)*16;
4480 MinReservedArea += 16*nAltivecParamsAtEnd;
4481 }
4483 // Area that is at least reserved in the caller of this function.
4484 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
4486 // Set the size that is at least reserved in caller of this function. Tail
4487 // call optimized functions' reserved stack space needs to be aligned so that
4488 // taking the difference between two stack areas will result in an aligned
4489 // stack frame.
4490 MinReservedArea =
4491 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4492 FuncInfo->setMinReservedArea(MinReservedArea);
4494 // If the function takes variable number of arguments, make a frame index for
4495 // the start of the first vararg value... for expansion of llvm.va_start.
4496 if (isVarArg) {
4497 int Depth = ArgOffset;
4499 FuncInfo->setVarArgsFrameIndex(
4500 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4501 Depth, true));
4502 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4504 // If this function is vararg, store any remaining integer argument regs
4505 // to their spots on the stack so that they may be loaded by dereferencing
4506 // the result of va_next.
4507 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
4508 unsigned VReg;
4510 if (isPPC64)
4511 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4512 else
4513 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
4515 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4516 SDValue Store =
4517 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4518 MemOps.push_back(Store);
4519 // Increment the address by four for the next argument to store
4520 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4521 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4522 }
4523 }
4525 if (!MemOps.empty())
4526 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4528 return Chain;
4529 }
4531 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4532 /// adjusted to accommodate the arguments for the tailcall.
4533 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4534 unsigned ParamSize) {
4536 if (!isTailCall) return 0;
4538 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4539 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4540 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4541 // Remember only if the new adjustment is bigger.
4542 if (SPDiff < FI->getTailCallSPDelta())
4543 FI->setTailCallSPDelta(SPDiff);
4545 return SPDiff;
4546 }
4548 static bool isFunctionGlobalAddress(SDValue Callee);
4550 static bool
4551 callsShareTOCBase(const Function *Caller, SDValue Callee,
4552 const TargetMachine &TM) {
4553 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4554 // don't have enough information to determine if the caller and callee share
4555 // the same TOC base, so we have to pessimistically assume they don't, for
4556 // now.
4557 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4558 if (!G)
4559 return false;
4561 const GlobalValue *GV = G->getGlobal();
4562 // The medium and large code models are expected to provide a sufficiently
4563 // large TOC to provide all data addressing needs of a module with a
4564 // single TOC. Since each module will be addressed with a single TOC, we
4565 // only need to check that caller and callee don't cross dso boundaries.
4566 if (CodeModel::Medium == TM.getCodeModel() ||
4567 CodeModel::Large == TM.getCodeModel())
4568 return TM.shouldAssumeDSOLocal(*Caller->getParent(), GV);
4570 // Otherwise we need to ensure callee and caller are in the same section,
4571 // since the linker may allocate multiple TOCs, and we don't know which
4572 // sections will belong to the same TOC base.
4574 if (!GV->isStrongDefinitionForLinker())
4575 return false;
4577 // Any explicitly-specified sections and section prefixes must also match.
4578 // Also, if we're using -ffunction-sections, then each function is always in
4579 // a different section (the same is true for COMDAT functions).
4580 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4581 GV->getSection() != Caller->getSection())
4582 return false;
4583 if (const auto *F = dyn_cast<Function>(GV)) {
4584 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4585 return false;
4586 }
4588 // If the callee might be interposed, then we can't assume the ultimate call
4589 // target will be in the same section. Even in cases where we can assume that
4590 // interposition won't happen, in any case where the linker might insert a
4591 // stub to allow for interposition, we must generate code as though
4592 // interposition might occur. To understand why this matters, consider a
4593 // situation where: a -> b -> c where the arrows indicate calls. b and c are
4594 // in the same section, but a is in a different module (i.e. has a different
4595 // TOC base pointer). If the linker allows for interposition between b and c,
4596 // then it will generate a stub for the call edge between b and c which will
4597 // save the TOC pointer into the designated stack slot allocated by b. If we
4598 // return true here, and therefore allow a tail call between b and c, that
4599 // stack slot won't exist and the b -> c stub will end up saving b's TOC base
4600 // pointer into the stack slot allocated by a (where the a -> b stub saved
4601 // a's TOC base pointer). If we're not considering a tail call, but rather,
4602 // whether a nop is needed after the call instruction in b, because the linker
4603 // will insert a stub, it might complain about a missing nop if we omit it
4604 // (although many don't complain in this case).
4605 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4606 return false;
4608 return true;
4609 }
4611 static bool
4612 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4613 const SmallVectorImpl<ISD::OutputArg> &Outs) {
4614 assert(Subtarget.is64BitELFABI());
4616 const unsigned PtrByteSize = 8;
4617 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4619 static const MCPhysReg GPR[] = {
4620 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4621 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4622 };
4623 static const MCPhysReg VR[] = {
4624 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4625 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4626 };
4628 const unsigned NumGPRs = array_lengthof(GPR);
4629 const unsigned NumFPRs = 13;
4630 const unsigned NumVRs = array_lengthof(VR);
4631 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4633 unsigned NumBytes = LinkageSize;
4634 unsigned AvailableFPRs = NumFPRs;
4635 unsigned AvailableVRs = NumVRs;
4637 for (const ISD::OutputArg& Param : Outs) {
4638 if (Param.Flags.isNest()) continue;
4640 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
4641 PtrByteSize, LinkageSize, ParamAreaSize,
4642 NumBytes, AvailableFPRs, AvailableVRs,
4643 Subtarget.hasQPX()))
4644 return true;
4645 }
4647 return false;
4648 }
4649 static bool
4650 hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
4651 if (CS.arg_size() != CallerFn->arg_size())
4652 return false;
4654 ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
4655 ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
4656 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4658 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4659 const Value* CalleeArg = *CalleeArgIter;
4660 const Value* CallerArg = &(*CallerArgIter);
4661 if (CalleeArg == CallerArg)
4662 continue;
4664 // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4665 // tail call @callee([4 x i64] undef, [4 x i64] %b)
4666 //      }
4667 // The 1st argument of the callee is undef and has the same type as the caller's.
4668 if (CalleeArg->getType() == CallerArg->getType() &&
4669 isa<UndefValue>(CalleeArg))
4670 continue;
4672 return false;
4673 }
4675 return true;
4676 }
4678 // Returns true if TCO is possible between the caller's and callee's
4679 // calling conventions.
4680 static bool
4681 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4682 CallingConv::ID CalleeCC) {
4683 // Tail calls are possible with fastcc and ccc.
4684 auto isTailCallableCC = [] (CallingConv::ID CC){
4685 return CC == CallingConv::C || CC == CallingConv::Fast;
4686 };
4687 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4688 return false;
4690 // We can safely tail call both fastcc and ccc callees from a c calling
4691 // convention caller. If the caller is fastcc, we may have less stack space
4692 // than a non-fastcc caller with the same signature, so disable tail calls in
4693 // that case.
4694 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4695 }
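// Illustrative outcomes of the rule above (a sketch, not exhaustive):
//   caller ccc,    callee ccc    -> eligible
//   caller ccc,    callee fastcc -> eligible
//   caller fastcc, callee fastcc -> eligible
//   caller fastcc, callee ccc    -> rejected (the fastcc caller may have a
//                                   smaller stack than a ccc caller would)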
4697 bool
4698 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4699 SDValue Callee,
4700 CallingConv::ID CalleeCC,
4701 ImmutableCallSite CS,
4702 bool isVarArg,
4703 const SmallVectorImpl<ISD::OutputArg> &Outs,
4704 const SmallVectorImpl<ISD::InputArg> &Ins,
4705 SelectionDAG& DAG) const {
4706 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4708 if (DisableSCO && !TailCallOpt) return false;
4710 // Variadic argument functions are not supported.
4711 if (isVarArg) return false;
4713 auto &Caller = DAG.getMachineFunction().getFunction();
4714 // Check that the calling conventions are compatible for tco.
4715 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4716 return false;
4718 // A caller with any byval parameter is not supported.
4719 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4720 return false;
4722 // A callee with any byval parameter is not supported either.
4723 // Note: This is a quick work around, because in some cases, e.g.
4724 // caller's stack size > callee's stack size, we are still able to apply
4725 // sibling call optimization. For example, gcc is able to do SCO for caller1
4726 // in the following example, but not for caller2.
4727 // struct test {
4728 // long int a;
4729 // char ary[56];
4730 // } gTest;
4731 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4732 // b->a = v.a;
4733 // return 0;
4734 // }
4735 // void caller1(struct test a, struct test c, struct test *b) {
4736 // callee(gTest, b); }
4737 // void caller2(struct test *b) { callee(gTest, b); }
4738 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4739 return false;
4741 // If callee and caller use different calling conventions, we cannot pass
4742 // parameters on stack since offsets for the parameter area may be different.
4743 if (Caller.getCallingConv() != CalleeCC &&
4744 needStackSlotPassParameters(Subtarget, Outs))
4745 return false;
4747 // No TCO/SCO on indirect calls because the caller has to restore its TOC.
4748 if (!isFunctionGlobalAddress(Callee) &&
4749 !isa<ExternalSymbolSDNode>(Callee))
4750 return false;
4752 // If the caller and callee potentially have different TOC bases then we
4753 // cannot tail call since we need to restore the TOC pointer after the call.
4754 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4755 if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4756 return false;
4758 // TCO allows altering callee ABI, so we don't have to check further.
4759 if (CalleeCC == CallingConv::Fast && TailCallOpt)
4760 return true;
4762 if (DisableSCO) return false;
4764 // If the callee uses the same argument list as the caller, we can apply SCO
4765 // in this case. If not, we need to check whether the callee needs stack
4766 // space for passing arguments.
4767 if (!hasSameArgumentList(&Caller, CS) &&
4768 needStackSlotPassParameters(Subtarget, Outs)) {
4769 return false;
4770 }
4772 return true;
4773 }
4775 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4776 /// for tail call optimization. Targets which want to do tail call
4777 /// optimization should implement this function.
4778 bool
4779 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4780 CallingConv::ID CalleeCC,
4781 bool isVarArg,
4782 const SmallVectorImpl<ISD::InputArg> &Ins,
4783 SelectionDAG& DAG) const {
4784 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4785 return false;
4787 // Variable argument functions are not supported.
4788 if (isVarArg)
4789 return false;
4791 MachineFunction &MF = DAG.getMachineFunction();
4792 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4793 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4794 // Functions containing by val parameters are not supported.
4795 for (unsigned i = 0; i != Ins.size(); i++) {
4796 ISD::ArgFlagsTy Flags = Ins[i].Flags;
4797 if (Flags.isByVal()) return false;
4798 }
4800 // Non-PIC/GOT tail calls are supported.
4801 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4802 return true;
4804 // At the moment we can only do local tail calls (in same module, hidden
4805 // or protected) if we are generating PIC.
4806 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4807 return G->getGlobal()->hasHiddenVisibility()
4808 || G->getGlobal()->hasProtectedVisibility();
4809 }
4811 return false;
4812 }
4814 /// isBLACompatibleAddress - Return the immediate to use if the specified
4815 /// 32-bit value is representable in the immediate field of a BxA instruction.
4816 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4817 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4818 if (!C) return nullptr;
4820 int Addr = C->getZExtValue();
4821 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
4822 SignExtend32<26>(Addr) != Addr)
4823 return nullptr; // Top 6 bits have to be sext of immediate.
4825 return DAG
4826 .getConstant(
4827 (int)C->getZExtValue() >> 2, SDLoc(Op),
4828 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4829 .getNode();
4830 }
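// For illustration: the encodable targets are 4-byte-aligned values that fit
// in a signed 26-bit field. 0x01FFFFFC passes both checks above, while
// 0x02000000 fails because SignExtend32<26>(0x02000000) is negative and no
// longer equals the original address.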
4832 namespace {
4834 struct TailCallArgumentInfo {
4835 SDValue Arg;
4836 SDValue FrameIdxOp;
4837 int FrameIdx = 0;
4839 TailCallArgumentInfo() = default;
4840 };
4842 } // end anonymous namespace
4844 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4845 static void StoreTailCallArgumentsToStackSlot(
4846 SelectionDAG &DAG, SDValue Chain,
4847 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4848 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4849 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4850 SDValue Arg = TailCallArgs[i].Arg;
4851 SDValue FIN = TailCallArgs[i].FrameIdxOp;
4852 int FI = TailCallArgs[i].FrameIdx;
4853 // Store relative to framepointer.
4854 MemOpChains.push_back(DAG.getStore(
4855 Chain, dl, Arg, FIN,
4856 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4857 }
4858 }
4860 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4861 /// the appropriate stack slot for the tail call optimized function call.
4862 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4863 SDValue OldRetAddr, SDValue OldFP,
4864 int SPDiff, const SDLoc &dl) {
4866 // Calculate the new stack slot for the return address.
4867 MachineFunction &MF = DAG.getMachineFunction();
4868 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4869 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4870 bool isPPC64 = Subtarget.isPPC64();
4871 int SlotSize = isPPC64 ? 8 : 4;
4872 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4873 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4874 NewRetAddrLoc, true);
4875 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4876 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4877 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4878 MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4880 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
4881 // slot as the FP is never overwritten.
4882 if (Subtarget.isDarwinABI()) {
4883 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4884 int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
4885 true);
4886 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
4887 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
4888 MachinePointerInfo::getFixedStack(
4889 DAG.getMachineFunction(), NewFPIdx));
4890 }
4892 return Chain;
4893 }
4895 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4896 /// the position of the argument.
4897 static void
4898 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4899 SDValue Arg, int SPDiff, unsigned ArgOffset,
4900 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4901 int Offset = ArgOffset + SPDiff;
4902 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4903 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4904 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4905 SDValue FIN = DAG.getFrameIndex(FI, VT);
4906 TailCallArgumentInfo Info;
4907 Info.Arg = Arg;
4908 Info.FrameIdxOp = FIN;
4909 Info.FrameIdx = FI;
4910 TailCallArguments.push_back(Info);
4911 }
4913 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4914 /// stack slot. Returns the chain as result and the loaded frame pointers in
4915 /// LROpOut/FPOpout. Used when tail calling.
4916 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4917 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4918 SDValue &FPOpOut, const SDLoc &dl) const {
4920 // Load the LR and FP stack slot for later adjusting.
4921 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4922 LROpOut = getReturnAddrFrameIndex(DAG);
4923 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4924 Chain = SDValue(LROpOut.getNode(), 1);
4926 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
4927 // slot as the FP is never overwritten.
4928 if (Subtarget.isDarwinABI()) {
4929 FPOpOut = getFramePointerFrameIndex(DAG);
4930 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
4931 Chain = SDValue(FPOpOut.getNode(), 1);
4932 }
4934 return Chain;
4935 }
4937 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4938 /// by "Src" to address "Dst" of size "Size". Alignment information is
4939 /// specified by the specific parameter attribute. The copy will be passed as
4940 /// a byval function parameter.
4941 /// Sometimes what we are copying is the end of a larger object, the part that
4942 /// does not fit in registers.
4943 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4944 SDValue Chain, ISD::ArgFlagsTy Flags,
4945 SelectionDAG &DAG, const SDLoc &dl) {
4946 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4947 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
4948 false, false, false, MachinePointerInfo(),
4949 MachinePointerInfo());
4950 }
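// For example (editorial sketch): a call passing a 12-byte struct with the
// byval attribute reaches this point as a single 12-byte memcpy node built
// from Flags.getByValSize(); SelectionDAG may later expand that node inline
// or emit a library call to memcpy.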
4952 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4953 /// tail calls.
4954 static void LowerMemOpCallTo(
4955 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4956 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4957 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4958 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4959 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4960 if (!isTailCall) {
4961 if (isVector) {
4962 SDValue StackPtr;
4963 if (isPPC64)
4964 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4965 else
4966 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4967 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4968 DAG.getConstant(ArgOffset, dl, PtrVT));
4969 }
4970 MemOpChains.push_back(
4971 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4972 // Calculate and remember argument location.
4973 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4974 TailCallArguments);
4975 }
4977 static void
4978 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4979 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4980 SDValue FPOp,
4981 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4982 // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4983 // might overwrite each other in case of tail call optimization.
4984 SmallVector<SDValue, 8> MemOpChains2;
4985 // Do not flag preceding copytoreg stuff together with the following stuff.
4986 InFlag = SDValue();
4987 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4988 MemOpChains2, dl);
4989 if (!MemOpChains2.empty())
4990 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4992 // Store the return address to the appropriate stack slot.
4993 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4995 // Emit callseq_end just before tailcall node.
4996 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4997 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4998 InFlag = Chain.getValue(1);
4999 }
5001 // Is this global address that of a function that can be called by name (as
5002 // opposed to something that must hold a descriptor for an indirect call)?
5003 static bool isFunctionGlobalAddress(SDValue Callee) {
5004 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5005 if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5006 Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5007 return false;
5009 return G->getGlobal()->getValueType()->isFunctionTy();
5010 }
5012 return false;
5013 }
5015 SDValue PPCTargetLowering::LowerCallResult(
5016 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5017 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5018 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5019 SmallVector<CCValAssign, 16> RVLocs;
5020 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5021 *DAG.getContext());
5023 CCRetInfo.AnalyzeCallResult(
5024 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5025 ? RetCC_PPC_Cold
5026 : RetCC_PPC);
5028 // Copy all of the result registers out of their specified physreg.
5029 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5030 CCValAssign &VA = RVLocs[i];
5031 assert(VA.isRegLoc() && "Can only return in registers!");
5033 SDValue Val;
5035 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5036 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5037 InFlag);
5038 Chain = Lo.getValue(1);
5039 InFlag = Lo.getValue(2);
5040 VA = RVLocs[++i]; // skip ahead to next loc
5041 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5042 InFlag);
5043 Chain = Hi.getValue(1);
5044 InFlag = Hi.getValue(2);
5045 if (!Subtarget.isLittleEndian())
5046 std::swap(Lo, Hi);
5047 Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5048 } else {
5049 Val = DAG.getCopyFromReg(Chain, dl,
5050 VA.getLocReg(), VA.getLocVT(), InFlag);
5051 Chain = Val.getValue(1);
5052 InFlag = Val.getValue(2);
5053 }
5055 switch (VA.getLocInfo()) {
5056 default: llvm_unreachable("Unknown loc info!");
5057 case CCValAssign::Full: break;
5058 case CCValAssign::AExt:
5059 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5060 break;
5061 case CCValAssign::ZExt:
5062 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5063 DAG.getValueType(VA.getValVT()));
5064 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5065 break;
5066 case CCValAssign::SExt:
5067 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5068 DAG.getValueType(VA.getValVT()));
5069 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5070 break;
5071 }
5073 InVals.push_back(Val);
5074 }
5076 return Chain;
5077 }
5079 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5080 const PPCSubtarget &Subtarget, bool isPatchPoint) {
5081 // PatchPoint calls are not indirect.
5082 if (isPatchPoint)
5083 return false;
5085 if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
5086 return false;
5088 // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs can not
5089 // because the immediate function pointer points to a descriptor instead of
5090 // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5091 // pointer immediate points to the global entry point, while the BLA would
5092 // need to jump to the local entry point (see rL211174).
5093 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5094 isBLACompatibleAddress(Callee, DAG))
5095 return false;
5097 return true;
5098 }
5100 static unsigned getCallOpcode(bool isIndirectCall, bool isPatchPoint,
5101 bool isTailCall, const Function &Caller,
5102 const SDValue &Callee,
5103 const PPCSubtarget &Subtarget,
5104 const TargetMachine &TM) {
5105 if (isTailCall)
5106 return PPCISD::TC_RETURN;
5108 // This is a call through a function pointer.
5109 if (isIndirectCall) {
5110 // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
5111 // indirect calls. The save of the caller's TOC pointer to the stack will be
5112 // inserted into the DAG as part of call lowering. The restore of the TOC
5113 // pointer is modeled by using a pseudo instruction for the call opcode that
5114 // represents the 2 instruction sequence of an indirect branch and link,
5115 // immediately followed by a load of the TOC pointer from the stack save
5116 // slot into gpr2.
5117 if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5118 return PPCISD::BCTRL_LOAD_TOC;
5120 // An indirect call that does not need a TOC restore.
5121 return PPCISD::BCTRL;
5122 }
5124 // The ABIs that maintain a TOC pointer across calls need to have a nop
5125 // immediately following the call instruction if the caller and callee may
5126 // have different TOC bases. At link time, if the linker determines the calls
5127 // may not share a TOC base, the call is redirected to a trampoline inserted
5128 // by the linker. The trampoline will (among other things) save the caller's
5129 // TOC pointer at an ABI designated offset in the linkage area and the linker
5130 // will rewrite the nop to be a load of the TOC pointer from the linkage area
5131 // into gpr2.
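// For example, on ELFv2 the emitted sequence is (sketch):
//   bl callee      # may be redirected by the linker to a stub
//   nop            # rewritten to: ld 2, 24(1) when TOC bases differ
// (ELFv2 saves the TOC pointer at offset 24 of the linkage area; ELFv1 uses
// offset 40.)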
5132 if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5133 return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5134 : PPCISD::CALL_NOP;
5136 return PPCISD::CALL;
5137 }
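// A sketch of the opcode selection implemented above:
//   tail call                                     -> TC_RETURN
//   indirect call, TOC-based ABI (AIX/64-bit ELF) -> BCTRL_LOAD_TOC
//   indirect call, other ABIs                     -> BCTRL
//   direct call, TOC-based ABI, TOC may differ    -> CALL_NOP
//   any other direct call                         -> CALL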
5139 static bool isValidAIXExternalSymSDNode(StringRef SymName) {
5140 return StringSwitch<bool>(SymName)
5141 .Cases("__divdi3", "__fixunsdfdi", "__floatundidf", "__floatundisf",
5142 "__moddi3", "__udivdi3", "__umoddi3", true)
5143 .Cases("ceil", "floor", "memcpy", "memmove", "memset", "round", true)
5147 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5148 const SDLoc &dl, const PPCSubtarget &Subtarget) {
5149 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5150 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5151 return SDValue(Dest, 0);
5153 // Returns true if the callee is local, and false otherwise.
5154 auto isLocalCallee = [&]() {
5155 const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5156 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5157 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5159 return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5160 !dyn_cast_or_null<GlobalIFunc>(GV);
5161 };
5163 // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
5164 // a static relocation model causes some versions of GNU LD (2.17.50, at
5165 // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5166 // built with secure-PLT.
5167 const bool UsePlt =
5168 Subtarget.is32BitELFABI() && !isLocalCallee() &&
5169 Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5171 // On AIX, direct function calls reference the symbol for the function's
5172 // entry point, which is named by prepending a "." before the function's
5173 // name.
5174 const auto getAIXFuncEntryPointSymbolSDNode =
5175 [&](StringRef FuncName, bool IsDeclaration,
5176 const XCOFF::StorageClass &SC) {
5177 auto &Context = DAG.getMachineFunction().getMMI().getContext();
5179 MCSymbolXCOFF *S = cast<MCSymbolXCOFF>(
5180 Context.getOrCreateSymbol(Twine(".") + Twine(FuncName)));
5182 if (IsDeclaration && !S->hasContainingCsect()) {
5183 // On AIX, an undefined symbol needs to be associated with a
5184 // MCSectionXCOFF to get the correct storage mapping class.
5185 // In this case, XCOFF::XMC_PR.
5186 MCSectionXCOFF *Sec = Context.getXCOFFSection(
5187 S->getName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
5188 SectionKind::getMetadata());
5189 S->setContainingCsect(Sec);
5190 }
5192 MVT PtrVT =
5193 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5194 return DAG.getMCSymbol(S, PtrVT);
5195 };
5197 if (isFunctionGlobalAddress(Callee)) {
5198 const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
5199 const GlobalValue *GV = G->getGlobal();
5201 if (!Subtarget.isAIXABI())
5202 return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5203 UsePlt ? PPCII::MO_PLT : 0);
5205 assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5206 const GlobalObject *GO = cast<GlobalObject>(GV);
5207 const XCOFF::StorageClass SC =
5208 TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO);
5209 return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(),
5210 SC);
5211 }
5213 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5214 const char *SymName = S->getSymbol();
5215 if (!Subtarget.isAIXABI())
5216 return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5217 UsePlt ? PPCII::MO_PLT : 0);
5219 // If there exists a user-declared function whose name is the same as the
5220 // ExternalSymbol's, then we pick up the user-declared version.
5221 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5222 if (const Function *F =
5223 dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) {
5224 const XCOFF::StorageClass SC =
5225 TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F);
5226 return getAIXFuncEntryPointSymbolSDNode(F->getName(), F->isDeclaration(),
5227 SC);
5228 }
5230 // TODO: Remove this when the support for ExternalSymbolSDNode is complete.
5231 if (isValidAIXExternalSymSDNode(SymName)) {
5232 return getAIXFuncEntryPointSymbolSDNode(SymName, true, XCOFF::C_EXT);
5233 }
5235 report_fatal_error("Unexpected ExternalSymbolSDNode: " + Twine(SymName));
5236 }
5238 // No transformation needed.
5239 assert(Callee.getNode() && "What no callee?");
5240 return Callee;
5241 }
5243 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5244 assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5245 "Expected a CALLSEQ_STARTSDNode.");
5247 // The last operand is the chain, except when the node has glue. If the node
5248 // has glue, then the last operand is the glue, and the chain is the second
5249 // to last operand.
5250 SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5251 if (LastValue.getValueType() != MVT::Glue)
5252 return LastValue;
5254 return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5255 }
5257 // Creates the node that moves a function's address into the count register
5258 // to prepare for an indirect call instruction.
5259 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5260 SDValue &Glue, SDValue &Chain,
5261 const SDLoc &dl) {
5262 SDValue MTCTROps[] = {Chain, Callee, Glue};
5263 EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5264 Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5265 makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5266 // The glue is the second value produced.
5267 Glue = Chain.getValue(1);
5268 }
5270 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5271 SDValue &Glue, SDValue &Chain,
5272 SDValue CallSeqStart,
5273 ImmutableCallSite CS, const SDLoc &dl,
5274 bool hasNest,
5275 const PPCSubtarget &Subtarget) {
5276 // Function pointers in the 64-bit SVR4 ABI do not point to the function
5277 // entry point, but to the function descriptor (the function entry point
5278 // address is part of the function descriptor though).
5279 // The function descriptor is a three doubleword structure with the
5280 // following fields: function entry point, TOC base address and
5281 // environment pointer.
5282 // Thus for a call through a function pointer, the following actions need
5283 // to be performed:
5284 // 1. Save the TOC of the caller in the TOC save area of its stack
5285 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5286 // 2. Load the address of the function entry point from the function
5287 // descriptor.
5288 // 3. Load the TOC of the callee from the function descriptor into r2.
5289 // 4. Load the environment pointer from the function descriptor into
5290 // r11.
5291 // 5. Branch to the function entry point address.
5292 // 6. On return of the callee, the TOC of the caller needs to be
5293 // restored (this is done in FinishCall()).
5295 // The loads are scheduled at the beginning of the call sequence, and the
5296 // register copies are flagged together to ensure that no other
5297 // operations can be scheduled in between. E.g. without flagging the
5298 // copies together, a TOC access in the caller could be scheduled between
5299 // the assignment of the callee TOC and the branch to the callee, which leads
5300 // to incorrect code.
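// As a reference sketch, the ELFv1 function descriptor described above can
// be pictured as three doublewords (the field names are illustrative):
//
//   struct FunctionDescriptor {
//     uint64_t EntryPoint; // loaded and branched to via the CTR
//     uint64_t TOCBase;    // loaded into r2
//     uint64_t EnvPointer; // loaded into r11
//   };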
5302 // Start by loading the function address from the descriptor.
5303 SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5304 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5305 ? (MachineMemOperand::MODereferenceable |
5306 MachineMemOperand::MOInvariant)
5307 : MachineMemOperand::MONone;
5309 MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);
5311 // Registers used in building the DAG.
5312 const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5313 const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5315 // Offsets of descriptor members.
5316 const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5317 const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5319 const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5320 const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5322 // One load for the functions entry point address.
5323 SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5324 Alignment, MMOFlags);
5326 // One for loading the TOC anchor for the module that contains the called
5327 // function.
5328 SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5329 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5330 SDValue TOCPtr =
5331 DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5332 MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5334 // One for loading the environment pointer.
5335 SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5336 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5337 SDValue LoadEnvPtr =
5338 DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5339 MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5342 // Then copy the newly loaded TOC anchor to the TOC pointer.
5343 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5344 Chain = TOCVal.getValue(0);
5345 Glue = TOCVal.getValue(1);
5347 // If the function call has an explicit 'nest' parameter, it takes the
5348 // place of the environment pointer.
5349 assert((!hasNest || !Subtarget.isAIXABI()) &&
5350 "Nest parameter is not supported on AIX.");
5352 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5353 Chain = EnvVal.getValue(0);
5354 Glue = EnvVal.getValue(1);
5357 // The rest of the indirect call sequence is the same as the non-descriptor
5358 // DAG.
5359 prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5360 }
5362 static void
5363 buildCallOperands(SmallVectorImpl<SDValue> &Ops, CallingConv::ID CallConv,
5364 const SDLoc &dl, bool isTailCall, bool isVarArg,
5365 bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
5366 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5367 SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5368 const PPCSubtarget &Subtarget, bool isIndirect) {
5369 const bool IsPPC64 = Subtarget.isPPC64();
5370 // MVT for a general purpose register.
5371 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5373 // First operand is always the chain.
5374 Ops.push_back(Chain);
5376 // If it's a direct call, pass the callee as the second operand.
5377 if (!isIndirect)
5378 Ops.push_back(Callee);
5379 else {
5380 assert(!isPatchPoint && "Patch point calls are not indirect.");
5382 // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5383 // on the stack (this would have been done in `LowerCall_64SVR4` or
5384 // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5385 // represents both the indirect branch and a load that restores the TOC
5386 // pointer from the linkage area. The operand for the TOC restore is an add
5387 // of the TOC save offset to the stack pointer. This must be the second
5388 // operand: after the chain input but before any other variadic arguments.
5389 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
5390 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5392 SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5393 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5394 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5395 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5396 Ops.push_back(AddTOC);
5397 }
5399 // Add the register used for the environment pointer.
5400 if (Subtarget.usesFunctionDescriptors() && !hasNest)
5401 Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5402 RegVT));
5405 // Add CTR register as callee so a bctr can be emitted later.
5406 if (isTailCall)
5407 Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5408 }
5410 // If this is a tail call, add the stack pointer delta.
5411 if (isTailCall)
5412 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5414 // Add argument registers to the end of the list so that they are known live
5415 // into the call.
5416 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5417 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5418 RegsToPass[i].second.getValueType()));
5420 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5421 // no way to mark dependencies as implicit here.
5422 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5423 if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) && !isPatchPoint)
5424 Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5426 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5427 if (isVarArg && Subtarget.is32BitELFABI())
5428 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5430 // Add a register mask operand representing the call-preserved registers.
5431 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5432 const uint32_t *Mask =
5433 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
5434 assert(Mask && "Missing call preserved mask for calling convention");
5435 Ops.push_back(DAG.getRegisterMask(Mask));
5437 // If the glue is valid, it is the last operand.
5438 if (Glue.getNode())
5439 Ops.push_back(Glue);
5440 }
5442 SDValue PPCTargetLowering::FinishCall(
5443 CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
5444 bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
5445 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5446 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5447 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5448 SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {
5450 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI())
5451 setUsesTOCBasePtr(DAG);
5453 const bool isIndirect = isIndirectCall(Callee, DAG, Subtarget, isPatchPoint);
5454 unsigned CallOpc = getCallOpcode(isIndirect, isPatchPoint, isTailCall,
5455 DAG.getMachineFunction().getFunction(),
5456 Callee, Subtarget, DAG.getTarget());
5458 if (!isIndirect)
5459 Callee = transformCallee(Callee, DAG, dl, Subtarget);
5460 else if (Subtarget.usesFunctionDescriptors())
5461 prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CS,
5462 dl, hasNest, Subtarget);
5463 else
5464 prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5466 // Build the operand list for the call instruction.
5467 SmallVector<SDValue, 8> Ops;
5468 buildCallOperands(Ops, CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5469 hasNest, DAG, RegsToPass, Glue, Chain, Callee, SPDiff,
5470 Subtarget, isIndirect);
5472 // Emit tail call.
5473 if (isTailCall) {
5474 assert(((Callee.getOpcode() == ISD::Register &&
5475 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5476 Callee.getOpcode() == ISD::TargetExternalSymbol ||
5477 Callee.getOpcode() == ISD::TargetGlobalAddress ||
5478 isa<ConstantSDNode>(Callee)) &&
5479 "Expecting a global address, external symbol, absolute value or "
5481 assert(CallOpc == PPCISD::TC_RETURN &&
5482 "Unexpected call opcode for a tail call.");
5483 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5484 return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5485 }
5487 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5488 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5489 Glue = Chain.getValue(1);
5491 // When performing tail call optimization the callee pops its arguments off
5492 // the stack. Account for this here so these bytes can be pushed back on in
5493 // PPCFrameLowering::eliminateCallFramePseudoInstr.
5494 int BytesCalleePops = (CallConv == CallingConv::Fast &&
5495 getTargetMachine().Options.GuaranteedTailCallOpt)
5496 ? NumBytes
5497 : 0;
5499 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5500 DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5501 Glue, dl);
5502 Glue = Chain.getValue(1);
5504 return LowerCallResult(Chain, Glue, CallConv, isVarArg, Ins, dl, DAG, InVals);
5505 }
5507 SDValue
5508 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5509 SmallVectorImpl<SDValue> &InVals) const {
5510 SelectionDAG &DAG = CLI.DAG;
5511 SDLoc &dl = CLI.DL;
5512 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5513 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
5514 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
5515 SDValue Chain = CLI.Chain;
5516 SDValue Callee = CLI.Callee;
5517 bool &isTailCall = CLI.IsTailCall;
5518 CallingConv::ID CallConv = CLI.CallConv;
5519 bool isVarArg = CLI.IsVarArg;
5520 bool isPatchPoint = CLI.IsPatchPoint;
5521 ImmutableCallSite CS = CLI.CS;
5523 if (isTailCall) {
5524 if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
5525 isTailCall = false;
5526 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5527 isTailCall =
5528 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
5529 isVarArg, Outs, Ins, DAG);
5530 else
5531 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5532 Ins, DAG);
5533 if (isTailCall) {
5534 ++NumTailCalls;
5535 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5536 ++NumSiblingCalls;
5538 assert(isa<GlobalAddressSDNode>(Callee) &&
5539 "Callee should be an llvm::Function object.");
5541 const GlobalValue *GV =
5542 cast<GlobalAddressSDNode>(Callee)->getGlobal();
5543 const unsigned Width =
5544 80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
5545 dbgs() << "TCO caller: "
5546 << left_justify(DAG.getMachineFunction().getName(), Width)
5547 << ", callee linkage: " << GV->getVisibility() << ", "
5548 << GV->getLinkage() << "\n");
5549 }
5550 }
5552 if (!isTailCall && CS && CS.isMustTailCall())
5553 report_fatal_error("failed to perform tail call elimination on a call "
5554 "site marked musttail");
5556 // When long calls (i.e. indirect calls) are always used, calls are always
5557 // made via a function pointer. If we have a function name, first translate
5558 // it into an address.
5559 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5560 !isTailCall)
5561 Callee = LowerGlobalAddress(Callee, DAG);
5563 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5564 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
5565 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5566 dl, DAG, InVals, CS);
5568 if (Subtarget.isSVR4ABI())
5569 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
5570 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5571 dl, DAG, InVals, CS);
5573 if (Subtarget.isAIXABI())
5574 return LowerCall_AIX(Chain, Callee, CallConv, isVarArg,
5575 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5576 dl, DAG, InVals, CS);
5578 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
5579 isTailCall, isPatchPoint, Outs, OutVals, Ins,
5580 dl, DAG, InVals, CS);
5581 }
5583 SDValue PPCTargetLowering::LowerCall_32SVR4(
5584 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5585 bool isTailCall, bool isPatchPoint,
5586 const SmallVectorImpl<ISD::OutputArg> &Outs,
5587 const SmallVectorImpl<SDValue> &OutVals,
5588 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5589 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5590 ImmutableCallSite CS) const {
5591 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5592 // of the 32-bit SVR4 ABI stack frame layout.
5594 assert((CallConv == CallingConv::C ||
5595 CallConv == CallingConv::Cold ||
5596 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5598 unsigned PtrByteSize = 4;
5600 MachineFunction &MF = DAG.getMachineFunction();
5602 // Mark this function as potentially containing a function that contains a
5603 // tail call. As a consequence, the frame pointer will be used for dynamic
5604 // alloca and for restoring the caller's stack pointer in this function's
5605 // epilogue. This is done because a tail-called function might overwrite the
5606 // value in this function's (MF) stack pointer stack slot 0(SP).
5607 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5608 CallConv == CallingConv::Fast)
5609 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5611 // Count how many bytes are to be pushed on the stack, including the linkage
5612 // area, parameter list area and the part of the local variable space which
5613 // contains copies of aggregates which are passed by value.
5615 // Assign locations to all of the outgoing arguments.
5616 SmallVector<CCValAssign, 16> ArgLocs;
5617 PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
5619 // Reserve space for the linkage area on the stack.
5620 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5621 PtrByteSize);
5623 CCInfo.PreAnalyzeCallOperands(Outs);
5625 if (isVarArg) {
5626 // Handle fixed and variable vector arguments differently.
5627 // Fixed vector arguments go into registers as long as registers are
5628 // available. Variable vector arguments always go into memory.
5629 unsigned NumArgs = Outs.size();
5631 for (unsigned i = 0; i != NumArgs; ++i) {
5632 MVT ArgVT = Outs[i].VT;
5633 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5634 bool Result;
5636 if (Outs[i].IsFixed) {
5637 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5638 CCInfo);
5639 } else {
5640 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5641 ArgFlags, CCInfo);
5642 }
5644 if (Result) {
5645 #ifndef NDEBUG
5646 errs() << "Call operand #" << i << " has unhandled type "
5647 << EVT(ArgVT).getEVTString() << "\n";
5648 #endif
5649 llvm_unreachable(nullptr);
5650 }
5651 }
5652 } else {
5653 // All arguments are treated the same.
5654 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5655 }
5656 CCInfo.clearWasPPCF128();
5658 // Assign locations to all of the outgoing aggregate by value arguments.
5659 SmallVector<CCValAssign, 16> ByValArgLocs;
5660 CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());
5662 // Reserve stack space for the allocations in CCInfo.
5663 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
5665 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5667 // Size of the linkage area, parameter list area and the part of the local
5668 // variable space where copies of aggregates which are passed by value are
5669 // stored.
5670 unsigned NumBytes = CCByValInfo.getNextStackOffset();
5672 // Calculate by how many bytes the stack has to be adjusted in case of tail
5673 // call optimization.
5674 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
5676 // Adjust the stack pointer for the new arguments...
5677 // These operations are automatically eliminated by the prolog/epilog pass
5678 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5679 SDValue CallSeqStart = Chain;
5681 // Load the return address and frame pointer so they can be moved somewhere
5682 // else later.
5683 SDValue LROp, FPOp;
5684 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5686 // Set up a copy of the stack pointer for use loading and storing any
5687 // arguments that may not fit in the registers available for argument
5688 // passing.
5689 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5691 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5692 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5693 SmallVector<SDValue, 8> MemOpChains;
5695 bool seenFloatArg = false;
5696 // Walk the register/memloc assignments, inserting copies/loads.
5697 // i - Tracks the index into the list of registers allocated for the call
5698 // RealArgIdx - Tracks the index into the list of actual function arguments
5699 // j - Tracks the index into the list of byval arguments
5700 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5701 i != e;
5702 ++i, ++RealArgIdx) {
5703 CCValAssign &VA = ArgLocs[i];
5704 SDValue Arg = OutVals[RealArgIdx];
5705 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5707 if (Flags.isByVal()) {
5708 // Argument is an aggregate which is passed by value, thus we need to
5709 // create a copy of it in the local variable space of the current stack
5710 // frame (which is the stack frame of the caller) and pass the address of
5711 // this copy to the callee.
5712 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5713 CCValAssign &ByValVA = ByValArgLocs[j++];
5714 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5716 // Memory reserved in the local variable space of the callers stack frame.
5717 unsigned LocMemOffset = ByValVA.getLocMemOffset();
5719 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5720 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5721 StackPtr, PtrOff);
5723 // Create a copy of the argument in the local area of the current
5724 // stack frame.
5725 SDValue MemcpyCall =
5726 CreateCopyOfByValArgument(Arg, PtrOff,
5727 CallSeqStart.getNode()->getOperand(0),
5728 Flags, DAG, dl);
5730 // This must go outside the CALLSEQ_START..END.
5731 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5732 SDLoc(MemcpyCall));
5733 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5734 NewCallSeqStart.getNode());
5735 Chain = CallSeqStart = NewCallSeqStart;
5737 // Pass the address of the aggregate copy on the stack either in a
5738 // physical register or in the parameter list area of the current stack
5739 // frame to the callee.
5740 Arg = PtrOff;
5741 }
5743 // When useCRBits() is true, there can be i1 arguments.
5744 // It is because getRegisterType(MVT::i1) => MVT::i1,
5745 // and for other integer types getRegisterType() => MVT::i32.
5746 // Extend i1 and ensure callee will get i32.
5747 if (Arg.getValueType() == MVT::i1)
5748 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5749 dl, MVT::i32, Arg);
5751 if (VA.isRegLoc()) {
5752 seenFloatArg |= VA.getLocVT().isFloatingPoint();
5753 // Put argument in a physical register.
5754 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5755 bool IsLE = Subtarget.isLittleEndian();
5756 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5757 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5758 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5759 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5760 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5761 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5762 SVal.getValue(0)));
5763 } else
5764 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5765 } else {
5766 // Put argument in the parameter list area of the current stack frame.
5767 assert(VA.isMemLoc());
5768 unsigned LocMemOffset = VA.getLocMemOffset();
5770 if (!isTailCall) {
5771 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5772 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5773 StackPtr, PtrOff);
5775 MemOpChains.push_back(
5776 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5777 } else {
5778 // Calculate and remember argument location.
5779 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5780 TailCallArguments);
5781 }
5782 }
5783 }
5785 if (!MemOpChains.empty())
5786 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5788 // Build a sequence of copy-to-reg nodes chained together with token chain
5789 // and flag operands which copy the outgoing args into the appropriate regs.
5790 SDValue InFlag;
5791 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5792 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5793 RegsToPass[i].second, InFlag);
5794 InFlag = Chain.getValue(1);
5795 }
5797 // Set CR bit 6 to true if this is a vararg call with floating args passed in
5798 // registers.
5799 if (isVarArg) {
5800 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5801 SDValue Ops[] = { Chain, InFlag };
5803 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5804 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5806 InFlag = Chain.getValue(1);
5807 }
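// Background sketch: under the 32-bit SVR4 ABI a varargs callee consults CR
// bit 6 to decide whether the FP argument registers must be spilled, so e.g.
// printf("%f", x) is called with CR6 set and printf("%d", n) with CR6 clear.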
5809 if (isTailCall)
5810 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5811 TailCallArguments);
5813 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5814 /* unused except on PPC64 ELFv1 */ false, DAG,
5815 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5816 NumBytes, Ins, InVals, CS);
5817 }
5819 // Copy an argument into memory, being careful to do this outside the
5820 // call sequence for the call to which the argument belongs.
5821 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5822 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5823 SelectionDAG &DAG, const SDLoc &dl) const {
5824 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5825 CallSeqStart.getNode()->getOperand(0),
5826 Flags, DAG, dl);
5827 // The MEMCPY must go outside the CALLSEQ_START..END.
5828 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5829 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5830 SDLoc(MemcpyCall));
5831 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5832 NewCallSeqStart.getNode());
5833 return NewCallSeqStart;
5834 }
5836 SDValue PPCTargetLowering::LowerCall_64SVR4(
5837 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5838 bool isTailCall, bool isPatchPoint,
5839 const SmallVectorImpl<ISD::OutputArg> &Outs,
5840 const SmallVectorImpl<SDValue> &OutVals,
5841 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5842 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5843 ImmutableCallSite CS) const {
5844 bool isELFv2ABI = Subtarget.isELFv2ABI();
5845 bool isLittleEndian = Subtarget.isLittleEndian();
5846 unsigned NumOps = Outs.size();
5847 bool hasNest = false;
5848 bool IsSibCall = false;
5850 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5851 unsigned PtrByteSize = 8;
5853 MachineFunction &MF = DAG.getMachineFunction();
5855 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5856 IsSibCall = true;
5858 // Mark this function as potentially containing a function that contains a
5859 // tail call. As a consequence, the frame pointer will be used for dynamic
5860 // alloca and for restoring the caller's stack pointer in this function's
5861 // epilogue. This is done because a tail-called function might overwrite the
5862 // value in this function's (MF) stack pointer stack slot 0(SP).
5863 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5864 CallConv == CallingConv::Fast)
5865 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5867 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5868 "fastcc not supported on varargs functions");
5870 // Count how many bytes are to be pushed on the stack, including the linkage
5871 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5872 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5873 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
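// As a sketch of the layouts described above (byte offsets from the SP):
//   ELFv1: 0 [SP] 8 [CR] 16 [LR] 24/32 [unused] 40 [TOC] = 48 bytes
//   ELFv2: 0 [SP] 8 [CR] 16 [LR] 24 [TOC]                = 32 bytes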
5874 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5875 unsigned NumBytes = LinkageSize;
5876 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5877 unsigned &QFPR_idx = FPR_idx;
5879 static const MCPhysReg GPR[] = {
5880 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5881 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5882 };
5883 static const MCPhysReg VR[] = {
5884 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5885 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5886 };
5888 const unsigned NumGPRs = array_lengthof(GPR);
5889 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5890 const unsigned NumVRs = array_lengthof(VR);
5891 const unsigned NumQFPRs = NumFPRs;
5893 // On ELFv2, we can avoid allocating the parameter area if all the arguments
5894 // can be passed to the callee in registers.
5895 // For the fast calling convention, there is another check below.
5896 // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5897 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
5898 if (!HasParameterArea) {
5899 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5900 unsigned AvailableFPRs = NumFPRs;
5901 unsigned AvailableVRs = NumVRs;
5902 unsigned NumBytesTmp = NumBytes;
5903 for (unsigned i = 0; i != NumOps; ++i) {
5904 if (Outs[i].Flags.isNest()) continue;
5905 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5906 PtrByteSize, LinkageSize, ParamAreaSize,
5907 NumBytesTmp, AvailableFPRs, AvailableVRs,
5908 Subtarget.hasQPX()))
5909 HasParameterArea = true;
5910 }
5911 }
5913 // When using the fast calling convention, we don't provide backing for
5914 // arguments that will be in registers.
5915 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5917 // Avoid allocating parameter area for fastcc functions if all the arguments
5918 // can be passed in the registers.
5919 if (CallConv == CallingConv::Fast)
5920 HasParameterArea = false;
5922 // Add up all the space actually used.
5923 for (unsigned i = 0; i != NumOps; ++i) {
5924 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5925 EVT ArgVT = Outs[i].VT;
5926 EVT OrigVT = Outs[i].ArgVT;
5928 if (Flags.isNest())
5929 continue;
5931 if (CallConv == CallingConv::Fast) {
5932 if (Flags.isByVal()) {
5933 NumGPRsUsed += (Flags.getByValSize()+7)/8;
5934 if (NumGPRsUsed > NumGPRs)
5935 HasParameterArea = true;
5936 } else {
5937 switch (ArgVT.getSimpleVT().SimpleTy) {
5938 default: llvm_unreachable("Unexpected ValueType for argument!");
5939 case MVT::i1:
5940 case MVT::i32:
5941 case MVT::i64:
5942 if (++NumGPRsUsed <= NumGPRs)
5943 continue;
5944 break;
5945 case MVT::v4i32:
5946 case MVT::v8i16:
5947 case MVT::v16i8:
5948 case MVT::v2f64:
5949 case MVT::v2i64:
5950 case MVT::v1i128:
5951 case MVT::f128:
5952 if (++NumVRsUsed <= NumVRs)
5953 continue;
5954 break;
5955 case MVT::v4f32:
5956 // When using QPX, this is handled like a FP register, otherwise, it
5957 // is an Altivec register.
5958 if (Subtarget.hasQPX()) {
5959 if (++NumFPRsUsed <= NumFPRs)
5960 continue;
5961 } else {
5962 if (++NumVRsUsed <= NumVRs)
5963 continue;
5964 }
5965 break;
5966 case MVT::f32:
5967 case MVT::f64:
5968 case MVT::v4f64: // QPX
5969 case MVT::v4i1: // QPX
5970 if (++NumFPRsUsed <= NumFPRs)
5971 continue;
5972 break;
5973 }
5974 HasParameterArea = true;
5975 }
5976 }
5978 /* Respect alignment of argument on the stack. */
5979 unsigned Align =
5980 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5981 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
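/* A worked instance of the rounding above (illustrative): NumBytes = 52 with
   Align = 16 yields ((52 + 15) / 16) * 16 = 64. */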
5983 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5984 if (Flags.isInConsecutiveRegsLast())
5985 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5986 }
5988 unsigned NumBytesActuallyUsed = NumBytes;
5990 // In the old ELFv1 ABI,
5991 // the prolog code of the callee may store up to 8 GPR argument registers to
5992 // the stack, allowing va_start to index over them in memory if it's varargs.
5993 // Because we cannot tell if this is needed on the caller side, we have to
5994 // conservatively assume that it is needed. As such, make sure we have at
5995 // least enough stack space for the caller to store the 8 GPRs.
5996 // In the ELFv2 ABI, we allocate the parameter area iff a callee
5997 // really requires memory operands, e.g. a vararg function.
5998 if (HasParameterArea)
5999 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6000 else
6001 NumBytes = LinkageSize;
6003 // Tail call needs the stack to be aligned.
6004 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6005 CallConv == CallingConv::Fast)
6006 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6008 int SPDiff = 0;
6010 // Calculate by how many bytes the stack has to be adjusted in case of tail
6011 // call optimization.
6012 if (!IsSibCall)
6013 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6015 // To protect arguments on the stack from being clobbered in a tail call,
6016 // force all the loads to happen before doing any other lowering.
6017 if (isTailCall)
6018 Chain = DAG.getStackArgumentTokenFactor(Chain);
6020 // Adjust the stack pointer for the new arguments...
6021 // These operations are automatically eliminated by the prolog/epilog pass.
6022 if (!IsSibCall)
6023 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6024 SDValue CallSeqStart = Chain;
6026 // Load the return address and frame pointer so they can be moved somewhere
6027 // else later.
6028 SDValue LROp, FPOp;
6029 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6031 // Set up a copy of the stack pointer for use loading and storing any
6032 // arguments that may not fit in the registers available for argument
6033 // passing.
6034 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6036 // Figure out which arguments are going to go in registers, and which in
6037 // memory. Also, if this is a vararg function, floating point operations
6038 // must be stored to our stack, and loaded into integer regs as well, if
6039 // any integer regs are available for argument passing.
6040 unsigned ArgOffset = LinkageSize;
6042 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6043 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6045 SmallVector<SDValue, 8> MemOpChains;
6046 for (unsigned i = 0; i != NumOps; ++i) {
6047 SDValue Arg = OutVals[i];
6048 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6049 EVT ArgVT = Outs[i].VT;
6050 EVT OrigVT = Outs[i].ArgVT;
6052 // PtrOff will be used to store the current argument to the stack if a
6053 // register cannot be found for it.
6054 SDValue PtrOff;
6056 // We re-align the argument offset for each argument, except when using the
6057 // fast calling convention, when we need to make sure we do that only when
6058 // we'll actually use a stack slot.
6059 auto ComputePtrOff = [&]() {
6060 /* Respect alignment of argument on the stack. */
6061 unsigned Align =
6062 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6063 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
6065 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6067 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6068 };
6070 if (CallConv != CallingConv::Fast) {
6071 ComputePtrOff();
6073 /* Compute GPR index associated with argument offset. */
6074 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6075 GPR_idx = std::min(GPR_idx, NumGPRs);
6076 }
6078 // Promote integers to 64-bit values.
6079 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6080 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6081 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6082 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6083 }
6085 // FIXME memcpy is used way more than necessary. Correctness first.
6086 // Note: "by value" is code for passing a structure by value, not
6087 // basic types.
6088 if (Flags.isByVal()) {
6089 // Note: Size includes alignment padding, so
6090 // struct x { short a; char b; }
6091 // will have Size = 4. With #pragma pack(1), it will have Size = 3.
6092 // These are the proper values we need for right-justifying the
6093 // aggregate in a parameter register.
6094 unsigned Size = Flags.getByValSize();
6096 // An empty aggregate parameter takes up no storage and no
6097 // registers.
6098 if (Size == 0)
6099 continue;
6101 if (CallConv == CallingConv::Fast)
6102 ComputePtrOff();
6104 // All aggregates smaller than 8 bytes must be passed right-justified.
6105 if (Size==1 || Size==2 || Size==4) {
6106 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6107 if (GPR_idx != NumGPRs) {
6108 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6109 MachinePointerInfo(), VT);
6110 MemOpChains.push_back(Load.getValue(1));
6111 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6113 ArgOffset += PtrByteSize;
6114 continue;
6115 }
6116 }
6118 if (GPR_idx == NumGPRs && Size < 8) {
6119 SDValue AddPtr = PtrOff;
6120 if (!isLittleEndian) {
6121 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6122 PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);
        ArgOffset += PtrByteSize;
        continue;
      }
6131 // Copy entire object into memory. There are cases where gcc-generated
6132 // code assumes it is there, even if it could be put entirely into
6133 // registers. (This is not what the doc says.)
6135 // FIXME: The above statement is likely due to a misunderstanding of the
6136 // documents. All arguments must be copied into the parameter area BY
6137 // THE CALLEE in the event that the callee takes the address of any
6138 // formal argument. That has not yet been implemented. However, it is
      // reasonable to use the stack area as a staging area for the register
      // load.
6142 // Skip this for small aggregates, as we will use the same slot for a
6143 // right-justified copy, below.
      if (Size >= 8)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);
6149 // When a register is available, pass a small aggregate right-justified.
6150 if (Size < 8 && GPR_idx != NumGPRs) {
6151 // The easiest way to get this right-justified in a register
6152 // is to copy the structure into the rightmost portion of a
        // local variable slot, then load the whole slot into the
        // register.
6155 // FIXME: The memcpy seems to produce pretty awful code for
6156 // small aggregates, particularly for packed ones.
6157 // FIXME: It would be preferable to use the slot in the
6158 // parameter save area instead of a new local variable.
6159 SDValue AddPtr = PtrOff;
6160 if (!isLittleEndian) {
6161 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
          AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
        }
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                  CallSeqStart.getNode()->getOperand(0),
                                  Flags, DAG, dl);
6168 // Load the slot into the register.
        SDValue Load =
            DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6171 MemOpChains.push_back(Load.getValue(1));
6172 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6174 // Done with this argument.
        ArgOffset += PtrByteSize;
        continue;
      }
6179 // For aggregates larger than PtrByteSize, copy the pieces of the
6180 // object that fit into registers from the parameter save area.
6181 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6182 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6183 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6184 if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6187 MemOpChains.push_back(Load.getValue(1));
6188 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }
6198 switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
6204 // The 'nest' parameter, if any, is passed in R11.
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
        break;
      }
6210 // These can be scalar arguments or elements of an integer array type
6211 // passed directly. Clang may use those instead of "byval" aggregate
6212 // types to avoid forcing arguments to memory unnecessarily.
6213 if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();
6219 assert(HasParameterArea &&
6220 "Parameter area must exist to pass an argument in memory.");
6221 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6222 true, isTailCall, false, MemOpChains,
6223 TailCallArguments, dl);
6224 if (CallConv == CallingConv::Fast)
          ArgOffset += PtrByteSize;
      }
      if (CallConv != CallingConv::Fast)
        ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64: {
6232 // These can be scalar arguments or elements of a float array type
6233 // passed directly. The latter are used to implement ELFv2 homogenous
6234 // float aggregates.
6236 // Named arguments go into FPRs first, and once they overflow, the
6237 // remaining arguments go into GPRs and then the parameter save area.
6238 // Unnamed arguments for vararg functions always go to GPRs and
6239 // then the parameter save area. For now, put all arguments to vararg
6240 // routines always in both locations (FPR *and* GPR or stack slot).
6241 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
6242 bool NeededLoad = false;
6244 // First load the argument into the next available FPR.
6245 if (FPR_idx != NumFPRs)
6246 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6248 // Next, load the argument into GPR or stack slot if needed.
      if (!NeedGPROrStack)
        ;
6251 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
6252 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6253 // once we support fp <-> gpr moves.
6255 // In the non-vararg case, this can only ever happen in the
6256 // presence of f32 array types, since otherwise we never run
        // out of FPRs before running out of GPRs.
        SDValue ArgVal;

        // Double values are always passed in a single GPR.
6261 if (Arg.getValueType() != MVT::f32) {
6262 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6264 // Non-array float values are extended and passed in a GPR.
6265 } else if (!Flags.isInConsecutiveRegs()) {
6266 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6267 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6269 // If we have an array of floats, we collect every odd element
6270 // together with its predecessor into one GPR.
6271 } else if (ArgOffset % PtrByteSize != 0) {
          SDValue Lo, Hi;
          Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6274 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
          if (!isLittleEndian)
            std::swap(Lo, Hi);
          ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
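          // Worked example (big-endian assumed): for a homogeneous float
          // array {a0, a1}, a1 reaches this point with ArgOffset % 8 == 4 and
          // is paired with a0, so a0 fills the high word and a1 the low word
          // of a single i64 GPR value.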
6279 // The final element, if even, goes into the first half of a GPR.
6280 } else if (Flags.isInConsecutiveRegsLast()) {
6281 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6282 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6283 if (!isLittleEndian)
6284 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6285 DAG.getConstant(32, dl, MVT::i32));
        // Non-final even elements are skipped; they will be handled
        // together with the subsequent argument on the next go-around.
        } else
          ArgVal = SDValue();

        if (ArgVal.getNode())
6293 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();
6298 // Single-precision floating-point values are mapped to the
6299 // second (rightmost) word of the stack doubleword.
6300 if (Arg.getValueType() == MVT::f32 &&
6301 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6302 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
          PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
        }

        assert(HasParameterArea &&
6307 "Parameter area must exist to pass an argument in memory.");
6308 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6309 true, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);

        NeededLoad = true;
      }
6314 // When passing an array of floats, the array occupies consecutive
6315 // space in the argument area; only round up to the next doubleword
6316 // at the end of the array. Otherwise, each float takes 8 bytes.
6317 if (CallConv != CallingConv::Fast || NeededLoad) {
6318 ArgOffset += (Arg.getValueType() == MVT::f32 &&
6319 Flags.isInConsecutiveRegs()) ? 4 : 8;
6320 if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    }
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
6333 if (!Subtarget.hasQPX()) {
6334 // These can be scalar arguments or elements of a vector array type
6335 // passed directly. The latter are used to implement ELFv2 homogenous
6336 // vector aggregates.
6338 // For a varargs call, named arguments go into VRs or on the stack as
6339 // usual; unnamed arguments always go to the stack or the corresponding
6340 // GPRs when within range. For now, we always put the value in both
      // locations (or even all three).
      if (isVarArg) {
        assert(HasParameterArea &&
6344 "Parameter area must exist if we have a varargs call.");
6345 // We could elide this store in the case where the object fits
6346 // entirely in R registers. Maybe later.
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6349 MemOpChains.push_back(Store);
6350 if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6353 MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }

        for (unsigned i=0; i<16; i+=PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
6360 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6361 DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6364 MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params go into VRs or on the stack.
6371 if (VR_idx != NumVRs) {
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();
6377 assert(HasParameterArea &&
6378 "Parameter area must exist to pass an argument in memory.");
6379 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6380 true, isTailCall, true, MemOpChains,
6381 TailCallArguments, dl);
        if (CallConv == CallingConv::Fast)
          ArgOffset += 16;
      }

      if (CallConv != CallingConv::Fast)
        ArgOffset += 16;

      break;
      } // not QPX.
6391 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6392 "Invalid QPX parameter type");
      bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
      if (isVarArg) {
        assert(HasParameterArea &&
6400 "Parameter area must exist if we have a varargs call.");
6401 // We could elide this store in the case where the object fits
6402 // entirely in R registers. Maybe later.
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6405 MemOpChains.push_back(Store);
6406 if (QFPR_idx != NumQFPRs) {
6407 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6408 PtrOff, MachinePointerInfo());
6409 MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
        }
        ArgOffset += (IsF32 ? 16 : 32);
6413 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
6416 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6417 DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6420 MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs QPX params go into registers or on the stack.
6427 if (QFPR_idx != NumQFPRs) {
        RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
      } else {
        if (CallConv == CallingConv::Fast)
          ComputePtrOff();
6433 assert(HasParameterArea &&
6434 "Parameter area must exist to pass an argument in memory.");
6435 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6436 true, isTailCall, true, MemOpChains,
6437 TailCallArguments, dl);
6438 if (CallConv == CallingConv::Fast)
          ArgOffset += (IsF32 ? 16 : 32);
      }

      if (CallConv != CallingConv::Fast)
        ArgOffset += (IsF32 ? 16 : 32);
      break;
      }
    }
  }
6449 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6450 "mismatch in size of parameter area");
6451 (void)NumBytesActuallyUsed;
6453 if (!MemOpChains.empty())
6454 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6456 // Check if this is an indirect call (MTCTR/BCTRL).
6457 // See prepareDescriptorIndirectCall and buildCallOperands for more
6458 // information about calls through function pointers in the 64-bit SVR4 ABI.
6459 if (!isTailCall && !isPatchPoint &&
6460 !isFunctionGlobalAddress(Callee) &&
6461 !isa<ExternalSymbolSDNode>(Callee)) {
6462 // Load r2 into a virtual register and store it to the TOC save area.
6463 setUsesTOCBasePtr(DAG);
6464 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6465 // TOC save area offset.
6466 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6467 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6468 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6469 Chain = DAG.getStore(
6470 Val.getValue(1), dl, Val, AddPtr,
6471 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
6472 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6473 // This does not mean the MTCTR instruction must use R12; it's easier
6474 // to model this as an extra parameter, so do that.
    if (isELFv2ABI && !isPatchPoint)
      RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
  }
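  // A sketch of the runtime effect (offset values are ABI-dependent; 24 is
  // the ELFv2 TOC save slot, for example): the store above gives the call
  // sequence "std 2, 24(1)" semantics, so the instruction scheduled after
  // the bctrl (e.g. "ld 2, 24(1)") can restore the caller's TOC pointer.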
6479 // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6483 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6484 RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall && !IsSibCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);
6492 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6493 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
                    SPDiff, NumBytes, Ins, InVals, CS);
}
6497 SDValue PPCTargetLowering::LowerCall_Darwin(
6498 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6499 bool isTailCall, bool isPatchPoint,
6500 const SmallVectorImpl<ISD::OutputArg> &Outs,
6501 const SmallVectorImpl<SDValue> &OutVals,
6502 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6503 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6504 ImmutableCallSite CS) const {
6505 unsigned NumOps = Outs.size();
6507 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6508 bool isPPC64 = PtrVT == MVT::i64;
6509 unsigned PtrByteSize = isPPC64 ? 8 : 4;
6511 MachineFunction &MF = DAG.getMachineFunction();
  // Mark this function as potentially containing a function that contains a
  // tail call. As a consequence, the frame pointer will be used for dynamic
  // stack allocation and for restoring the caller's stack pointer in this
  // function's epilog. This is done because a tail call from the called
  // function might overwrite the value in this function's (MF) stack pointer
  // stack slot 0(SP).
6518 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6519 CallConv == CallingConv::Fast)
6520 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6522 // Count how many bytes are to be pushed on the stack, including the linkage
6523 // area, and parameter passing area. We start with 24/48 bytes, which is
6524 // prereserved space for [SP][CR][LR][3 x unused].
6525 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6526 unsigned NumBytes = LinkageSize;
6528 // Add up all the space actually used.
6529 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6530 // they all go in registers, but we must reserve stack space for them for
6531 // possible use by the caller. In varargs or 64-bit calls, parameters are
  // assigned stack space in order, with padding so Altivec parameters are
  // 16-byte aligned.
  unsigned nAltivecParamsAtEnd = 0;
6535 for (unsigned i = 0; i != NumOps; ++i) {
6536 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6537 EVT ArgVT = Outs[i].VT;
6538 // Varargs Altivec parameters are padded to a 16 byte boundary.
6539 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6540 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6541 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6542 if (!isVarArg && !isPPC64) {
6543 // Non-varargs Altivec parameters go after all the non-Altivec
6544 // parameters; handle those later so we know how much padding we need.
        nAltivecParamsAtEnd++;
        continue;
      }
6548 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
      NumBytes = ((NumBytes+15)/16)*16;
    }
    NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  }
6554 // Allow for Altivec parameters at the end, if needed.
6555 if (nAltivecParamsAtEnd) {
6556 NumBytes = ((NumBytes+15)/16)*16;
    NumBytes += 16*nAltivecParamsAtEnd;
  }
6560 // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if it is a
  // varargs function.
6562 // Because we cannot tell if this is needed on the caller side, we have to
6563 // conservatively assume that it is needed. As such, make sure we have at
6564 // least enough stack space for the caller to store the 8 GPRs.
6565 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
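  // Worked example (illustrative): with a 64-bit Darwin linkage size of 48
  // bytes, the floor computed here is 48 + 8 * 8 = 112 bytes, even for a
  // callee that takes no stack arguments at all.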
6567 // Tail call needs the stack to be aligned.
6568 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6569 CallConv == CallingConv::Fast)
6570 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6572 // Calculate by how many bytes the stack has to be adjusted in case of tail
6573 // call optimization.
6574 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6576 // To protect arguments on the stack from being clobbered in a tail call,
  // force all the loads to happen before doing any other lowering.
  if (isTailCall)
    Chain = DAG.getStackArgumentTokenFactor(Chain);
6581 // Adjust the stack pointer for the new arguments...
6582 // These operations are automatically eliminated by the prolog/epilog pass
6583 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6584 SDValue CallSeqStart = Chain;
  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6600 // Figure out which arguments are going to go in registers, and which in
6601 // memory. Also, if this is a vararg function, floating point operations
6602 // must be stored to our stack, and loaded into integer regs as well, if
6603 // any integer regs are available for argument passing.
6604 unsigned ArgOffset = LinkageSize;
6605 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6607 static const MCPhysReg GPR_32[] = { // 32-bit registers.
6608 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
6611 static const MCPhysReg GPR_64[] = { // 64-bit registers.
6612 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
6615 static const MCPhysReg VR[] = {
6616 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
6619 const unsigned NumGPRs = array_lengthof(GPR_32);
6620 const unsigned NumFPRs = 13;
6621 const unsigned NumVRs = array_lengthof(VR);
6623 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6625 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6626 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6628 SmallVector<SDValue, 8> MemOpChains;
6629 for (unsigned i = 0; i != NumOps; ++i) {
6630 SDValue Arg = OutVals[i];
6631 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6633 // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDValue PtrOff;

    PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6639 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6641 // On PPC64, promote integers to 64-bit values.
6642 if (isPPC64 && Arg.getValueType() == MVT::i32) {
6643 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6644 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
    }
6648 // FIXME memcpy is used way more than necessary. Correctness first.
    // Note: "by value" is code for passing a structure by value, not
    // basic types.
    if (Flags.isByVal()) {
6652 unsigned Size = Flags.getByValSize();
6653 // Very small objects are passed right-justified. Everything else is
6654 // passed left-justified.
6655 if (Size==1 || Size==2) {
6656 EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6657 if (GPR_idx != NumGPRs) {
6658 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6659 MachinePointerInfo(), VT);
6660 MemOpChains.push_back(Load.getValue(1));
6661 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
                                          PtrOff.getValueType());
          SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
                                    CallSeqStart.getNode()->getOperand(0),
                                    Flags, DAG, dl);
          ArgOffset += PtrByteSize;
        }
        continue;
      }
6675 // Copy entire object into memory. There are cases where gcc-generated
6676 // code assumes it is there, even if it could be put entirely into
6677 // registers. (This is not what the doc says.)
      Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
                                CallSeqStart.getNode()->getOperand(0),
                                Flags, DAG, dl);
6682 // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6683 // copy the pieces of the object that fit into registers from the
6684 // parameter save area.
6685 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6686 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6687 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6688 if (GPR_idx != NumGPRs) {
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6691 MemOpChains.push_back(Load.getValue(1));
6692 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
          break;
        }
      }
      continue;
    }
6702 switch (Arg.getSimpleValueType().SimpleTy) {
    default: llvm_unreachable("Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != NumGPRs) {
6708 if (Arg.getValueType() == MVT::i1)
6709 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6714 isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      }
      ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

        if (isVarArg) {
          SDValue Store =
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6727 MemOpChains.push_back(Store);
6729 // Float varargs are always shadowed in available integer registers
6730 if (GPR_idx != NumGPRs) {
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6733 MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6737 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6738 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
            SDValue Load =
                DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6741 MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        } else {
          // If we have any FPRs remaining, we may also have GPRs remaining.
          // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
          // GPRs.
          if (GPR_idx != NumGPRs)
            ++GPR_idx;
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
              !isPPC64)   // PPC64 has 64-bit GPR's obviously :)
            ++GPR_idx;
        }
      } else
        LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6756 isPPC64, isTailCall, false, MemOpChains,
                         TailCallArguments, dl);
      if (isPPC64)
        ArgOffset += 8;
      else
        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      if (isVarArg) {
6768 // These go aligned on the stack, or in the corresponding R registers
6769 // when within range. The Darwin PPC ABI doc claims they also go in
6770 // V registers; in fact gcc does this only for arguments that are
6771 // prototyped, not for those that match the ... We do it for all
6772 // arguments, seems to work.
6773 while (ArgOffset % 16 !=0) {
6774 ArgOffset += PtrByteSize;
          if (GPR_idx != NumGPRs)
            GPR_idx++;
        }
6778 // We could elide this store in the case where the object fits
6779 // entirely in R registers. Maybe later.
6780 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6781 DAG.getConstant(ArgOffset, dl, PtrVT));
        SDValue Store =
            DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6784 MemOpChains.push_back(Store);
6785 if (VR_idx != NumVRs) {
          SDValue Load =
              DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6788 MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        }
        for (unsigned i=0; i<16; i+=PtrByteSize) {
          if (GPR_idx == NumGPRs)
            break;
6795 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6796 DAG.getConstant(i, dl, PtrVT));
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6799 MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        }
        break;
      }

      // Non-varargs Altivec params generally go in registers, but have
6806 // stack space allocated at the end.
6807 if (VR_idx != NumVRs) {
6808 // Doesn't have GPR space allocated.
6809 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6810 } else if (nAltivecParamsAtEnd==0) {
6811 // We are emitting Altivec params in order.
6812 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6813 isPPC64, isTailCall, true, MemOpChains,
                         TailCallArguments, dl);
        ArgOffset += 16;
      }
      break;
    }
  }
6820 // If all Altivec parameters fit in registers, as they usually do,
6821 // they get stack space following the non-Altivec parameters. We
6822 // don't track this here because nobody below needs it.
  // If there are more Altivec parameters than fit in registers, emit
  // the stores here.
  if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
    unsigned j = 0;
    // Offset is aligned; skip 1st 12 params which go in V registers.
    ArgOffset = ((ArgOffset+15)/16)*16;
    ArgOffset += 12*16;
6830 for (unsigned i = 0; i != NumOps; ++i) {
6831 SDValue Arg = OutVals[i];
6832 EVT ArgType = Outs[i].VT;
6833 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
          ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
        if (++j > NumVRs) {
          SDValue PtrOff;
          // We are emitting Altivec params in order.
6838 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6839 isPPC64, isTailCall, true, MemOpChains,
                           TailCallArguments, dl);
          ArgOffset += 16;
        }
      }
    }
  }

  if (!MemOpChains.empty())
6848 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6850 // On Darwin, R12 must contain the address of an indirect callee. This does
6851 // not mean the MTCTR instruction must use R12; it's easier to model this as
  // an extra parameter, so do that.
  if (!isTailCall &&
      !isFunctionGlobalAddress(Callee) &&
6855 !isa<ExternalSymbolSDNode>(Callee) &&
6856 !isBLACompatibleAddress(Callee, DAG))
6857 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6858 PPC::R12), Callee));
6860 // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6864 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6865 RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (isTailCall)
    PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
                    TailCallArguments);
6873 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6874 /* unused except on PPC64 ELFv1 */ false, DAG,
6875 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
                    NumBytes, Ins, InVals, CS);
}
6879 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   CCState &State) {
6883 if (ValVT == MVT::f128)
6884 report_fatal_error("f128 is unimplemented on AIX.");
6886 if (ArgFlags.isByVal())
6887 report_fatal_error("Passing structure by value is unimplemented.");
6889 if (ArgFlags.isNest())
6890 report_fatal_error("Nest arguments are unimplemented.");
6892 if (ValVT.isVector() || LocVT.isVector())
6893 report_fatal_error("Vector arguments are unimplemented on AIX.");
6895 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6896 State.getMachineFunction().getSubtarget());
6897 const bool IsPPC64 = Subtarget.isPPC64();
6898 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6900 static const MCPhysReg GPR_32[] = {// 32-bit registers.
6901 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6902 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6903 static const MCPhysReg GPR_64[] = {// 64-bit registers.
6904 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6905 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6907 // Arguments always reserve parameter save area.
6908 switch (ValVT.SimpleTy) {
  default:
    report_fatal_error("Unhandled value type for argument.");
  case MVT::i64:
    // i64 arguments should have been split to i32 for PPC32.
    assert(IsPPC64 && "PPC32 should have split i64 values.");
    LLVM_FALLTHROUGH;
  case MVT::i1:
  case MVT::i32:
    State.AllocateStack(PtrByteSize, PtrByteSize);
6918 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6919 MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6920 // Promote integers if needed.
6921 if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
6922 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6923 : CCValAssign::LocInfo::ZExt;
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
      return false;
    }
    report_fatal_error("Handling of placing parameters on the stack is "
                       "unimplemented!");
  case MVT::f32:
  case MVT::f64: {
6933 // Parameter save area (PSA) is reserved even if the float passes in fpr.
6934 const unsigned StoreSize = LocVT.getStoreSize();
6935 // Floats are always 4-byte aligned in the PSA on AIX.
6936 // This includes f64 in 64-bit mode for ABI compatibility.
6937 State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);
6938 if (unsigned Reg = State.AllocateReg(FPR))
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    else
      report_fatal_error("Handling of placing parameters on the stack is "
                         "unimplemented!");
6944 // AIX requires that GPRs are reserved for float arguments.
6945 // Successfully reserved GPRs are only initialized for vararg calls.
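    // Worked example (illustrative register choices): a vararg f64 on 32-bit
    // AIX reserves an 8-byte PSA slot, may land in an FPR, and in the loop
    // below additionally claims two GPRs (say R3/R4) that a va_arg reader
    // would use for the same doubleword.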
6946 MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6947 for (unsigned I = 0; I < StoreSize; I += PtrByteSize) {
6948 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6949 if (State.isVarArg()) {
6950 // Custom handling is required for:
6951 // f64 in PPC32 needs to be split into 2 GPRs.
6952 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
          State.addLoc(
              CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
        }
6956 } else if (State.isVarArg()) {
        report_fatal_error("Handling of placing parameters on the stack is "
                           "unimplemented!");
      }
    }
    return false;
  }
  }
  return true;
}
static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
                                                    bool IsPPC64) {
6970 assert((IsPPC64 || SVT != MVT::i64) &&
6971 "i64 should have been split for 32-bit codegen.");
6975 report_fatal_error("Unexpected value type for formal argument");
6979 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6981 return &PPC::F4RCRegClass;
6983 return &PPC::F8RCRegClass;
6987 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6988 SelectionDAG &DAG, SDValue ArgValue,
6989 MVT LocVT, const SDLoc &dl) {
6990 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
  assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());

  if (Flags.isSExt())
    ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6995 DAG.getValueType(ValVT));
6996 else if (Flags.isZExt())
6997 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6998 DAG.getValueType(ValVT));
  return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
}
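// Usage sketch for the helper above (assumed values): an i8 formal argument
// that arrives in a 64-bit GPR is wrapped in AssertSext/AssertZext for its 8
// significant bits and then truncated, letting later combines treat the high
// 56 bits as already known.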
7003 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7004 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7005 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7006 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7008 assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7009 CallConv == CallingConv::Fast) &&
7010 "Unexpected calling convention!");
7013 report_fatal_error("This call type is unimplemented on AIX.");
7015 if (getTargetMachine().Options.GuaranteedTailCallOpt)
    report_fatal_error("Tail call support is unimplemented on AIX.");

  if (useSoftFloat())
    report_fatal_error("Soft float support is unimplemented on AIX.");
7021 const PPCSubtarget &Subtarget =
7022 static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7023 if (Subtarget.hasQPX())
    report_fatal_error("QPX is not supported on AIX.");
7026 const bool IsPPC64 = Subtarget.isPPC64();
7027 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7029 // Assign locations to all of the incoming arguments.
7030 SmallVector<CCValAssign, 16> ArgLocs;
7031 MachineFunction &MF = DAG.getMachineFunction();
7032 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7034 // Reserve space for the linkage area on the stack.
7035 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7036 // On AIX a minimum of 8 words is saved to the parameter save area.
7037 const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7038 CCInfo.AllocateStack(LinkageSize + MinParameterSaveArea, PtrByteSize);
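  // Worked example (illustrative): on 64-bit AIX this pre-allocates
  // 48 + 8 * 8 = 112 bytes, so the first CC_AIX-assigned stack offset starts
  // beyond both the linkage area and the minimum parameter save area.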
7039 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7042 CCValAssign &VA = ArgLocs[i];
7044 ISD::ArgFlagsTy Flags = Ins[i].Flags;
7045 if (VA.isRegLoc()) {
7046 EVT ValVT = VA.getValVT();
7047 MVT LocVT = VA.getLocVT();
7048 MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
      unsigned VReg =
          MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7052 if (ValVT.isScalarInteger() &&
7053 (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
        ArgValue =
            truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
      }
      InVals.push_back(ArgValue);
    } else {
      report_fatal_error("Handling of formal arguments on the stack is "
                         "unimplemented!");
    }
  }
7064 // Area that is at least reserved in the caller of this function.
7065 unsigned MinReservedArea = CCInfo.getNextStackOffset();
7067 // Set the size that is at least reserved in caller of this function. Tail
7068 // call optimized function's reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack frame.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
7073 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setMinReservedArea(MinReservedArea);

  return Chain;
}
7079 SDValue PPCTargetLowering::LowerCall_AIX(
7080 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
7081 bool isTailCall, bool isPatchPoint,
7082 const SmallVectorImpl<ISD::OutputArg> &Outs,
7083 const SmallVectorImpl<SDValue> &OutVals,
7084 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7085 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7086 ImmutableCallSite CS) const {
7088 assert((CallConv == CallingConv::C ||
7089 CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unexpected calling convention!");

  if (isPatchPoint)
    report_fatal_error("This call type is unimplemented on AIX.");
7095 const PPCSubtarget& Subtarget =
7096 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7097 if (Subtarget.hasQPX())
7098 report_fatal_error("QPX is not supported on AIX.");
7099 if (Subtarget.hasAltivec())
7100 report_fatal_error("Altivec support is unimplemented on AIX.");
7102 MachineFunction &MF = DAG.getMachineFunction();
7103 SmallVector<CCValAssign, 16> ArgLocs;
7104 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7106 // Reserve space for the linkage save area (LSA) on the stack.
7107 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7108 // [SP][CR][LR][2 x reserved][TOC].
7109 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7110 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7111 const bool IsPPC64 = Subtarget.isPPC64();
7112 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7113 CCInfo.AllocateStack(LinkageSize, PtrByteSize);
7114 CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7116 // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is a varargs function.
  // Because we cannot tell if this is needed on the caller side, we have to
7120 // conservatively assume that it is needed. As such, make sure we have at
7121 // least enough stack space for the caller to store the 8 GPRs.
7122 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7123 const unsigned NumBytes = LinkageSize + MinParameterSaveAreaSize;
7125 // Adjust the stack pointer for the new arguments...
7126 // These operations are automatically eliminated by the prolog/epilog pass.
7127 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7128 SDValue CallSeqStart = Chain;
7130 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7132 for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
    CCValAssign &VA = ArgLocs[I++];

    if (VA.isMemLoc())
      report_fatal_error("Handling of placing parameters on the stack is "
                         "unimplemented!");

    assert(VA.isRegLoc() &&
           "Unexpected non-register location for function call argument.");
7142 SDValue Arg = OutVals[VA.getValNo()];
7144 if (!VA.needsCustom()) {
      switch (VA.getLocInfo()) {
      default:
        report_fatal_error("Unexpected argument extension type.");
      case CCValAssign::Full:
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));

      continue;
    }
    // Custom handling is used for GPR initializations for vararg float
    // arguments.
    assert(isVarArg && VA.getValVT().isFloatingPoint() &&
           VA.getLocVT().isInteger() &&
           "Unexpected custom register handling for calling convention.");

    SDValue ArgAsInt =
        DAG.getBitcast(MVT::getIntegerVT(VA.getValVT().getSizeInBits()), Arg);
7171 if (Arg.getValueType().getStoreSize() == VA.getLocVT().getStoreSize())
7172 // f32 in 32-bit GPR
7173 // f64 in 64-bit GPR
7174 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7175 else if (Arg.getValueType().getSizeInBits() < VA.getLocVT().getSizeInBits())
7176 // f32 in 64-bit GPR.
7177 RegsToPass.push_back(std::make_pair(
7178 VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, VA.getLocVT())));
    else {
      // f64 in two 32-bit GPRs
7181 // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7182 assert(Arg.getValueType() == MVT::f64 && isVarArg && !IsPPC64 &&
7183 "Unexpected custom register for argument!");
7184 CCValAssign &GPR1 = VA;
7185 SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7186 DAG.getConstant(32, dl, MVT::i8));
7187 RegsToPass.push_back(std::make_pair(
7188 GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7189 assert(I != E && "A second custom GPR is expected!");
7190 CCValAssign &GPR2 = ArgLocs[I++];
7191 assert(GPR2.isRegLoc() && GPR2.getValNo() == GPR1.getValNo() &&
7192 GPR2.needsCustom() && "A second custom GPR is expected!");
7193 RegsToPass.push_back(std::make_pair(
          GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
    }
  }
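  // Worked example (assumed bit patterns): for the vararg double 3.0
  // (0x4008000000000000), the first custom GPR receives the high word
  // 0x40080000 and the adjacent one the low word 0x00000000.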
7198 // For indirect calls, we need to save the TOC base to the stack for
7199 // restoration after the call.
7200 if (!isTailCall && !isPatchPoint &&
7201 !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee)) {
7202 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7203 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7204 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7205 const unsigned TOCSaveOffset =
7206 Subtarget.getFrameLowering()->getTOCSaveOffset();
7208 setUsesTOCBasePtr(DAG);
7209 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7210 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7211 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7212 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7213 Chain = DAG.getStore(
7214 Val.getValue(1), dl, Val, AddPtr,
        MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
7222 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  const int SPDiff = 0;
7227 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
7228 /* unused except on PPC64 ELFv1 */ false, DAG, RegsToPass,
                    InFlag, Chain, CallSeqStart, Callee, SPDiff, NumBytes, Ins,
                    InVals, CS);
}

bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7235 MachineFunction &MF, bool isVarArg,
7236 const SmallVectorImpl<ISD::OutputArg> &Outs,
7237 LLVMContext &Context) const {
7238 SmallVector<CCValAssign, 16> RVLocs;
7239 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7240 return CCInfo.CheckReturn(
      Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                ? RetCC_PPC_Cold
                : RetCC_PPC);
}

SDValue
PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
7249 const SmallVectorImpl<ISD::OutputArg> &Outs,
7250 const SmallVectorImpl<SDValue> &OutVals,
7251 const SDLoc &dl, SelectionDAG &DAG) const {
7252 SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
7255 CCInfo.AnalyzeReturn(Outs,
                       (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
                           ? RetCC_PPC_Cold
                           : RetCC_PPC);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
7263 // Copy the result values into the output registers.
7264 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7265 CCValAssign &VA = RVLocs[i];
7266 assert(VA.isRegLoc() && "Can only return in registers!");
7268 SDValue Arg = OutVals[RealResIdx];
7270 switch (VA.getLocInfo()) {
7271 default: llvm_unreachable("Unknown loc info!");
7272 case CCValAssign::Full: break;
7273 case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }
7283 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7284 bool isLittleEndian = Subtarget.isLittleEndian();
      // Legalize ret f64 -> ret 2 x i32.
      SDValue SVal =
          DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7288 DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7289 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7290 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7291 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7292 DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7293 Flag = Chain.getValue(1);
7294 VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7298 Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
7303 const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {

      if (PPC::G8RCRegClass.contains(*I))
7309 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
7310 else if (PPC::F8RCRegClass.contains(*I))
7311 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
7312 else if (PPC::CRRCRegClass.contains(*I))
7313 RetOps.push_back(DAG.getRegister(*I, MVT::i1));
7314 else if (PPC::VRRCRegClass.contains(*I))
7315 RetOps.push_back(DAG.getRegister(*I, MVT::Other));
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain; // Update chain.
  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);
  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc dl(Op);
7335 // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
7339 SDValue Chain = Op.getOperand(0);
7340 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7341 // Build a DYNAREAOFFSET node.
7342 SDValue Ops[2] = {Chain, FPSIdx};
7343 SDVTList VTs = DAG.getVTList(IntVT);
7344 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7347 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7348 SelectionDAG &DAG) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

7352 // Get the correct type for pointers.
7353 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7355 // Construct the stack pointer operand.
7356 bool isPPC64 = Subtarget.isPPC64();
7357 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7358 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7360 // Get the operands for the STACKRESTORE.
7361 SDValue Chain = Op.getOperand(0);
7362 SDValue SaveSP = Op.getOperand(1);
7364 // Load the old link SP.
7365 SDValue LoadLinkSP =
7366 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7368 // Restore the stack pointer.
7369 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7371 // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}
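// Sketch of the effect (r1 is the stack pointer): the lowering reloads the
// saved back chain from 0(r1), moves the requested SP value into r1, and
// rewrites the back chain at the new 0(r1) so stack walks stay intact.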
7375 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7376 MachineFunction &MF = DAG.getMachineFunction();
7377 bool isPPC64 = Subtarget.isPPC64();
7378 EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Get the current return address save index. The users of this index will
  // be primarily DYNALLOC instructions.
7382 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7383 int RASI = FI->getReturnAddrSaveIndex();
  // If the return address save index hasn't been defined yet,
  if (!RASI) {
    // find out the fixed offset of the return address save area
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // and allocate the frame index for it.
    RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
7398 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7399 MachineFunction &MF = DAG.getMachineFunction();
7400 bool isPPC64 = Subtarget.isPPC64();
7401 EVT PtrVT = getPointerTy(MF.getDataLayout());
7403 // Get current frame pointer save index. The users of this index will be
7404 // primarily DYNALLOC instructions.
7405 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7406 int FPSI = FI->getFramePointerSaveIndex();
  // If the frame pointer save index hasn't been defined yet,
  if (!FPSI) {
    // find out the fixed offset of the frame pointer save area
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // and allocate the frame index for it.
    FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}
7420 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDLoc dl(Op);
7427 // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7431 DAG.getConstant(0, dl, PtrVT), Size);
7432 // Construct a node for the frame pointer save index.
7433 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7434 // Build a DYNALLOC node.
7435 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7436 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}
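// Illustrative flow: lowering "alloca(n)" feeds NegSize = 0 - n into the
// DYNALLOC node, which adjusts r1 downward by n while keeping the frame
// pointer save slot (FPSIdx) reachable for the epilog.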
7440 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7441 SelectionDAG &DAG) const {
7442 MachineFunction &MF = DAG.getMachineFunction();
7444 bool isPPC64 = Subtarget.isPPC64();
7445 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7447 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7448 return DAG.getFrameIndex(FI, PtrVT);
7451 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7455 DAG.getVTList(MVT::i32, MVT::Other),
7456 Op.getOperand(0), Op.getOperand(1));
7459 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7463 Op.getOperand(0), Op.getOperand(1));
7466 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7467 if (Op.getValueType().isVector())
7468 return LowerVectorLoad(Op, DAG);
7470 assert(Op.getValueType() == MVT::i1 &&
7471 "Custom lowering only for i1 loads");
  // First, load 8 bits into 32 bits, then truncate to 1 bit.

  SDLoc dl(Op);
  LoadSDNode *LD = cast<LoadSDNode>(Op);
7478 SDValue Chain = LD->getChain();
7479 SDValue BasePtr = LD->getBasePtr();
7480 MachineMemOperand *MMO = LD->getMemOperand();
  SDValue NewLD =
      DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7484 BasePtr, MVT::i8, MMO);
7485 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7487 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7488 return DAG.getMergeValues(Ops, dl);
7491 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7492 if (Op.getOperand(1).getValueType().isVector())
7493 return LowerVectorStore(Op, DAG);
7495 assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7496 "Custom lowering only for i1 stores");
  // First, zero extend to 32 bits, then use a truncating store to 8 bits.

  SDLoc dl(Op);
  StoreSDNode *ST = cast<StoreSDNode>(Op);
7503 SDValue Chain = ST->getChain();
7504 SDValue BasePtr = ST->getBasePtr();
7505 SDValue Value = ST->getValue();
7506 MachineMemOperand *MMO = ST->getMemOperand();
  Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
                      Value);
  return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
}
7513 // FIXME: Remove this once the ANDI glue bug is fixed:
7514 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7515 assert(Op.getValueType() == MVT::i1 &&
7516 "Custom lowering only for i1 results");
7519 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7522 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7523 SelectionDAG &DAG) const {
7525 // Implements a vector truncate that fits in a vector register as a shuffle.
7526 // We want to legalize vector truncates down to where the source fits in
7527 // a vector register (and target is therefore smaller than vector register
7528 // size). At that point legalization will try to custom lower the sub-legal
  // result and get here - where we can contain the truncate as a single target
  // operation.
7532 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7533 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
  // We will implement it for big-endian ordering as this (where x denotes
  // undef):
  //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7538 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7540 // The same operation in little-endian ordering will be:
7541 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7542 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
  assert(Op.getValueType().isVector() && "Vector type expected.");

  SDLoc DL(Op);
  SDValue N1 = Op.getOperand(0);
7548 unsigned SrcSize = N1.getValueType().getSizeInBits();
7549 assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
7550 SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7552 EVT TrgVT = Op.getValueType();
7553 unsigned TrgNumElts = TrgVT.getVectorNumElements();
7554 EVT EltVT = TrgVT.getVectorElementType();
7555 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7556 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7558 // First list the elements we want to keep.
7559 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7560 SmallVector<int, 16> ShuffV;
7561 if (Subtarget.isLittleEndian())
7562 for (unsigned i = 0; i < TrgNumElts; ++i)
7563 ShuffV.push_back(i * SizeMult);
7565 for (unsigned i = 1; i <= TrgNumElts; ++i)
7566 ShuffV.push_back(i * SizeMult - 1);
7568 // Populate the remaining elements with undefs.
7569 for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7570 // ShuffV.push_back(i + WideNumElts);
7571 ShuffV.push_back(WideNumElts + 1);
7573 SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
  return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
}
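// Concrete mask sketch for a v8i16 -> v8i8 truncate (SizeMult == 2):
// little-endian keeps byte lanes {0,2,4,...,14}, big-endian keeps
// {1,3,...,15}, and the remaining eight lanes are don't-care values taken
// from the UNDEF operand.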
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
/// possible.
7579 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7580 // Not FP? Not a fsel.
7581 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
      !Op.getOperand(2).getValueType().isFloatingPoint())
    return Op;
7585 bool HasNoInfs = DAG.getTarget().Options.NoInfsFPMath;
7586 bool HasNoNaNs = DAG.getTarget().Options.NoNaNsFPMath;
7587 // We might be able to do better than this under some circumstances, but in
7588 // general, fsel-based lowering of select is a finite-math-only optimization.
7589 // For more information, see section F.3 of the 2.06 ISA specification.
7590 // With ISA 3.0, we have xsmaxcdp/xsmincdp which are OK to emit even in the
7591 // presence of infinities.
  if (!Subtarget.hasP9Vector() && (!HasNoInfs || !HasNoNaNs))
    return Op;

7594 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7596 EVT ResVT = Op.getValueType();
7597 EVT CmpVT = Op.getOperand(0).getValueType();
7598 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
  SDLoc dl(Op);

  if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
    switch (CC) {
    default:
      // Not a min/max but with finite math, we may still be able to use fsel.
      if (HasNoInfs && HasNoNaNs)
        break;
      return Op;
    case ISD::SETOGT:
    case ISD::SETGT:
      return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
    case ISD::SETOLT:
    case ISD::SETLT:
      return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
    }
  }
  // TODO: Propagate flags from the select rather than global settings.
  SDNodeFlags Flags;
  Flags.setNoInfs(true);
7621 Flags.setNoNaNs(true);
7623 // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETNE:
      std::swap(TV, FV);
      LLVM_FALLTHROUGH;
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
      LLVM_FALLTHROUGH;
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
      LLVM_FALLTHROUGH;
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }
  SDValue Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
  case ISD::SETNE:
    std::swap(TV, FV);
    LLVM_FALLTHROUGH;
  case ISD::SETEQ:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
    if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                       DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOLE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  }
  return Op;
}
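// Worked example for the lowering above (finite math assumed):
//   select_cc (setge x, y), t, f
// becomes fsel(x - y, t, f): fsel yields its second operand when the first
// compares greater than or equal to +/-0.0, and the third otherwise.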
void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                               SelectionDAG &DAG,
                                               const SDLoc &dl) const {
7707 assert(Op.getOperand(0).getValueType().isFloatingPoint());
7708 SDValue Src = Op.getOperand(0);
7709 if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);

  SDValue Tmp;
  switch (Op.getSimpleValueType().SimpleTy) {
  default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(
        Op.getOpcode() == ISD::FP_TO_SINT
            ? PPCISD::FCTIWZ
            : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
        dl, MVT::f64, Src);
    break;
  case MVT::i64:
    assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
           "i64 FP_TO_UINT is supported only with FPCVT");
    Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ
                                                      : PPCISD::FCTIDUZ,
                      dl, MVT::f64, Src);
    break;
  }
7731 // Convert the FP value to an int value through memory.
7732 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7733 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
7734 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7735 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7736 MachinePointerInfo MPI =
7737 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7739 // Emit a store to the stack slot.
7740 SDValue Chain;
7741 unsigned Alignment = DAG.getEVTAlignment(Tmp.getValueType());
7742 if (i32Stack) {
7743 MachineFunction &MF = DAG.getMachineFunction();
7744 Alignment = 4;
7745 MachineMemOperand *MMO =
7746 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
7747 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
7748 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7749 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7750 } else
7751 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);
7753 // Result is a load from the stack slot. If loading 4 bytes, make sure to
7754 // add in a bias on big endian.
7755 if (Op.getValueType() == MVT::i32 && !i32Stack) {
7756 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7757 DAG.getConstant(4, dl, FIPtr.getValueType()));
7758 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7759 }
7761 RLI.Chain = Chain;
7762 RLI.Ptr = FIPtr;
7763 RLI.MPI = MPI;
7764 RLI.Alignment = Alignment;
7765 }
7767 /// Custom lowers floating point to integer conversions to use
7768 /// the direct move instructions available in ISA 2.07 to avoid the
7769 /// need for load/store combinations.
7770 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7771 SelectionDAG &DAG,
7772 const SDLoc &dl) const {
7773 assert(Op.getOperand(0).getValueType().isFloatingPoint());
7774 SDValue Src = Op.getOperand(0);
7776 if (Src.getValueType() == MVT::f32)
7777 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7779 SDValue Tmp;
7780 switch (Op.getSimpleValueType().SimpleTy) {
7781 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7782 case MVT::i32:
7783 Tmp = DAG.getNode(
7784 Op.getOpcode() == ISD::FP_TO_SINT
7785 ? PPCISD::FCTIWZ
7786 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
7787 dl, MVT::f64, Src);
7788 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
7789 break;
7790 case MVT::i64:
7791 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
7792 "i64 FP_TO_UINT is supported only with FPCVT");
7793 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
7794 PPCISD::FCTIDUZ,
7795 dl, MVT::f64, Src);
7796 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
7797 break;
7798 }
7799 return Tmp;
7800 }
7802 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7803 const SDLoc &dl) const {
7805 // FP to INT conversions are legal for f128.
7806 if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128))
7807 return Op;
7809 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
7810 // PPC (the libcall is not available).
7811 if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
7812 if (Op.getValueType() == MVT::i32) {
7813 if (Op.getOpcode() == ISD::FP_TO_SINT) {
7814 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7815 MVT::f64, Op.getOperand(0),
7816 DAG.getIntPtrConstant(0, dl));
7817 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
7818 MVT::f64, Op.getOperand(0),
7819 DAG.getIntPtrConstant(1, dl));
7821 // Add the two halves of the long double in round-to-zero mode.
7822 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
7824 // Now use a smaller FP_TO_SINT.
7825 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
7826 }
7827 if (Op.getOpcode() == ISD::FP_TO_UINT) {
7828 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
7829 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
7830 SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
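// Editorial note (not from the original source): 0x41e0000000000000 is the
// IEEE-754 double encoding of 2^31 (biased exponent 0x41e = 1054 = 1023 + 31,
// zero mantissa), so APF holds exactly 2^31 as a ppc_fp128 value.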
7831 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
7832 // FIXME: generated code sucks.
7833 // TODO: Are there fast-math-flags to propagate to this FSUB?
7834 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
7835 Op.getOperand(0), Tmp);
7836 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7837 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
7838 DAG.getConstant(0x80000000, dl, MVT::i32));
7839 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
7840 Op.getOperand(0));
7841 return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
7842 ISD::SETGE);
7843 }
7844 }
7846 return SDValue();
7847 }
7849 if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7850 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7852 ReuseLoadInfo RLI;
7853 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7855 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7856 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
7857 }
7859 // We're trying to insert a regular store, S, and then a load, L. If the
7860 // incoming value, O, is a load, we might just be able to have our load use the
7861 // address used by O. However, we don't know if anything else will store to
7862 // that address before we can load from it. To prevent this situation, we need
7863 // to insert our load, L, into the chain as a peer of O. To do this, we give L
7864 // the same chain operand as O, we create a token factor from the chain results
7865 // of O and L, and we replace all uses of O's chain result with that token
7866 // factor (see spliceIntoChain below for this last part).
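// Editorial sketch (assumed illustration, not part of the original source):
// if the DAG already contains
//   O = load chain0, addr
// and we emit a reusing load
//   L = load chain0, addr              // L is given O's chain operand
//   TF = TokenFactor(O.chain, L.chain)
// then replacing all uses of O's chain result with TF keeps every store that
// was ordered after O also ordered after L, so nothing can clobber addr
// between the two loads.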
7867 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
7868 ReuseLoadInfo &RLI,
7869 SelectionDAG &DAG,
7870 ISD::LoadExtType ET) const {
7871 SDLoc dl(Op);
7872 bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
7873 (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
7874 if (ET == ISD::NON_EXTLOAD &&
7875 (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
7876 isOperationLegalOrCustom(Op.getOpcode(),
7877 Op.getOperand(0).getValueType())) {
7879 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7880 return true;
7881 }
7883 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
7884 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
7885 LD->isNonTemporal())
7886 return false;
7887 if (LD->getMemoryVT() != MemVT)
7888 return false;
7890 RLI.Ptr = LD->getBasePtr();
7891 if (LD->isIndexed() && !LD->getOffset().isUndef()) {
7892 assert(LD->getAddressingMode() == ISD::PRE_INC &&
7893 "Non-pre-inc AM on PPC?");
7894 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
7895 LD->getOffset());
7896 }
7898 RLI.Chain = LD->getChain();
7899 RLI.MPI = LD->getPointerInfo();
7900 RLI.IsDereferenceable = LD->isDereferenceable();
7901 RLI.IsInvariant = LD->isInvariant();
7902 RLI.Alignment = LD->getAlignment();
7903 RLI.AAInfo = LD->getAAInfo();
7904 RLI.Ranges = LD->getRanges();
7906 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
7907 return true;
7908 }
7910 // Given the head of the old chain, ResChain, insert a token factor containing
7911 // it and NewResChain, and make users of ResChain now be users of that token
7912 // factor.
7913 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
7914 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
7915 SDValue NewResChain,
7916 SelectionDAG &DAG) const {
7917 if (!ResChain)
7918 return;
7920 SDLoc dl(NewResChain);
7922 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
7923 NewResChain, DAG.getUNDEF(MVT::Other));
7924 assert(TF.getNode() != NewResChain.getNode() &&
7925 "A new TF really is required here");
7927 DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
7928 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
7929 }
7931 /// Analyze the profitability of a direct move: prefer a plain
7932 /// floating-point load over an integer load plus a direct move
7933 /// when the loaded integer value has no integer uses.
7934 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
7935 SDNode *Origin = Op.getOperand(0).getNode();
7936 if (Origin->getOpcode() != ISD::LOAD)
7937 return true;
7939 // If there is no LXSIBZX/LXSIHZX, like Power8,
7940 // prefer direct move if the memory size is 1 or 2 bytes.
7941 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
7942 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
7943 return true;
7945 for (SDNode::use_iterator UI = Origin->use_begin(),
7946 UE = Origin->use_end();
7947 UI != UE; ++UI) {
7949 // Only look at the users of the loaded value.
7950 if (UI.getUse().get().getResNo() != 0)
7951 continue;
7953 if (UI->getOpcode() != ISD::SINT_TO_FP &&
7954 UI->getOpcode() != ISD::UINT_TO_FP)
7955 return true;
7956 }
7958 return false;
7959 }
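// Editorial example (assumed): for code like
//   float f = (float)*p;   // p is 'int *'
// the i32 load's only user is the SINT_TO_FP, so this returns false and the
// lowering prefers a floating-point load (e.g. lfiwax) over an integer load
// followed by a GPR-to-VSR direct move.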
7961 /// Custom lowers integer to floating point conversions to use
7962 /// the direct move instructions available in ISA 2.07 to avoid the
7963 /// need for load/store combinations.
7964 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
7965 SelectionDAG &DAG,
7966 const SDLoc &dl) const {
7967 assert((Op.getValueType() == MVT::f32 ||
7968 Op.getValueType() == MVT::f64) &&
7969 "Invalid floating point type as target of conversion");
7970 assert(Subtarget.hasFPCVT() &&
7971 "Int to FP conversions with direct moves require FPCVT");
7972 SDValue FP;
7973 SDValue Src = Op.getOperand(0);
7974 bool SinglePrec = Op.getValueType() == MVT::f32;
7975 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
7976 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
7977 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
7978 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);
7980 if (WordInt) {
7981 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
7982 dl, MVT::f64, Src);
7983 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7984 }
7985 else {
7986 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
7987 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
7988 }
7990 return FP;
7991 }
7993 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
7995 EVT VecVT = Vec.getValueType();
7996 assert(VecVT.isVector() && "Expected a vector type.");
7997 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
7999 EVT EltVT = VecVT.getVectorElementType();
8000 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8001 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8003 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8004 SmallVector<SDValue, 16> Ops(NumConcat);
8005 Ops[0] = Vec;
8006 SDValue UndefVec = DAG.getUNDEF(VecVT);
8007 for (unsigned i = 1; i < NumConcat; ++i)
8008 Ops[i] = UndefVec;
8010 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8011 }
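// Editorial example (assumed): widening a v2i32 input gives WideNumElts =
// 128 / 32 = 4 and NumConcat = 2, so the result is
// concat_vectors(Vec, undef) : v4i32.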
8013 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8014 const SDLoc &dl) const {
8016 unsigned Opc = Op.getOpcode();
8017 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
8018 "Unexpected conversion type");
8019 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8020 "Supports conversions to v2f64/v4f32 only.");
8022 bool SignedConv = Opc == ISD::SINT_TO_FP;
8023 bool FourEltRes = Op.getValueType() == MVT::v4f32;
8025 SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
8026 EVT WideVT = Wide.getValueType();
8027 unsigned WideNumElts = WideVT.getVectorNumElements();
8028 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8030 SmallVector<int, 16> ShuffV;
8031 for (unsigned i = 0; i < WideNumElts; ++i)
8032 ShuffV.push_back(i + WideNumElts);
8034 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8035 int SaveElts = FourEltRes ? 4 : 2;
8036 if (Subtarget.isLittleEndian())
8037 for (int i = 0; i < SaveElts; i++)
8038 ShuffV[i * Stride] = i;
8039 else
8040 for (int i = 1; i <= SaveElts; i++)
8041 ShuffV[i * Stride - 1] = i - 1;
8043 SDValue ShuffleSrc2 =
8044 SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8045 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8046 unsigned ExtendOp =
8047 SignedConv ? (unsigned)PPCISD::SExtVElems : (unsigned)ISD::BITCAST;
8049 SDValue Extend;
8050 if (!Subtarget.hasP9Altivec() && SignedConv) {
8051 Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8052 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8053 DAG.getValueType(Op.getOperand(0).getValueType()));
8054 } else
8055 Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange);
8057 return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8058 }
8060 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8061 SelectionDAG &DAG) const {
8062 SDLoc dl(Op);
8064 EVT InVT = Op.getOperand(0).getValueType();
8065 EVT OutVT = Op.getValueType();
8066 if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8067 isOperationCustom(Op.getOpcode(), InVT))
8068 return LowerINT_TO_FPVector(Op, DAG, dl);
8070 // Conversions to f128 are legal.
8071 if (EnableQuadPrecision && (Op.getValueType() == MVT::f128))
8072 return Op;
8074 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
8075 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
8076 return SDValue();
8078 SDValue Value = Op.getOperand(0);
8079 // The values are now known to be -1 (false) or 1 (true). To convert this
8080 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
8081 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
8082 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
8084 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
8086 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8088 if (Op.getValueType() != MVT::v4f64)
8089 Value = DAG.getNode(ISD::FP_ROUND, dl,
8090 Op.getValueType(), Value,
8091 DAG.getIntPtrConstant(1, dl));
8092 return Value;
8093 }
8095 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8096 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8097 return SDValue();
8099 if (Op.getOperand(0).getValueType() == MVT::i1)
8100 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
8101 DAG.getConstantFP(1.0, dl, Op.getValueType()),
8102 DAG.getConstantFP(0.0, dl, Op.getValueType()));
8104 // If we have direct moves, we can do all the conversion, skip the store/load
8105 // however, without FPCVT we can't do most conversions.
8106 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8107 Subtarget.isPPC64() && Subtarget.hasFPCVT())
8108 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8110 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
8111 "UINT_TO_FP is supported only with FPCVT");
8113 // If we have FCFIDS, then use it when converting to single-precision.
8114 // Otherwise, convert to double-precision and then round.
8115 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
8116 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
8117 : PPCISD::FCFIDS)
8118 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
8119 : PPCISD::FCFID);
8120 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
8121 ? MVT::f32
8122 : MVT::f64;
8124 if (Op.getOperand(0).getValueType() == MVT::i64) {
8125 SDValue SINT = Op.getOperand(0);
8126 // When converting to single-precision, we actually need to convert
8127 // to double-precision first and then round to single-precision.
8128 // To avoid double-rounding effects during that operation, we have
8129 // to prepare the input operand. Bits that might be truncated when
8130 // converting to double-precision are replaced by a bit that won't
8131 // be lost at this stage, but is below the single-precision rounding
8132 // position.
8134 // However, if -enable-unsafe-fp-math is in effect, accept double
8135 // rounding to avoid the extra overhead.
8136 if (Op.getValueType() == MVT::f32 &&
8137 !Subtarget.hasFPCVT() &&
8138 !DAG.getTarget().Options.UnsafeFPMath) {
8140 // Twiddle input to make sure the low 11 bits are zero. (If this
8141 // is the case, we are guaranteed the value will fit into the 53 bit
8142 // mantissa of an IEEE double-precision value without rounding.)
8143 // If any of those low 11 bits were not zero originally, make sure
8144 // bit 12 (value 2048) is set instead, so that the final rounding
8145 // to single-precision gets the correct result.
8146 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8147 SINT, DAG.getConstant(2047, dl, MVT::i64));
8148 Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8149 Round, DAG.getConstant(2047, dl, MVT::i64));
8150 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8151 Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8152 Round, DAG.getConstant(-2048, dl, MVT::i64));
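// Editorial worked example (assumed): if any of the low 11 bits of SINT are
// set, (SINT & 2047) + 2047 carries into bit 11, so the OR forces bit 11 on
// and the final AND clears bits 0-10; if the low 11 bits are already zero,
// the sequence leaves SINT unchanged.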
8154 // However, we cannot use that value unconditionally: if the magnitude
8155 // of the input value is small, the bit-twiddling we did above might
8156 // end up visibly changing the output. Fortunately, in that case, we
8157 // don't need to twiddle bits since the original input will convert
8158 // exactly to double-precision floating-point already. Therefore,
8159 // construct a conditional to use the original value if the top 11
8160 // bits are all sign-bit copies, and use the rounded value computed
8161 // from it otherwise.
8162 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8163 SINT, DAG.getConstant(53, dl, MVT::i32));
8164 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8165 Cond, DAG.getConstant(1, dl, MVT::i64));
8166 Cond = DAG.getSetCC(dl, MVT::i32,
8167 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8169 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8170 }
8172 ReuseLoadInfo RLI;
8173 SDValue Bits;
8175 MachineFunction &MF = DAG.getMachineFunction();
8176 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8177 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8178 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8179 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8180 } else if (Subtarget.hasLFIWAX() &&
8181 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8182 MachineMemOperand *MMO =
8183 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8184 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8185 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8186 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8187 DAG.getVTList(MVT::f64, MVT::Other),
8188 Ops, MVT::i32, MMO);
8189 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8190 } else if (Subtarget.hasFPCVT() &&
8191 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8192 MachineMemOperand *MMO =
8193 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8194 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8195 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8196 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8197 DAG.getVTList(MVT::f64, MVT::Other),
8198 Ops, MVT::i32, MMO);
8199 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8200 } else if (((Subtarget.hasLFIWAX() &&
8201 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8202 (Subtarget.hasFPCVT() &&
8203 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8204 SINT.getOperand(0).getValueType() == MVT::i32) {
8205 MachineFrameInfo &MFI = MF.getFrameInfo();
8206 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8208 int FrameIdx = MFI.CreateStackObject(4, 4, false);
8209 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8211 SDValue Store =
8212 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
8213 MachinePointerInfo::getFixedStack(
8214 DAG.getMachineFunction(), FrameIdx));
8216 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8217 "Expected an i32 store");
8219 RLI.Ptr = FIdx;
8220 RLI.Chain = Store;
8221 RLI.MPI =
8222 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8223 RLI.Alignment = 4;
8225 MachineMemOperand *MMO =
8226 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8227 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8228 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8229 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8230 PPCISD::LFIWZX : PPCISD::LFIWAX,
8231 dl, DAG.getVTList(MVT::f64, MVT::Other),
8232 Ops, MVT::i32, MMO);
8233 } else
8234 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8236 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);
8238 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8239 FP = DAG.getNode(ISD::FP_ROUND, dl,
8240 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
8241 return FP;
8242 }
8244 assert(Op.getOperand(0).getValueType() == MVT::i32 &&
8245 "Unhandled INT_TO_FP type in custom expander!");
8246 // Since we only generate this in 64-bit mode, we can take advantage of
8247 // 64-bit registers. In particular, sign extend the input value into the
8248 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8249 // then lfd it and fcfid it.
8250 MachineFunction &MF = DAG.getMachineFunction();
8251 MachineFrameInfo &MFI = MF.getFrameInfo();
8252 EVT PtrVT = getPointerTy(MF.getDataLayout());
8254 SDValue Ld;
8255 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8256 ReuseLoadInfo RLI;
8257 bool ReusingLoad;
8258 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
8259 DAG))) {
8260 int FrameIdx = MFI.CreateStackObject(4, 4, false);
8261 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8263 SDValue Store =
8264 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
8265 MachinePointerInfo::getFixedStack(
8266 DAG.getMachineFunction(), FrameIdx));
8268 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8269 "Expected an i32 store");
8271 RLI.Ptr = FIdx;
8272 RLI.Chain = Store;
8273 RLI.MPI =
8274 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8275 RLI.Alignment = 4;
8276 }
8278 MachineMemOperand *MMO =
8279 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8280 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8281 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8282 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
8283 PPCISD::LFIWZX : PPCISD::LFIWAX,
8284 dl, DAG.getVTList(MVT::f64, MVT::Other),
8285 Ops, MVT::i32, MMO);
8286 if (ReusingLoad)
8287 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8288 } else {
8289 assert(Subtarget.isPPC64() &&
8290 "i32->FP without LFIWAX supported only on PPC64");
8292 int FrameIdx = MFI.CreateStackObject(8, 8, false);
8293 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8295 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
8296 Op.getOperand(0));
8298 // STD the extended value into the stack slot.
8299 SDValue Store = DAG.getStore(
8300 DAG.getEntryNode(), dl, Ext64, FIdx,
8301 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8303 // Load the value as a double.
8304 Ld = DAG.getLoad(
8305 MVT::f64, dl, Store, FIdx,
8306 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8307 }
8309 // FCFID it and return it.
8310 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
8311 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
8312 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8313 DAG.getIntPtrConstant(0, dl));
8314 return FP;
8315 }
8317 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8318 SelectionDAG &DAG) const {
8319 SDLoc dl(Op);
8320 /*
8321 The rounding mode is in bits 30:31 of FPSCR, and has the following
8322 settings:
8323 00 Round to nearest
8324 01 Round to 0
8325 10 Round to +inf
8326 11 Round to -inf
8328 FLT_ROUNDS, on the other hand, expects the following:
8329 -1 Undefined
8330 0 Round to 0
8331 1 Round to nearest
8332 2 Round to +inf
8333 3 Round to -inf
8335 To perform the conversion, we do:
8336 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8337 */
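// Editorial worked example (assumed): RN = 0b10 (round toward +infinity)
// gives (FPSCR & 3) = 2 and ((~FPSCR & 3) >> 1) = (1 >> 1) = 0, so the XOR
// yields 2, FLT_ROUNDS' encoding of round-to-+inf; likewise RN = 0b00
// (round to nearest) maps to 0 ^ (3 >> 1) = 1.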
8339 MachineFunction &MF = DAG.getMachineFunction();
8340 EVT VT = Op.getValueType();
8341 EVT PtrVT = getPointerTy(MF.getDataLayout());
8343 // Save FP Control Word to register
8344 EVT NodeTys[] = {
8345 MVT::f64, // return register
8346 MVT::Glue // unused in this context
8347 };
8348 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);
8350 // Save FP register to stack slot
8351 int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
8352 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8353 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
8354 MachinePointerInfo());
8356 // Load FP Control Word from low 32 bits of stack slot.
8357 SDValue Four = DAG.getConstant(4, dl, PtrVT);
8358 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8359 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());
8361 // Transform as necessary
8362 SDValue CWD1 =
8363 DAG.getNode(ISD::AND, dl, MVT::i32,
8364 CWD, DAG.getConstant(3, dl, MVT::i32));
8365 SDValue CWD2 =
8366 DAG.getNode(ISD::SRL, dl, MVT::i32,
8367 DAG.getNode(ISD::AND, dl, MVT::i32,
8368 DAG.getNode(ISD::XOR, dl, MVT::i32,
8369 CWD, DAG.getConstant(3, dl, MVT::i32)),
8370 DAG.getConstant(3, dl, MVT::i32)),
8371 DAG.getConstant(1, dl, MVT::i32));
8373 SDValue RetVal =
8374 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8376 return DAG.getNode((VT.getSizeInBits() < 16 ?
8377 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
8378 }
8380 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8381 EVT VT = Op.getValueType();
8382 unsigned BitWidth = VT.getSizeInBits();
8383 SDLoc dl(Op);
8384 assert(Op.getNumOperands() == 3 &&
8385 VT == Op.getOperand(1).getValueType() &&
8386 "Unexpected SHL!");
8388 // Expand into a bunch of logical ops. Note that these ops
8389 // depend on the PPC behavior for oversized shift amounts.
8390 SDValue Lo = Op.getOperand(0);
8391 SDValue Hi = Op.getOperand(1);
8392 SDValue Amt = Op.getOperand(2);
8393 EVT AmtVT = Amt.getValueType();
8395 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8396 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8397 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8398 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8399 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8400 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8401 DAG.getConstant(-BitWidth, dl, AmtVT));
8402 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8403 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8404 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8405 SDValue OutOps[] = { OutLo, OutHi };
8406 return DAG.getMergeValues(OutOps, dl);
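// Editorial sketch (assumed illustration): for 32-bit parts the expansion
// above computes
//   OutHi = (Hi << Amt) | (Lo >> (32 - Amt)) | (Lo << (Amt - 32))
//   OutLo = Lo << Amt
// relying on PPC shifts yielding zero for amounts of the bit width or more,
// so the out-of-range terms vanish for any Amt in [0, 63].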
8409 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8410 EVT VT = Op.getValueType();
8411 SDLoc dl(Op);
8412 unsigned BitWidth = VT.getSizeInBits();
8413 assert(Op.getNumOperands() == 3 &&
8414 VT == Op.getOperand(1).getValueType() &&
8415 "Unexpected SRL!");
8417 // Expand into a bunch of logical ops. Note that these ops
8418 // depend on the PPC behavior for oversized shift amounts.
8419 SDValue Lo = Op.getOperand(0);
8420 SDValue Hi = Op.getOperand(1);
8421 SDValue Amt = Op.getOperand(2);
8422 EVT AmtVT = Amt.getValueType();
8424 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8425 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8426 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8427 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8428 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8429 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8430 DAG.getConstant(-BitWidth, dl, AmtVT));
8431 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8432 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8433 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8434 SDValue OutOps[] = { OutLo, OutHi };
8435 return DAG.getMergeValues(OutOps, dl);
8438 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8439 SDLoc dl(Op);
8440 EVT VT = Op.getValueType();
8441 unsigned BitWidth = VT.getSizeInBits();
8442 assert(Op.getNumOperands() == 3 &&
8443 VT == Op.getOperand(1).getValueType() &&
8444 "Unexpected SRA!");
8446 // Expand into a bunch of logical ops, followed by a select_cc.
8447 SDValue Lo = Op.getOperand(0);
8448 SDValue Hi = Op.getOperand(1);
8449 SDValue Amt = Op.getOperand(2);
8450 EVT AmtVT = Amt.getValueType();
8452 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8453 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8454 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8455 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8456 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8457 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8458 DAG.getConstant(-BitWidth, dl, AmtVT));
8459 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8460 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8461 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8462 Tmp4, Tmp6, ISD::SETLE);
8463 SDValue OutOps[] = { OutLo, OutHi };
8464 return DAG.getMergeValues(OutOps, dl);
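// Editorial note (assumed): unlike LowerSRL_PARTS, once Amt exceeds BitWidth
// the low word must be Tmp6 = Hi >>a (Amt - BitWidth), since only the
// arithmetic shift of Hi supplies the sign-fill bits; hence the select on
// Tmp5 = Amt - BitWidth above.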
8467 //===----------------------------------------------------------------------===//
8468 // Vector related lowering.
8469 //
8471 /// BuildSplatI - Build a canonical splati of Val with an element size of
8472 /// SplatSize. Cast the result to VT.
8473 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
8474 SelectionDAG &DAG, const SDLoc &dl) {
8475 static const MVT VTys[] = { // canonical VT to use for each size.
8476 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8477 };
8479 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8481 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
8482 if (Val == -1)
8483 SplatSize = 1;
8485 EVT CanonicalVT = VTys[SplatSize-1];
8487 // Build a canonical splat for this value.
8488 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8489 }
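// Editorial example (assumed): BuildSplatI(-1, 4, MVT::v4i32, ...) is
// canonicalized to a one-byte splat (vspltisb -1), since -1 has the same bit
// pattern at every element size, and the result is bitcast back to v4i32.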
8491 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8492 /// specified intrinsic ID.
8493 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8494 const SDLoc &dl, EVT DestVT = MVT::Other) {
8495 if (DestVT == MVT::Other) DestVT = Op.getValueType();
8496 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8497 DAG.getConstant(IID, dl, MVT::i32), Op);
8498 }
8500 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8501 /// specified intrinsic ID.
8502 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8503 SelectionDAG &DAG, const SDLoc &dl,
8504 EVT DestVT = MVT::Other) {
8505 if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8506 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8507 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8508 }
8510 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8511 /// specified intrinsic ID.
8512 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8513 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8514 EVT DestVT = MVT::Other) {
8515 if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8516 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8517 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8518 }
8520 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8521 /// amount. The result has the specified value type.
8522 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8523 SelectionDAG &DAG, const SDLoc &dl) {
8524 // Force LHS/RHS to be the right type.
8525 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8526 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8528 int Ops[16];
8529 for (unsigned i = 0; i != 16; ++i)
8530 Ops[i] = i + Amt;
8531 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8532 return DAG.getNode(ISD::BITCAST, dl, VT, T);
8533 }
8535 /// Do we have an efficient pattern in a .td file for this node?
8537 /// \param V - pointer to the BuildVectorSDNode being matched
8538 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8540 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8541 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8542 /// the opposite is true (expansion is beneficial) are:
8543 /// - The node builds a vector out of integers that are not 32 or 64-bits
8544 /// - The node builds a vector out of constants
8545 /// - The node is a "load-and-splat"
8546 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8547 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8548 bool HasDirectMove,
8549 bool HasP8Vector) {
8550 EVT VecVT = V->getValueType(0);
8551 bool RightType = VecVT == MVT::v2f64 ||
8552 (HasP8Vector && VecVT == MVT::v4f32) ||
8553 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8554 if (!RightType)
8555 return false;
8557 bool IsSplat = true;
8558 bool IsLoad = false;
8559 SDValue Op0 = V->getOperand(0);
8561 // This function is called in a block that confirms the node is not a constant
8562 // splat. So a constant BUILD_VECTOR here means the vector is built out of
8563 // different constants.
8564 if (V->isConstant())
8565 return false;
8566 for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8567 if (V->getOperand(i).isUndef())
8568 continue;
8569 // We want to expand nodes that represent load-and-splat even if the
8570 // loaded value is a floating point truncation or conversion to int.
8571 if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8572 (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8573 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8574 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8575 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8576 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8577 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8578 IsLoad = true;
8579 // If the operands are different or the input is not a load and has more
8580 // uses than just this BV node, then it isn't a splat.
8581 if (V->getOperand(i) != Op0 ||
8582 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8583 IsSplat = false;
8584 }
8585 return !(IsSplat && IsLoad);
8586 }
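// Editorial example (assumed): a v4i32 BUILD_VECTOR of four distinct GPR
// values on a direct-move subtarget returns true here and is kept for the
// .td patterns, whereas a splat of a single loaded value returns false and
// is expanded so it can become a load-and-splat.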
8588 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8589 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8591 SDLoc dl(Op);
8592 SDValue Op0 = Op->getOperand(0);
8594 if (!EnableQuadPrecision ||
8595 (Op.getValueType() != MVT::f128 ) ||
8596 (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8597 (Op0.getOperand(0).getValueType() != MVT::i64) ||
8598 (Op0.getOperand(1).getValueType() != MVT::i64))
8599 return SDValue();
8601 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8602 Op0.getOperand(1));
8603 }
8605 static const SDValue *getNormalLoadInput(const SDValue &Op) {
8606 const SDValue *InputLoad = &Op;
8607 if (InputLoad->getOpcode() == ISD::BITCAST)
8608 InputLoad = &InputLoad->getOperand(0);
8609 if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR)
8610 InputLoad = &InputLoad->getOperand(0);
8611 if (InputLoad->getOpcode() != ISD::LOAD)
8612 return nullptr;
8613 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8614 return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
8615 }
8617 // If this is a case we can't handle, return null and let the default
8618 // expansion code take care of it. If we CAN select this case, and if it
8619 // selects to a single instruction, return Op. Otherwise, if we can codegen
8620 // this case more efficiently than a constant pool load, lower it to the
8621 // sequence of ops that should be used.
8622 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8623 SelectionDAG &DAG) const {
8624 SDLoc dl(Op);
8625 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8626 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8628 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
8629 // We first build an i32 vector, load it into a QPX register,
8630 // then convert it to a floating-point vector and compare it
8631 // to a zero vector to get the boolean result.
8632 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8633 int FrameIdx = MFI.CreateStackObject(16, 16, false);
8634 MachinePointerInfo PtrInfo =
8635 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8636 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8637 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8639 assert(BVN->getNumOperands() == 4 &&
8640 "BUILD_VECTOR for v4i1 does not have 4 operands");
8642 bool IsConst = true;
8643 for (unsigned i = 0; i < 4; ++i) {
8644 if (BVN->getOperand(i).isUndef()) continue;
8645 if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
8646 IsConst = false;
8647 break;
8648 }
8649 }
8651 if (IsConst) {
8652 Constant *One =
8653 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
8654 Constant *NegOne =
8655 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);
8657 Constant *CV[4];
8658 for (unsigned i = 0; i < 4; ++i) {
8659 if (BVN->getOperand(i).isUndef())
8660 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
8661 else if (isNullConstant(BVN->getOperand(i)))
8662 CV[i] = NegOne;
8663 else
8664 CV[i] = One;
8665 }
8667 Constant *CP = ConstantVector::get(CV);
8668 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()),
8669 16 /* alignment */);
8671 SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
8672 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
8673 return DAG.getMemIntrinsicNode(
8674 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
8675 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
8676 }
8678 SmallVector<SDValue, 4> Stores;
8679 for (unsigned i = 0; i < 4; ++i) {
8680 if (BVN->getOperand(i).isUndef()) continue;
8682 unsigned Offset = 4*i;
8683 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
8684 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
8686 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
8687 if (StoreSize > 4) {
8688 Stores.push_back(
8689 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
8690 PtrInfo.getWithOffset(Offset), MVT::i32));
8691 } else {
8692 SDValue StoreValue = BVN->getOperand(i);
8693 if (StoreSize < 4)
8694 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);
8696 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
8697 PtrInfo.getWithOffset(Offset)));
8698 }
8699 }
8701 SDValue StoreChain;
8702 if (!Stores.empty())
8703 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
8704 else
8705 StoreChain = DAG.getEntryNode();
8707 // Now load from v4i32 into the QPX register; this will extend it to
8708 // v4i64 but not yet convert it to a floating point. Nevertheless, this
8709 // is typed as v4f64 because the QPX register integer states are not
8710 // explicitly represented.
8712 SDValue Ops[] = {StoreChain,
8713 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
8714 FIdx};
8715 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});
8717 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
8718 dl, VTs, Ops, MVT::v4i32, PtrInfo);
8719 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
8720 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
8721 LoadedVect);
8723 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);
8725 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
8726 }
8728 // All other QPX vectors are handled by generic code.
8729 if (Subtarget.hasQPX())
8730 return SDValue();
8732 // Check if this is a splat of a constant value.
8733 APInt APSplatBits, APSplatUndef;
8734 unsigned SplatBitSize;
8735 bool HasAnyUndefs;
8736 if (! BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8737 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
8738 SplatBitSize > 32) {
8740 const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
8741 // Handle load-and-splat patterns as we have instructions that will do this
8742 // in one go.
8743 if (InputLoad && DAG.isSplatValue(Op, true)) {
8744 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8746 // We have handling for 4 and 8 byte elements.
8747 unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8749 // Checking for a single use of this load, we have to check for vector
8750 // width (128 bits) / ElementSize uses (since each operand of the
8751 // BUILD_VECTOR is a separate use of the value).
8752 if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
8753 ((Subtarget.hasVSX() && ElementSize == 64) ||
8754 (Subtarget.hasP9Vector() && ElementSize == 32))) {
8755 SDValue Ops[] = {
8756 LD->getChain(), // Chain
8757 LD->getBasePtr(), // Ptr
8758 DAG.getValueType(Op.getValueType()) // VT
8759 };
8760 return
8761 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
8762 DAG.getVTList(Op.getValueType(), MVT::Other),
8763 Ops, LD->getMemoryVT(), LD->getMemOperand());
8764 }
8765 }
8767 // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
8768 // lowered to VSX instructions under certain conditions.
8769 // Without VSX, there is no pattern more efficient than expanding the node.
8770 if (Subtarget.hasVSX() &&
8771 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8772 Subtarget.hasP8Vector()))
8773 return Op;
8774 return SDValue();
8775 }
8777 unsigned SplatBits = APSplatBits.getZExtValue();
8778 unsigned SplatUndef = APSplatUndef.getZExtValue();
8779 unsigned SplatSize = SplatBitSize / 8;
8781 // First, handle single instruction cases.
8783 // All zeros?
8784 if (SplatBits == 0) {
8785 // Canonicalize all zero vectors to be v4i32.
8786 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8787 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8788 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8789 }
8790 return Op;
8791 }
8793 // We have XXSPLTIB for constant splats one byte wide
8794 // FIXME: SplatBits is an unsigned int being cast to an int while passing it
8795 // as an argument to BuildSplatiI. Given SplatSize == 1 it is okay here.
8796 if (Subtarget.hasP9Vector() && SplatSize == 1)
8797 return BuildSplatI(SplatBits, SplatSize, Op.getValueType(), DAG, dl);
8799 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
8800 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
8801 (32-SplatBitSize));
8802 if (SextVal >= -16 && SextVal <= 15)
8803 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
8805 // Two instruction sequences.
8807 // If this value is in the range [-32,30] and is even, use:
8808 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8809 // If this value is in the range [17,31] and is odd, use:
8810 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8811 // If this value is in the range [-31,-17] and is odd, use:
8812 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8813 // Note the last two are three-instruction sequences.
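// Editorial example (assumed): SextVal == 30 can be built as vspltisw(15)
// followed by vadduwm(t, t); SextVal == 27 (= 11 + 16) as vspltisw(11)
// minus vspltisw(-16).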
8814 if (SextVal >= -32 && SextVal <= 31) {
8815 // To avoid having these optimizations undone by constant folding,
8816 // we convert to a pseudo that will be expanded later into one of
8817 // the above forms.
8818 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
8819 EVT VT = (SplatSize == 1 ? MVT::v16i8 :
8820 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
8821 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
8822 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
8823 if (VT == Op.getValueType())
8824 return RetVal;
8825 else
8826 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
8827 }
8829 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
8830 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
8831 // for fneg/fabs.
8832 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
8833 // Make -1 and vspltisw -1:
8834 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
8836 // Make the VSLW intrinsic, computing 0x8000_0000.
8837 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
8838 OnesV, DAG, dl);
8840 // xor by OnesV to invert it.
8841 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
8842 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8843 }
8845 // Check to see if this is a wide variety of vsplti*, binop self cases.
8846 static const signed char SplatCsts[] = {
8847 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8848 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8849 };
8851 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
8852 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
8853 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1'
8854 int i = SplatCsts[idx];
8856 // Figure out what shift amount will be used by altivec if shifted by i in
8857 // this splat size.
8858 unsigned TypeShiftAmt = i & (SplatBitSize-1);
8860 // vsplti + shl self.
8861 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8862 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8863 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8864 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8865 Intrinsic::ppc_altivec_vslw
8866 };
8867 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8868 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8869 }
8871 // vsplti + srl self.
8872 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8873 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8874 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8875 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8876 Intrinsic::ppc_altivec_vsrw
8877 };
8878 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8879 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8880 }
8882 // vsplti + sra self.
8883 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8884 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8885 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8886 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
8887 Intrinsic::ppc_altivec_vsraw
8888 };
8889 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8890 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8891 }
8893 // vsplti + rol self.
8894 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
8895 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
8896 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8897 static const unsigned IIDs[] = { // Intrinsic to use for each size.
8898 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
8899 Intrinsic::ppc_altivec_vrlw
8900 };
8901 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8902 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8903 }
8905 // t = vsplti c, result = vsldoi t, t, 1
8906 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
8907 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8908 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
8909 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8910 }
8911 // t = vsplti c, result = vsldoi t, t, 2
8912 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
8913 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8914 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
8915 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8916 }
8917 // t = vsplti c, result = vsldoi t, t, 3
8918 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
8919 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
8920 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
8921 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
8922 }
8923 }
8925 return SDValue();
8926 }
8928 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
8929 /// the specified operations to build the shuffle.
8930 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
8931 SDValue RHS, SelectionDAG &DAG,
8932 const SDLoc &dl) {
8933 unsigned OpNum = (PFEntry >> 26) & 0x0F;
8934 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8935 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
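// Editorial note (assumed): PFEntry packs cost << 30 | op << 26 |
// LHS-id << 13 | RHS-id, where each 13-bit id encodes a four-element mask in
// base 9 (digit 8 = undef); e.g. (1*9+2)*9+3 == ((0*9+1)*9+2)*9+3 below is
// the identity mask <0,1,2,3>.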
8937 enum {
8938 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
8939 OP_VMRGHW,
8940 OP_VMRGLW,
8941 OP_VSPLTISW0,
8942 OP_VSPLTISW1,
8943 OP_VSPLTISW2,
8944 OP_VSPLTISW3,
8945 OP_VSLDOI4,
8946 OP_VSLDOI8,
8947 OP_VSLDOI12
8948 };
8950 if (OpNum == OP_COPY) {
8951 if (LHSID == (1*9+2)*9+3) return LHS;
8952 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
8953 return RHS;
8954 }
8956 SDValue OpLHS, OpRHS;
8957 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
8958 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
8960 int ShufIdxs[16];
8961 switch (OpNum) {
8962 default: llvm_unreachable("Unknown i32 permute!");
8963 case OP_VMRGHW:
8964 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
8965 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
8966 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
8967 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
8968 break;
8969 case OP_VMRGLW:
8970 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
8971 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
8972 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
8973 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
8974 break;
8975 case OP_VSPLTISW0:
8976 for (unsigned i = 0; i != 16; ++i)
8977 ShufIdxs[i] = (i&3)+0;
8978 break;
8979 case OP_VSPLTISW1:
8980 for (unsigned i = 0; i != 16; ++i)
8981 ShufIdxs[i] = (i&3)+4;
8982 break;
8983 case OP_VSPLTISW2:
8984 for (unsigned i = 0; i != 16; ++i)
8985 ShufIdxs[i] = (i&3)+8;
8986 break;
8987 case OP_VSPLTISW3:
8988 for (unsigned i = 0; i != 16; ++i)
8989 ShufIdxs[i] = (i&3)+12;
8990 break;
8991 case OP_VSLDOI4:
8992 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8993 case OP_VSLDOI8:
8994 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8995 case OP_VSLDOI12:
8996 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8997 }
8998 EVT VT = OpLHS.getValueType();
8999 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9000 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9001 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9002 return DAG.getNode(ISD::BITCAST, dl, VT, T);
9003 }
9005 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9006 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9007 /// SDValue.
9008 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9009 SelectionDAG &DAG) const {
9010 const unsigned BytesInVector = 16;
9011 bool IsLE = Subtarget.isLittleEndian();
9012 SDLoc dl(N);
9013 SDValue V1 = N->getOperand(0);
9014 SDValue V2 = N->getOperand(1);
9015 unsigned ShiftElts = 0, InsertAtByte = 0;
9016 bool Swap = false;
9018 // Shifts required to get the byte we want at element 7.
9019 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
9020 0, 15, 14, 13, 12, 11, 10, 9};
9021 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9022 1, 2, 3, 4, 5, 6, 7, 8};
9024 ArrayRef<int> Mask = N->getMask();
9025 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9027 // For each mask element, find out if we're just inserting something
9028 // from V2 into V1 or vice versa.
9029 // Possible permutations inserting an element from V2 into V1:
9030 // X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9031 // 0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9032 // ...
9033 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9034 // Inserting from V1 into V2 will be similar, except mask range will be
9035 // [16,31].
9037 bool FoundCandidate = false;
9038 // If both vector operands for the shuffle are the same vector, the mask
9039 // will contain only elements from the first one and the second one will be
9040 // undef.
9041 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9042 // Go through the mask of half-words to find an element that's being moved
9043 // from one vector to the other.
9044 for (unsigned i = 0; i < BytesInVector; ++i) {
9045 unsigned CurrentElement = Mask[i];
9046 // If 2nd operand is undefined, we should only look for element 7 in the
9047 // Mask.
9048 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9049 continue;
9051 bool OtherElementsInOrder = true;
9052 // Examine the other elements in the Mask to see if they're in original
9053 // order.
9054 for (unsigned j = 0; j < BytesInVector; ++j) {
9055 if (j == i)
9056 continue;
9057 // If CurrentElement is from V1 [0,15], we expect the rest of the Mask
9058 // to be from V2 [16,31] and vice versa. Unless the 2nd operand is
9059 // undefined, in which case we assume we're picking from the 1st operand.
9060 int MaskOffset =
9061 (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9062 if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9063 OtherElementsInOrder = false;
9064 break;
9065 }
9066 }
9067 // If other elements are in original order, we record the number of shifts
9068 // we need to get the element we want into element 7. Also record which byte
9069 // in the vector we should insert into.
9070 if (OtherElementsInOrder) {
9071 // If 2nd operand is undefined, we assume no shifts and no swapping.
9072 if (V2.isUndef()) {
9073 ShiftElts = 0;
9074 Swap = false;
9075 } else {
9076 // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
9077 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9078 : BigEndianShifts[CurrentElement & 0xF];
9079 Swap = CurrentElement < BytesInVector;
9080 }
9081 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9082 FoundCandidate = true;
9083 break;
9084 }
9085 }
9087 if (!FoundCandidate)
9088 return SDValue();
9090 // Candidate found, construct the proper SDAG sequence with VINSERTB,
9091 // optionally with VECSHL if shift is required.
9092 if (Swap)
9093 std::swap(V1, V2);
9094 if (V2.isUndef())
9095 V2 = V1;
9096 if (ShiftElts) {
9097 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9098 DAG.getConstant(ShiftElts, dl, MVT::i32));
9099 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9100 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9101 }
9102 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9103 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9104 }
9106 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9107 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9108 /// SDValue.
9109 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9110 SelectionDAG &DAG) const {
9111 const unsigned NumHalfWords = 8;
9112 const unsigned BytesInVector = NumHalfWords * 2;
9113 // Check that the shuffle is on half-words.
9114 if (!isNByteElemShuffleMask(N, 2, 1))
9115 return SDValue();
9117 bool IsLE = Subtarget.isLittleEndian();
9118 SDLoc dl(N);
9119 SDValue V1 = N->getOperand(0);
9120 SDValue V2 = N->getOperand(1);
9121 unsigned ShiftElts = 0, InsertAtByte = 0;
9122 bool Swap = false;
9124 // Shifts required to get the half-word we want at element 3.
9125 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9126 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9128 uint32_t Mask = 0;
9129 uint32_t OriginalOrderLow = 0x1234567;
9130 uint32_t OriginalOrderHigh = 0x89ABCDEF;
9131 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
9132 // 32-bit space, only need 4-bit nibbles per element.
9133 for (unsigned i = 0; i < NumHalfWords; ++i) {
9134 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9135 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9136 }
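// Editorial example (assumed): the identity half-word mask {0,...,7} packs
// to 0x01234567 (OriginalOrderLow above), and a mask drawn entirely from V2,
// {8,...,15}, packs to 0x89ABCDEF (OriginalOrderHigh).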
9138 // For each mask element, find out if we're just inserting something
9139 // from V2 into V1 or vice versa. Possible permutations inserting an element
9140 // from V2 into V1:
9141 // X, 1, 2, 3, 4, 5, 6, 7
9142 // 0, X, 2, 3, 4, 5, 6, 7
9143 // 0, 1, X, 3, 4, 5, 6, 7
9144 // 0, 1, 2, X, 4, 5, 6, 7
9145 // 0, 1, 2, 3, X, 5, 6, 7
9146 // 0, 1, 2, 3, 4, X, 6, 7
9147 // 0, 1, 2, 3, 4, 5, X, 7
9148 // 0, 1, 2, 3, 4, 5, 6, X
9149 // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9151 bool FoundCandidate = false;
9152 // Go through the mask of half-words to find an element that's being moved
9153 // from one vector to the other.
9154 for (unsigned i = 0; i < NumHalfWords; ++i) {
9155 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9156 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9157 uint32_t MaskOtherElts = ~(0xF << MaskShift);
9158 uint32_t TargetOrder = 0x0;
9160 // If both vector operands for the shuffle are the same vector, the mask
9161 // will contain only elements from the first one and the second one will be
9162 // undef.
9163 if (V2.isUndef()) {
9164 ShiftElts = 0;
9165 unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9166 TargetOrder = OriginalOrderLow;
9168 // Skip if it's not the correct element or the mask of other elements
9169 // doesn't equal our expected order.
9170 if (MaskOneElt == VINSERTHSrcElem &&
9171 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9172 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9173 FoundCandidate = true;
9174 break;
9175 }
9176 } else { // If both operands are defined.
9177 // Target order is [8,15] if the current mask is between [0,7].
9178 TargetOrder =
9179 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9180 // Skip if mask of other elements don't equal our expected order.
9181 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9182 // We only need the last 3 bits for the number of shifts.
9183 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9184 : BigEndianShifts[MaskOneElt & 0x7];
9185 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9186 Swap = MaskOneElt < NumHalfWords;
9187 FoundCandidate = true;
9188 break;
9189 }
9190 }
9191 }
9193 if (!FoundCandidate)
9194 return SDValue();
9196 // Candidate found, construct the proper SDAG sequence with VINSERTH,
9197 // optionally with VECSHL if shift is required.
9198 if (Swap)
9199 std::swap(V1, V2);
9200 if (V2.isUndef())
9201 V2 = V1;
9202 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9203 if (ShiftElts) {
9204 // Double ShiftElts because we're left shifting on v16i8 type.
9205 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9206 DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9207 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9208 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9209 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9210 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9211 }
9212 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9213 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9214 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9215 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9216 }
9218 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
9219 /// is a shuffle we can handle in a single instruction, return it. Otherwise,
9220 /// return the code it can be lowered into. Worst case, it can always be
9221 /// lowered into a vperm.
9222 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9223 SelectionDAG &DAG) const {
9224 SDLoc dl(Op);
9225 SDValue V1 = Op.getOperand(0);
9226 SDValue V2 = Op.getOperand(1);
9227 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9228 EVT VT = Op.getValueType();
9229 bool isLittleEndian = Subtarget.isLittleEndian();
9231 unsigned ShiftElts, InsertAtByte;
9232 bool Swap = false;
9234 // If this is a load-and-splat, we can do that with a single instruction
9235 // in some cases. However if the load has multiple uses, we don't want to
9236 // combine it because that will just produce multiple loads.
9237 const SDValue *InputLoad = getNormalLoadInput(V1);
9238 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9239 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9240 InputLoad->hasOneUse()) {
9241 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9242 int SplatIdx =
9243 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9245 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9246 // For 4-byte load-and-splat, we need Power9.
9247 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9248 uint64_t Offset = 0;
9249 if (IsFourByte)
9250 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9251 else
9252 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9253 SDValue BasePtr = LD->getBasePtr();
9254 if (Offset != 0)
9255 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9256 BasePtr, DAG.getIntPtrConstant(Offset, dl));
9257 SDValue Ops[] = {
9258 LD->getChain(), // Chain
9259 BasePtr, // BasePtr
9260 DAG.getValueType(Op.getValueType()) // VT
9261 };
9262 SDVTList VTL =
9263 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9264 SDValue LdSplt =
9265 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9266 Ops, LD->getMemoryVT(), LD->getMemOperand());
9267 if (LdSplt.getValueType() != SVOp->getValueType(0))
9268 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9269 return LdSplt;
9270 }
9271 }
9272 if (Subtarget.hasP9Vector() &&
9273 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9274 isLittleEndian)) {
9275 if (Swap)
9276 std::swap(V1, V2);
9277 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9278 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9279 if (ShiftElts) {
9280 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9281 DAG.getConstant(ShiftElts, dl, MVT::i32));
9282 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9283 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9284 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9286 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9287 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9288 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9291 if (Subtarget.hasP9Altivec()) {
9293 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9296 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9300 if (Subtarget.hasVSX() &&
9301 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9304 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9306 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9308 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9309 DAG.getConstant(ShiftElts, dl, MVT::i32));
9310 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9313 if (Subtarget.hasVSX() &&
9314 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9317 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9319 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9321 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9322 DAG.getConstant(ShiftElts, dl, MVT::i32));
9323 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9326 if (Subtarget.hasP9Vector()) {
9327 if (PPC::isXXBRHShuffleMask(SVOp)) {
9328 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9329 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9330 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9331 } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9332 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9333 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9334 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9335 } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9336 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9337 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9338 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9339 } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9340 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9341 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9342 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9346 if (Subtarget.hasVSX()) {
9347 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9348 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9350 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9351 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9352 DAG.getConstant(SplatIdx, dl, MVT::i32));
9353 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9356 // Left shifts of 8 bytes are actually swaps. Convert accordingly.
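// e.g. the v16i8 mask <8,9,...,15,0,1,...,7> rotates the vector by 8
// bytes, which is exactly a doubleword swap (xxswapd).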
9357 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9358 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9359 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9360 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9364 if (Subtarget.hasQPX()) {
9365 if (VT.getVectorNumElements() != 4)
9368 if (V2.isUndef()) V2 = V1;
9370 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
9371 if (AlignIdx != -1) {
9372 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
9373 DAG.getConstant(AlignIdx, dl, MVT::i32));
9374 } else if (SVOp->isSplat()) {
9375 int SplatIdx = SVOp->getSplatIndex();
9376 if (SplatIdx >= 4) {
9381 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
9382 DAG.getConstant(SplatIdx, dl, MVT::i32));
9385 // Lower this into a qvgpci/qvfperm pair.
9387 // Compute the qvgpci literal
9389 for (unsigned i = 0; i < 4; ++i) {
9390 int m = SVOp->getMaskElt(i);
9391 unsigned mm = m >= 0 ? (unsigned) m : i;
9392 idx |= mm << (3-i)*3;
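// Each 3-bit field selects one source element (0-3 from V1, 4-7 from V2),
// packed most-significant-first; e.g. the identity mask <0,1,2,3> yields
// idx = (0 << 9) | (1 << 6) | (2 << 3) | 3 = 0x53.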
9395 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
9396 DAG.getConstant(idx, dl, MVT::i32));
9397 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
9400 // Cases that are handled by instructions that take permute immediates
9401 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9402 // selected by the instruction selector.
9404 if (PPC::isSplatShuffleMask(SVOp, 1) ||
9405 PPC::isSplatShuffleMask(SVOp, 2) ||
9406 PPC::isSplatShuffleMask(SVOp, 4) ||
9407 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9408 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9409 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9410 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9411 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9412 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9413 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9414 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9415 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9416 (Subtarget.hasP8Altivec() && (
9417 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9418 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9419 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9424 // Altivec has a variety of "shuffle immediates" that take two vector inputs
9425   // and produce a fixed permutation. If any of these match, do not lower to a vperm.
9427 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9428 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9429 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9430 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9431 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9432 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9433 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9434 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9435 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9436 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9437 (Subtarget.hasP8Altivec() && (
9438 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9439 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9440 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9443 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
9444 // perfect shuffle table to emit an optimal matching sequence.
9445 ArrayRef<int> PermMask = SVOp->getMask();
9447 unsigned PFIndexes[4];
9448 bool isFourElementShuffle = true;
9449 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9450 unsigned EltNo = 8; // Start out undef.
9451 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
9452 if (PermMask[i*4+j] < 0)
9453 continue; // Undef, ignore it.
9455 unsigned ByteSource = PermMask[i*4+j];
9456 if ((ByteSource & 3) != j) {
9457 isFourElementShuffle = false;
9462 EltNo = ByteSource/4;
9463 } else if (EltNo != ByteSource/4) {
9464 isFourElementShuffle = false;
9468 PFIndexes[i] = EltNo;
9471 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9472 // perfect shuffle vector to determine if it is cost effective to do this as
9473 // discrete instructions, or whether we should use a vperm.
9474   // For now, we skip this for little endian until we have a
9475 // little-endian perfect shuffle table.
9476 if (isFourElementShuffle && !isLittleEndian) {
9477 // Compute the index in the perfect shuffle table.
9478 unsigned PFTableIndex =
9479 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
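// Each PFIndexes[i] is in [0,8] (eight possible 4-byte sources plus undef,
// encoded as 8), so the four indices pack into a base-9 number.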
9481 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9482 unsigned Cost = (PFEntry >> 30);
9484 // Determining when to avoid vperm is tricky. Many things affect the cost
9485 // of vperm, particularly how many times the perm mask needs to be computed.
9486 // For example, if the perm mask can be hoisted out of a loop or is already
9487 // used (perhaps because there are multiple permutes with the same shuffle
9488 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
9489 // the loop requires an extra register.
9491 // As a compromise, we only emit discrete instructions if the shuffle can be
9492 // generated in 3 or fewer operations. When we have loop information
9493 // available, if this block is within a loop, we should avoid using vperm
9494 // for 3-operation perms and use a constant pool load instead.
9496 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9499 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9500 // vector that will get spilled to the constant pool.
9501 if (V2.isUndef()) V2 = V1;
9503   // The VECTOR_SHUFFLE mask is almost exactly what we want for vperm, except
9504 // that it is in input element units, not in bytes. Convert now.
9506 // For little endian, the order of the input vectors is reversed, and
9507 // the permutation mask is complemented with respect to 31. This is
9508   // necessary to produce proper semantics with the big-endian-biased vperm instruction.
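// e.g. a mask entry selecting byte 0 of the first input becomes 31 once
// the inputs are swapped, because vperm numbers its 32 source bytes
// starting from the most significant byte of the first operand.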
9510 EVT EltVT = V1.getValueType().getVectorElementType();
9511 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9513 SmallVector<SDValue, 16> ResultMask;
9514 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9515 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9517 for (unsigned j = 0; j != BytesPerElement; ++j)
9519 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9522 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9526 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9528 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9531 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9535 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9536 /// vector comparison. If it is, return true and fill in Opc/isDot with
9537 /// information about the intrinsic.
9538 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9539 bool &isDot, const PPCSubtarget &Subtarget) {
9540 unsigned IntrinsicID =
9541 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9544 switch (IntrinsicID) {
9547 // Comparison predicates.
9548 case Intrinsic::ppc_altivec_vcmpbfp_p:
9552 case Intrinsic::ppc_altivec_vcmpeqfp_p:
9556 case Intrinsic::ppc_altivec_vcmpequb_p:
9560 case Intrinsic::ppc_altivec_vcmpequh_p:
9564 case Intrinsic::ppc_altivec_vcmpequw_p:
9568 case Intrinsic::ppc_altivec_vcmpequd_p:
9569 if (Subtarget.hasP8Altivec()) {
9575 case Intrinsic::ppc_altivec_vcmpneb_p:
9576 case Intrinsic::ppc_altivec_vcmpneh_p:
9577 case Intrinsic::ppc_altivec_vcmpnew_p:
9578 case Intrinsic::ppc_altivec_vcmpnezb_p:
9579 case Intrinsic::ppc_altivec_vcmpnezh_p:
9580 case Intrinsic::ppc_altivec_vcmpnezw_p:
9581 if (Subtarget.hasP9Altivec()) {
9582 switch (IntrinsicID) {
9584 llvm_unreachable("Unknown comparison intrinsic.");
9585 case Intrinsic::ppc_altivec_vcmpneb_p:
9588 case Intrinsic::ppc_altivec_vcmpneh_p:
9591 case Intrinsic::ppc_altivec_vcmpnew_p:
9594 case Intrinsic::ppc_altivec_vcmpnezb_p:
9597 case Intrinsic::ppc_altivec_vcmpnezh_p:
9600 case Intrinsic::ppc_altivec_vcmpnezw_p:
9608 case Intrinsic::ppc_altivec_vcmpgefp_p:
9612 case Intrinsic::ppc_altivec_vcmpgtfp_p:
9616 case Intrinsic::ppc_altivec_vcmpgtsb_p:
9620 case Intrinsic::ppc_altivec_vcmpgtsh_p:
9624 case Intrinsic::ppc_altivec_vcmpgtsw_p:
9628 case Intrinsic::ppc_altivec_vcmpgtsd_p:
9629 if (Subtarget.hasP8Altivec()) {
9635 case Intrinsic::ppc_altivec_vcmpgtub_p:
9639 case Intrinsic::ppc_altivec_vcmpgtuh_p:
9643 case Intrinsic::ppc_altivec_vcmpgtuw_p:
9647 case Intrinsic::ppc_altivec_vcmpgtud_p:
9648 if (Subtarget.hasP8Altivec()) {
9655 // VSX predicate comparisons use the same infrastructure
9656 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9657 case Intrinsic::ppc_vsx_xvcmpgedp_p:
9658 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9659 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9660 case Intrinsic::ppc_vsx_xvcmpgesp_p:
9661 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9662 if (Subtarget.hasVSX()) {
9663 switch (IntrinsicID) {
9664 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9667 case Intrinsic::ppc_vsx_xvcmpgedp_p:
9670 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9673 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9676 case Intrinsic::ppc_vsx_xvcmpgesp_p:
9679 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
9688 // Normal Comparisons.
9689 case Intrinsic::ppc_altivec_vcmpbfp:
9692 case Intrinsic::ppc_altivec_vcmpeqfp:
9695 case Intrinsic::ppc_altivec_vcmpequb:
9698 case Intrinsic::ppc_altivec_vcmpequh:
9701 case Intrinsic::ppc_altivec_vcmpequw:
9704 case Intrinsic::ppc_altivec_vcmpequd:
9705 if (Subtarget.hasP8Altivec())
9710 case Intrinsic::ppc_altivec_vcmpneb:
9711 case Intrinsic::ppc_altivec_vcmpneh:
9712 case Intrinsic::ppc_altivec_vcmpnew:
9713 case Intrinsic::ppc_altivec_vcmpnezb:
9714 case Intrinsic::ppc_altivec_vcmpnezh:
9715 case Intrinsic::ppc_altivec_vcmpnezw:
9716 if (Subtarget.hasP9Altivec())
9717 switch (IntrinsicID) {
9719 llvm_unreachable("Unknown comparison intrinsic.");
9720 case Intrinsic::ppc_altivec_vcmpneb:
9723 case Intrinsic::ppc_altivec_vcmpneh:
9726 case Intrinsic::ppc_altivec_vcmpnew:
9729 case Intrinsic::ppc_altivec_vcmpnezb:
9732 case Intrinsic::ppc_altivec_vcmpnezh:
9735 case Intrinsic::ppc_altivec_vcmpnezw:
9742 case Intrinsic::ppc_altivec_vcmpgefp:
9745 case Intrinsic::ppc_altivec_vcmpgtfp:
9748 case Intrinsic::ppc_altivec_vcmpgtsb:
9751 case Intrinsic::ppc_altivec_vcmpgtsh:
9754 case Intrinsic::ppc_altivec_vcmpgtsw:
9757 case Intrinsic::ppc_altivec_vcmpgtsd:
9758 if (Subtarget.hasP8Altivec())
9763 case Intrinsic::ppc_altivec_vcmpgtub:
9766 case Intrinsic::ppc_altivec_vcmpgtuh:
9769 case Intrinsic::ppc_altivec_vcmpgtuw:
9772 case Intrinsic::ppc_altivec_vcmpgtud:
9773 if (Subtarget.hasP8Altivec())
9782 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
9783 /// lower, do it, otherwise return null.
9784 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
9785 SelectionDAG &DAG) const {
9786 unsigned IntrinsicID =
9787 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
9791 if (IntrinsicID == Intrinsic::thread_pointer) {
9792 // Reads the thread pointer register, used for __builtin_thread_pointer.
9793 if (Subtarget.isPPC64())
9794 return DAG.getRegister(PPC::X13, MVT::i64);
9795 return DAG.getRegister(PPC::R2, MVT::i32);
9798 // If this is a lowered altivec predicate compare, CompareOpc is set to the
9799 // opcode number of the comparison.
9802 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
9803 return SDValue(); // Don't custom lower most intrinsics.
9805 // If this is a non-dot comparison, make the VCMP node and we are done.
9807 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
9808 Op.getOperand(1), Op.getOperand(2),
9809 DAG.getConstant(CompareOpc, dl, MVT::i32));
9810 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
9813 // Create the PPCISD altivec 'dot' comparison node.
9815 Op.getOperand(2), // LHS
9816 Op.getOperand(3), // RHS
9817 DAG.getConstant(CompareOpc, dl, MVT::i32)
9819 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
9820 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
9822 // Now that we have the comparison, emit a copy from the CR to a GPR.
9823 // This is flagged to the above dot comparison.
9824 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
9825 DAG.getRegister(PPC::CR6, MVT::i32),
9826 CompNode.getValue(1));
9828 // Unpack the result based on how the target uses it.
9829 unsigned BitNo; // Bit # of CR6.
9830 bool InvertBit; // Invert result?
9831 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
9832   default: // Can't happen; don't crash on an invalid number though.
9833 case 0: // Return the value of the EQ bit of CR6.
9834 BitNo = 0; InvertBit = false;
9836 case 1: // Return the inverted value of the EQ bit of CR6.
9837 BitNo = 0; InvertBit = true;
9839 case 2: // Return the value of the LT bit of CR6.
9840 BitNo = 2; InvertBit = false;
9842 case 3: // Return the inverted value of the LT bit of CR6.
9843 BitNo = 2; InvertBit = true;
9847 // Shift the bit into the low position.
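// Note: MFOCRF places the CR6 field in bits 4-7 of the result (counting
// from the LSB), with LT in bit 7 and EQ in bit 5; that is what the
// 8 - (3 - BitNo) shift amount below extracts.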
9848 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
9849 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
9851 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
9852 DAG.getConstant(1, dl, MVT::i32));
9854 // If we are supposed to, toggle the bit.
9856 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
9857 DAG.getConstant(1, dl, MVT::i32));
9861 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
9862 SelectionDAG &DAG) const {
9863   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain at
9864 // the beginning of the argument list.
9865 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
9867 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
9868 case Intrinsic::ppc_cfence: {
9869 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
9870 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
9871 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
9872 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
9873 Op.getOperand(ArgStart + 1)),
9883 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const {
9884 // Check for a DIV with the same operands as this REM.
9885 for (auto UI : Op.getOperand(1)->uses()) {
9886 if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) ||
9887 (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV))
9888 if (UI->getOperand(0) == Op.getOperand(0) &&
9889 UI->getOperand(1) == Op.getOperand(1))
9895 // Lower scalar BSWAP64 to xxbrd.
9896 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
9899 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
9902 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
9904 int VectorIndex = 0;
9905 if (Subtarget.isLittleEndian())
9907 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
9908 DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
9912 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
9913 // compared to a value that is atomically loaded (atomic loads zero-extend).
9914 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
9915 SelectionDAG &DAG) const {
9916 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
9917 "Expecting an atomic compare-and-swap here.");
9919 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
9920 EVT MemVT = AtomicNode->getMemoryVT();
9921 if (MemVT.getSizeInBits() >= 32)
9924 SDValue CmpOp = Op.getOperand(2);
9925 // If this is already correctly zero-extended, leave it alone.
9926 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
9927 if (DAG.MaskedValueIsZero(CmpOp, HighBits))
9930 // Clear the high bits of the compare operand.
9931 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
9933 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
9934 DAG.getConstant(MaskVal, dl, MVT::i32));
9936 // Replace the existing compare operand with the properly zero-extended one.
9937 SmallVector<SDValue, 4> Ops;
9938 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
9939 Ops.push_back(AtomicNode->getOperand(i));
9941 MachineMemOperand *MMO = AtomicNode->getMemOperand();
9942 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
9944 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
9945 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
9948 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
9949 SelectionDAG &DAG) const {
9951 // Create a stack slot that is 16-byte aligned.
9952 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9953 int FrameIdx = MFI.CreateStackObject(16, 16, false);
9954 EVT PtrVT = getPointerTy(DAG.getDataLayout());
9955 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
9957 // Store the input value into Value#0 of the stack slot.
9958 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
9959 MachinePointerInfo());
9961 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
9964 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
9965 SelectionDAG &DAG) const {
9966 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
9967 "Should only be called for ISD::INSERT_VECTOR_ELT");
9969 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
9970 // We have legal lowering for constant indices but not for variable ones.
9974 EVT VT = Op.getValueType();
9976 SDValue V1 = Op.getOperand(0);
9977 SDValue V2 = Op.getOperand(1);
9978 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
9979 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
9980 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
9981 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
9982 unsigned InsertAtElement = C->getZExtValue();
9983 unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
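// VINSERTB/VINSERTH take a big-endian byte offset, so mirror the insert
// position on little-endian targets.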
9984 if (Subtarget.isLittleEndian()) {
9985 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
9987 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
9988 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9993 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
9994 SelectionDAG &DAG) const {
9996 SDNode *N = Op.getNode();
9998 assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
9999 "Unknown extract_vector_elt type");
10001 SDValue Value = N->getOperand(0);
10003 // The first part of this is like the store lowering except that we don't
10004 // need to track the chain.
10006 // The values are now known to be -1 (false) or 1 (true). To convert this
10007 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10008 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10009 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10011 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10012 // understand how to form the extending load.
10013 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10015 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10017 // Now convert to an integer and store.
10018 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10019 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10022 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10023 int FrameIdx = MFI.CreateStackObject(16, 16, false);
10024 MachinePointerInfo PtrInfo =
10025 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10026 EVT PtrVT = getPointerTy(DAG.getDataLayout());
10027 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10029 SDValue StoreChain = DAG.getEntryNode();
10030 SDValue Ops[] = {StoreChain,
10031 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10033 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10035 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10036 dl, VTs, Ops, MVT::v4i32, PtrInfo);
10038 // Extract the value requested.
10039 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10040 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10041 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10044 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
10046 if (!Subtarget.useCRBits())
10049 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
10052 /// Lowering for QPX v4i1 loads
10053 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10054 SelectionDAG &DAG) const {
10056 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10057 SDValue LoadChain = LN->getChain();
10058 SDValue BasePtr = LN->getBasePtr();
10060 if (Op.getValueType() == MVT::v4f64 ||
10061 Op.getValueType() == MVT::v4f32) {
10062 EVT MemVT = LN->getMemoryVT();
10063 unsigned Alignment = LN->getAlignment();
10065 // If this load is properly aligned, then it is legal.
10066 if (Alignment >= MemVT.getStoreSize())
10069 EVT ScalarVT = Op.getValueType().getScalarType(),
10070 ScalarMemVT = MemVT.getScalarType();
10071 unsigned Stride = ScalarMemVT.getStoreSize();
10073 SDValue Vals[4], LoadChains[4];
10074 for (unsigned Idx = 0; Idx < 4; ++Idx) {
10076 if (ScalarVT != ScalarMemVT)
10077 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
10079 LN->getPointerInfo().getWithOffset(Idx * Stride),
10080 ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10081 LN->getMemOperand()->getFlags(), LN->getAAInfo());
10083 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
10084 LN->getPointerInfo().getWithOffset(Idx * Stride),
10085 MinAlign(Alignment, Idx * Stride),
10086 LN->getMemOperand()->getFlags(), LN->getAAInfo());
10088 if (Idx == 0 && LN->isIndexed()) {
10089 assert(LN->getAddressingMode() == ISD::PRE_INC &&
10090 "Unknown addressing mode on vector load");
10091 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
10092 LN->getAddressingMode());
10096 LoadChains[Idx] = Load.getValue(1);
10098 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10099 DAG.getConstant(Stride, dl,
10100 BasePtr.getValueType()));
10103 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10104 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
10106 if (LN->isIndexed()) {
10107 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
10108 return DAG.getMergeValues(RetOps, dl);
10111 SDValue RetOps[] = { Value, TF };
10112 return DAG.getMergeValues(RetOps, dl);
10115 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
10116 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
10118 // To lower v4i1 from a byte array, we load the byte elements of the
10119 // vector and then reuse the BUILD_VECTOR logic.
10121 SDValue VectElmts[4], VectElmtChains[4];
10122 for (unsigned i = 0; i < 4; ++i) {
10123 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10124 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10126 VectElmts[i] = DAG.getExtLoad(
10127 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
10128 LN->getPointerInfo().getWithOffset(i), MVT::i8,
10129 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
10130 VectElmtChains[i] = VectElmts[i].getValue(1);
10133 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
10134 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
10136 SDValue RVals[] = { Value, LoadChain };
10137 return DAG.getMergeValues(RVals, dl);
10140 /// Lowering for QPX v4i1 stores
10141 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10142 SelectionDAG &DAG) const {
10144 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10145 SDValue StoreChain = SN->getChain();
10146 SDValue BasePtr = SN->getBasePtr();
10147 SDValue Value = SN->getValue();
10149 if (Value.getValueType() == MVT::v4f64 ||
10150 Value.getValueType() == MVT::v4f32) {
10151 EVT MemVT = SN->getMemoryVT();
10152 unsigned Alignment = SN->getAlignment();
10154 // If this store is properly aligned, then it is legal.
10155 if (Alignment >= MemVT.getStoreSize())
10158 EVT ScalarVT = Value.getValueType().getScalarType(),
10159 ScalarMemVT = MemVT.getScalarType();
10160 unsigned Stride = ScalarMemVT.getStoreSize();
10163 for (unsigned Idx = 0; Idx < 4; ++Idx) {
10164 SDValue Ex = DAG.getNode(
10165 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
10166 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout())));
10168 if (ScalarVT != ScalarMemVT)
10170 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
10171 SN->getPointerInfo().getWithOffset(Idx * Stride),
10172 ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10173 SN->getMemOperand()->getFlags(), SN->getAAInfo());
10175 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
10176 SN->getPointerInfo().getWithOffset(Idx * Stride),
10177 MinAlign(Alignment, Idx * Stride),
10178 SN->getMemOperand()->getFlags(), SN->getAAInfo());
10180 if (Idx == 0 && SN->isIndexed()) {
10181 assert(SN->getAddressingMode() == ISD::PRE_INC &&
10182 "Unknown addressing mode on vector store");
10183 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
10184 SN->getAddressingMode());
10187 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10188 DAG.getConstant(Stride, dl,
10189 BasePtr.getValueType()));
10190 Stores[Idx] = Store;
10193 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10195 if (SN->isIndexed()) {
10196 SDValue RetOps[] = { TF, Stores[0].getValue(1) };
10197 return DAG.getMergeValues(RetOps, dl);
10203 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
10204 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
10206 // The values are now known to be -1 (false) or 1 (true). To convert this
10207 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10208 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10209 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10211 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10212 // understand how to form the extending load.
10213 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10215 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10217 // Now convert to an integer and store.
10218 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10219 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10222 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10223 int FrameIdx = MFI.CreateStackObject(16, 16, false);
10224 MachinePointerInfo PtrInfo =
10225 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10226 EVT PtrVT = getPointerTy(DAG.getDataLayout());
10227 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10229 SDValue Ops[] = {StoreChain,
10230 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10232 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10234 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10235 dl, VTs, Ops, MVT::v4i32, PtrInfo);
10237 // Move data into the byte array.
10238 SDValue Loads[4], LoadChains[4];
10239 for (unsigned i = 0; i < 4; ++i) {
10240 unsigned Offset = 4*i;
10241 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10242 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10244 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
10245 PtrInfo.getWithOffset(Offset));
10246 LoadChains[i] = Loads[i].getValue(1);
10249 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10252 for (unsigned i = 0; i < 4; ++i) {
10253 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10254 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10256 Stores[i] = DAG.getTruncStore(
10257 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
10258 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
10262 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10267 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10269 if (Op.getValueType() == MVT::v4i32) {
10270 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10272 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl);
10273   SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.
10275 SDValue RHSSwap = // = vrlw RHS, 16
10276 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
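// Writing each 32-bit lane as a = a_hi * 2^16 + a_lo, the lowering uses
//   a * b mod 2^32 = a_lo * b_lo + ((a_hi * b_lo + a_lo * b_hi) << 16),
// where vmulouh produces the a_lo * b_lo terms and vmsumuhm on the
// half-rotated RHS accumulates the cross products to be shifted up.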
10278 // Shrinkify inputs to v8i16.
10279 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10280 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10281 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10283   // Low parts multiplied together, generating 32-bit results (we ignore the top parts).
10285 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10286 LHS, RHS, DAG, dl, MVT::v4i32);
10288 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10289 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10290 // Shift the high parts up 16 bits.
10291 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10293 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10294 } else if (Op.getValueType() == MVT::v8i16) {
10295 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10297 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
10299 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
10300 LHS, RHS, Zero, DAG, dl);
10301 } else if (Op.getValueType() == MVT::v16i8) {
10302 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10303 bool isLittleEndian = Subtarget.isLittleEndian();
10305   // Multiply the even 8-bit parts, producing 16-bit products.
10306 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10307 LHS, RHS, DAG, dl, MVT::v8i16);
10308 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10310   // Multiply the odd 8-bit parts, producing 16-bit products.
10311 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10312 LHS, RHS, DAG, dl, MVT::v8i16);
10313 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10315 // Merge the results together. Because vmuleub and vmuloub are
10316 // instructions with a big-endian bias, we must reverse the
10317 // element numbering and reverse the meaning of "odd" and "even"
10318   // when generating little-endian code.
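// e.g. on big endian the merge mask is <1,17,3,19,...>, selecting the low
// byte of each 16-bit product.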
10320 for (unsigned i = 0; i != 8; ++i) {
10321 if (isLittleEndian) {
10323 Ops[i*2+1] = 2*i+16;
10326 Ops[i*2+1] = 2*i+1+16;
10329 if (isLittleEndian)
10330 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10332 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10334 llvm_unreachable("Unknown mul to lower!");
10338 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
10340 assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
10342 EVT VT = Op.getValueType();
10343 assert(VT.isVector() &&
10344 "Only set vector abs as custom, scalar abs shouldn't reach here!");
10345 assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
10346 VT == MVT::v16i8) &&
10347 "Unexpected vector element type!");
10348 assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
10349 "Current subtarget doesn't support smax v2i64!");
10351   // For vector abs, it can be lowered to: abs(X) = smax(X, 0 - X).
10358 SDValue X = Op.getOperand(0);
10359 SDValue Zero = DAG.getConstant(0, dl, VT);
10360 SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
10362   // The SMAX patch (https://reviews.llvm.org/D47332) hasn't landed yet,
10363   // so use the max intrinsics for now.
10364   // TODO: Use ISD::SMAX directly once the SMAX patch has landed.
10365 Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
10366 if (VT == MVT::v2i64)
10367 BifID = Intrinsic::ppc_altivec_vmaxsd;
10368 else if (VT == MVT::v8i16)
10369 BifID = Intrinsic::ppc_altivec_vmaxsh;
10370 else if (VT == MVT::v16i8)
10371 BifID = Intrinsic::ppc_altivec_vmaxsb;
10373 return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
10376 // Custom lowering for fpext v2f32 to v2f64
10377 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10379 assert(Op.getOpcode() == ISD::FP_EXTEND &&
10380 "Should only be called for ISD::FP_EXTEND");
10382 // FIXME: handle extends from half precision float vectors on P9.
10383 // We only want to custom lower an extend from v2f32 to v2f64.
10384 if (Op.getValueType() != MVT::v2f64 ||
10385 Op.getOperand(0).getValueType() != MVT::v2f32)
10389 SDValue Op0 = Op.getOperand(0);
10391 switch (Op0.getOpcode()) {
10394 case ISD::EXTRACT_SUBVECTOR: {
10395 assert(Op0.getNumOperands() == 2 &&
10396 isa<ConstantSDNode>(Op0->getOperand(1)) &&
10397 "Node should have 2 operands with second one being a constant!");
10399 if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10402     // Custom lowering is only done for the high or low doubleword.
10403 int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10407 // Since input is v4f32, at this point Idx is either 0 or 2.
10408 // Shift to get the doubleword position we want.
10409 int DWord = Idx >> 1;
10411 // High and low word positions are different on little endian.
10412 if (Subtarget.isLittleEndian())
10415 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10416 Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10421 SDValue NewLoad[2];
10422 for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10423       // Ensure both inputs are loads.
10424 SDValue LdOp = Op0.getOperand(i);
10425 if (LdOp.getOpcode() != ISD::LOAD)
10427 // Generate new load node.
10428 LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10429 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10430 NewLoad[i] = DAG.getMemIntrinsicNode(
10431 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10432 LD->getMemoryVT(), LD->getMemOperand());
10435 DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10436 NewLoad[1], Op0.getNode()->getFlags());
10437 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10438 DAG.getConstant(0, dl, MVT::i32));
10441 LoadSDNode *LD = cast<LoadSDNode>(Op0);
10442 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10443 SDValue NewLd = DAG.getMemIntrinsicNode(
10444 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10445 LD->getMemoryVT(), LD->getMemOperand());
10446 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10447 DAG.getConstant(0, dl, MVT::i32));
10450   llvm_unreachable("ERROR: Should return for all cases within switch.");
10453 /// LowerOperation - Provide custom lowering hooks for some operations.
10455 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10456 switch (Op.getOpcode()) {
10457 default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10458 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
10459 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
10460 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
10461 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
10462 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
10463 case ISD::SETCC: return LowerSETCC(Op, DAG);
10464 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
10465 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
10467 // Variable argument lowering.
10468 case ISD::VASTART: return LowerVASTART(Op, DAG);
10469 case ISD::VAARG: return LowerVAARG(Op, DAG);
10470 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
10472 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG);
10473 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10474 case ISD::GET_DYNAMIC_AREA_OFFSET:
10475 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10477 // Exception handling lowering.
10478 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG);
10479 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
10480 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
10482 case ISD::LOAD: return LowerLOAD(Op, DAG);
10483 case ISD::STORE: return LowerSTORE(Op, DAG);
10484 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
10485 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
10486 case ISD::FP_TO_UINT:
10487 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10488 case ISD::UINT_TO_FP:
10489 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
10490 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
10492 // Lower 64-bit shifts.
10493 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
10494 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
10495 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
10497 // Vector-related lowering.
10498 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
10499 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
10500 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10501 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
10502 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
10503 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
10504 case ISD::MUL: return LowerMUL(Op, DAG);
10505 case ISD::ABS: return LowerABS(Op, DAG);
10506 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
10508 // For counter-based loop handling.
10509 case ISD::INTRINSIC_W_CHAIN: return SDValue();
10511 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
10513 // Frame & Return address.
10514 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
10515 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
10517 case ISD::INTRINSIC_VOID:
10518 return LowerINTRINSIC_VOID(Op, DAG);
10521 return LowerREM(Op, DAG);
10523 return LowerBSWAP(Op, DAG);
10524 case ISD::ATOMIC_CMP_SWAP:
10525 return LowerATOMIC_CMP_SWAP(Op, DAG);
10529 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10530 SmallVectorImpl<SDValue>&Results,
10531 SelectionDAG &DAG) const {
10533 switch (N->getOpcode()) {
10535 llvm_unreachable("Do not know how to custom type legalize this operation!");
10536 case ISD::READCYCLECOUNTER: {
10537 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10538 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10540 Results.push_back(RTB);
10541 Results.push_back(RTB.getValue(1));
10542 Results.push_back(RTB.getValue(2));
10545 case ISD::INTRINSIC_W_CHAIN: {
10546 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10547 Intrinsic::loop_decrement)
10550 assert(N->getValueType(0) == MVT::i1 &&
10551 "Unexpected result type for CTR decrement intrinsic");
10552 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10553 N->getValueType(0));
10554 SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10555 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10558 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10559 Results.push_back(NewInt.getValue(1));
10563 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10566 EVT VT = N->getValueType(0);
10568 if (VT == MVT::i64) {
10569 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10571 Results.push_back(NewNode);
10572 Results.push_back(NewNode.getValue(1));
10576 case ISD::FP_TO_SINT:
10577 case ISD::FP_TO_UINT:
10578 // LowerFP_TO_INT() can only handle f32 and f64.
10579 if (N->getOperand(0).getValueType() == MVT::ppcf128)
10581 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10583 case ISD::TRUNCATE: {
10584 EVT TrgVT = N->getValueType(0);
10585 EVT OpVT = N->getOperand(0).getValueType();
10586 if (TrgVT.isVector() &&
10587 isOperationCustom(N->getOpcode(), TrgVT) &&
10588 OpVT.getSizeInBits() <= 128 &&
10589 isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
10590 Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
10594 // Don't handle bitcast here.
10596 case ISD::FP_EXTEND:
10597 SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10599 Results.push_back(Lowered);
10604 //===----------------------------------------------------------------------===//
10605 // Other Lowering Code
10606 //===----------------------------------------------------------------------===//
10608 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10609 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10610 Function *Func = Intrinsic::getDeclaration(M, Id);
10611 return Builder.CreateCall(Func, {});
10614 // The mappings for emitLeadingFence/emitTrailingFence are taken from
10615 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10616 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10618 AtomicOrdering Ord) const {
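// i.e. seq_cst gets a full sync before the access, release and acq_rel
// get lwsync, and weaker orderings need no leading fence.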
10619 if (Ord == AtomicOrdering::SequentiallyConsistent)
10620 return callIntrinsic(Builder, Intrinsic::ppc_sync);
10621 if (isReleaseOrStronger(Ord))
10622 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10626 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10628 AtomicOrdering Ord) const {
10629 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10630 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10631 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10632 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10633 if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10634 return Builder.CreateCall(
10635 Intrinsic::getDeclaration(
10636 Builder.GetInsertBlock()->getParent()->getParent(),
10637 Intrinsic::ppc_cfence, {Inst->getType()}),
10639     // FIXME: Can use isync for rmw operations.
10640 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10645 MachineBasicBlock *
10646 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10647 unsigned AtomicSize,
10648 unsigned BinOpcode,
10649 unsigned CmpOpcode,
10650 unsigned CmpPred) const {
10651 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10652 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10654 auto LoadMnemonic = PPC::LDARX;
10655 auto StoreMnemonic = PPC::STDCX;
10656 switch (AtomicSize) {
10658 llvm_unreachable("Unexpected size of atomic entity");
10660 LoadMnemonic = PPC::LBARX;
10661 StoreMnemonic = PPC::STBCX;
10662     assert(Subtarget.hasPartwordAtomics() && "Partword atomics required for sizes < 4");
10665 LoadMnemonic = PPC::LHARX;
10666 StoreMnemonic = PPC::STHCX;
10667     assert(Subtarget.hasPartwordAtomics() && "Partword atomics required for sizes < 4");
10670 LoadMnemonic = PPC::LWARX;
10671 StoreMnemonic = PPC::STWCX;
10674 LoadMnemonic = PPC::LDARX;
10675 StoreMnemonic = PPC::STDCX;
10679 const BasicBlock *LLVM_BB = BB->getBasicBlock();
10680 MachineFunction *F = BB->getParent();
10681 MachineFunction::iterator It = ++BB->getIterator();
10683 Register dest = MI.getOperand(0).getReg();
10684 Register ptrA = MI.getOperand(1).getReg();
10685 Register ptrB = MI.getOperand(2).getReg();
10686 Register incr = MI.getOperand(3).getReg();
10687 DebugLoc dl = MI.getDebugLoc();
10689 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10690 MachineBasicBlock *loop2MBB =
10691 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10692 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10693 F->insert(It, loopMBB);
10695 F->insert(It, loop2MBB);
10696 F->insert(It, exitMBB);
10697 exitMBB->splice(exitMBB->begin(), BB,
10698 std::next(MachineBasicBlock::iterator(MI)), BB->end());
10699 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10701 MachineRegisterInfo &RegInfo = F->getRegInfo();
10702 Register TmpReg = (!BinOpcode) ? incr :
10703 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
10704 : &PPC::GPRCRegClass);
10708 // fallthrough --> loopMBB
10709 BB->addSuccessor(loopMBB);
10712 // l[wd]arx dest, ptr
10713 // add r0, dest, incr
10714 // st[wd]cx. r0, ptr
10716 // fallthrough --> exitMBB
10720 // l[wd]arx dest, ptr
10721 // cmpl?[wd] incr, dest
10724 // st[wd]cx. dest, ptr
10726 // fallthrough --> exitMBB
10729 BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10730 .addReg(ptrA).addReg(ptrB);
10732 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10734 // Signed comparisons of byte or halfword values must be sign-extended.
10735 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10736 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10737 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10738 ExtReg).addReg(dest);
10739 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10740 .addReg(incr).addReg(ExtReg);
10742 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10743 .addReg(incr).addReg(dest);
10745 BuildMI(BB, dl, TII->get(PPC::BCC))
10746 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
10747 BB->addSuccessor(loop2MBB);
10748 BB->addSuccessor(exitMBB);
10751 BuildMI(BB, dl, TII->get(StoreMnemonic))
10752 .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
10753 BuildMI(BB, dl, TII->get(PPC::BCC))
10754 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
10755 BB->addSuccessor(loopMBB);
10756 BB->addSuccessor(exitMBB);
10764 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
10765 MachineInstr &MI, MachineBasicBlock *BB,
10766 bool is8bit, // operation
10767 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
10768   // If we support part-word atomic mnemonics, just use them.
10769 if (Subtarget.hasPartwordAtomics())
10770 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
10773 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10774 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10775   // In 64-bit mode we have to use 64 bits for addresses, even though the
10776 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
10777 // registers without caring whether they're 32 or 64, but here we're
10778 // doing actual arithmetic on the addresses.
10779 bool is64bit = Subtarget.isPPC64();
10780 bool isLittleEndian = Subtarget.isLittleEndian();
10781 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
10783 const BasicBlock *LLVM_BB = BB->getBasicBlock();
10784 MachineFunction *F = BB->getParent();
10785 MachineFunction::iterator It = ++BB->getIterator();
10787 Register dest = MI.getOperand(0).getReg();
10788 Register ptrA = MI.getOperand(1).getReg();
10789 Register ptrB = MI.getOperand(2).getReg();
10790 Register incr = MI.getOperand(3).getReg();
10791 DebugLoc dl = MI.getDebugLoc();
10793 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10794 MachineBasicBlock *loop2MBB =
10795 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10796 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10797 F->insert(It, loopMBB);
10799 F->insert(It, loop2MBB);
10800 F->insert(It, exitMBB);
10801 exitMBB->splice(exitMBB->begin(), BB,
10802 std::next(MachineBasicBlock::iterator(MI)), BB->end());
10803 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10805 MachineRegisterInfo &RegInfo = F->getRegInfo();
10806 const TargetRegisterClass *RC =
10807 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
10808 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
10810 Register PtrReg = RegInfo.createVirtualRegister(RC);
10811 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
10812 Register ShiftReg =
10813 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10814 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10815 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10816 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10817 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10818 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10819 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10820 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10821 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10824 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
10828 // fallthrough --> loopMBB
10829 BB->addSuccessor(loopMBB);
10831 // The 4-byte load must be aligned, while a char or short may be
10832 // anywhere in the word. Hence all this nasty bookkeeping code.
10833 // add ptr1, ptrA, ptrB [copy if ptrA==0]
10834 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10835 // xori shift, shift1, 24 [16]
10836 // rlwinm ptr, ptr1, 0, 0, 29
10837 // slw incr2, incr, shift
10838 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10839 // slw mask, mask2, shift
10841 // lwarx tmpDest, ptr
10842 // add tmp, tmpDest, incr2
10843 // andc tmp2, tmpDest, mask
10844 // and tmp3, tmp, mask
10845 // or tmp4, tmp3, tmp2
10846 // stwcx. tmp4, ptr
10848 // fallthrough --> exitMBB
10849 // srw dest, tmpDest, shift
10850 if (ptrA != ZeroReg) {
10851 Ptr1Reg = RegInfo.createVirtualRegister(RC);
10852 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10858   // We need to use a 32-bit subregister to avoid a register class mismatch in 64-bit mode.
10860 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
10861 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
10864 .addImm(is8bit ? 28 : 27);
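// On big-endian targets the byte lanes within the word are numbered from
// the opposite end, so flip the shift amount (xori 24 for bytes, 16 for
// halfwords); little endian can use the rlwinm result directly.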
10865 if (!isLittleEndian)
10866 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
10868 .addImm(is8bit ? 24 : 16);
10870 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10875 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10880 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
10882 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10884 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10885 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10889 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10894 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10898 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
10900 .addReg(TmpDestReg);
10901 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
10902 .addReg(TmpDestReg)
10904 BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
10906 // For unsigned comparisons, we can directly compare the shifted values.
10907 // For signed comparisons we shift and sign extend.
10908 Register SReg = RegInfo.createVirtualRegister(GPRC);
10909 BuildMI(BB, dl, TII->get(PPC::AND), SReg)
10910 .addReg(TmpDestReg)
10912 unsigned ValueReg = SReg;
10913 unsigned CmpReg = Incr2Reg;
10914 if (CmpOpcode == PPC::CMPW) {
10915 ValueReg = RegInfo.createVirtualRegister(GPRC);
10916 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
10919 Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
10920 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
10922 ValueReg = ValueSReg;
10925 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10928 BuildMI(BB, dl, TII->get(PPC::BCC))
10932 BB->addSuccessor(loop2MBB);
10933 BB->addSuccessor(exitMBB);
10936 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
10937 BuildMI(BB, dl, TII->get(PPC::STWCX))
10941 BuildMI(BB, dl, TII->get(PPC::BCC))
10942 .addImm(PPC::PRED_NE)
10945 BB->addSuccessor(loopMBB);
10946 BB->addSuccessor(exitMBB);
10951 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
10952 .addReg(TmpDestReg)
10957 llvm::MachineBasicBlock *
10958 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
10959 MachineBasicBlock *MBB) const {
10960 DebugLoc DL = MI.getDebugLoc();
10961 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10962 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
10964 MachineFunction *MF = MBB->getParent();
10965 MachineRegisterInfo &MRI = MF->getRegInfo();
10967 const BasicBlock *BB = MBB->getBasicBlock();
10968 MachineFunction::iterator I = ++MBB->getIterator();
10970 Register DstReg = MI.getOperand(0).getReg();
10971 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
10972 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
10973 Register mainDstReg = MRI.createVirtualRegister(RC);
10974 Register restoreDstReg = MRI.createVirtualRegister(RC);
10976 MVT PVT = getPointerTy(MF->getDataLayout());
10977 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
10978 "Invalid Pointer Size!");
10979 // For v = setjmp(buf), we generate
10982 // SjLjSetup mainMBB
10988 // buf[LabelOffset] = LR
10992 // v = phi(main, restore)
10995 MachineBasicBlock *thisMBB = MBB;
10996 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
10997 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
10998 MF->insert(I, mainMBB);
10999 MF->insert(I, sinkMBB);
11001 MachineInstrBuilder MIB;
11003 // Transfer the remainder of BB and its successor edges to sinkMBB.
11004 sinkMBB->splice(sinkMBB->begin(), MBB,
11005 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11006 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11008 // Note that the structure of the jmp_buf used here is not compatible
11009 // with that used by libc, and is not designed to be. Specifically, it
11010 // stores only those 'reserved' registers that LLVM does not otherwise
11011 // understand how to spill. Also, by convention, by the time this
11012 // intrinsic is called, Clang has already stored the frame address in the
11013 // first slot of the buffer and stack address in the third. Following the
11014 // X86 target code, we'll store the jump address in the second slot. We also
11015 // need to save the TOC pointer (R2) to handle jumps between shared
11016 // libraries, and that will be stored in the fourth slot. The thread
11017 // identifier (R13) is not affected.
11020 const int64_t LabelOffset = 1 * PVT.getStoreSize();
11021 const int64_t TOCOffset = 3 * PVT.getStoreSize();
11022 const int64_t BPOffset = 4 * PVT.getStoreSize();
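// Resulting jmp_buf layout, in pointer-sized slots:
//   [0] frame address (stored by Clang)   [1] resume IP (LR)
//   [2] stack address (stored by Clang)   [3] TOC (R2)   [4] base pointer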
11024   // Prepare the jump target (IP) in a register.
11025 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11026 Register LabelReg = MRI.createVirtualRegister(PtrRC);
11027 Register BufReg = MI.getOperand(1).getReg();
11029 if (Subtarget.is64BitELFABI()) {
11030 setUsesTOCBasePtr(*MBB->getParent());
11031 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11038 // Naked functions never have a base pointer, and so we use r1. For all
11039   // other functions, this decision must be delayed until PEI.
11041 if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11042 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11044 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11046 MIB = BuildMI(*thisMBB, MI, DL,
11047 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11054 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11055 MIB.addRegMask(TRI->getNoPreservedMask());
11057 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11059 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11061 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11063 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11064 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11069 BuildMI(mainMBB, DL,
11070 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11073 if (Subtarget.isPPC64()) {
11074 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11076 .addImm(LabelOffset)
11079 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11081 .addImm(LabelOffset)
11084 MIB.cloneMemRefs(MI);
11086 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11087 mainMBB->addSuccessor(sinkMBB);
11090 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11091 TII->get(PPC::PHI), DstReg)
11092 .addReg(mainDstReg).addMBB(mainMBB)
11093 .addReg(restoreDstReg).addMBB(thisMBB);
  MI.eraseFromParent();

  return sinkMBB;
}
11099 MachineBasicBlock *
11100 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11101 MachineBasicBlock *MBB) const {
11102 DebugLoc DL = MI.getDebugLoc();
11103 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11105 MachineFunction *MF = MBB->getParent();
11106 MachineRegisterInfo &MRI = MF->getRegInfo();
11108 MVT PVT = getPointerTy(MF->getDataLayout());
11109 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11110 "Invalid Pointer Size!");
11112 const TargetRegisterClass *RC =
11113 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11114 Register Tmp = MRI.createVirtualRegister(RC);
11115 // Since FP is only updated here but NOT referenced, it's treated as GPR.
11116 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11117 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
  unsigned BP =
      (PVT == MVT::i64)
          ? PPC::X30
          : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
                                                              : PPC::R30);
11124 MachineInstrBuilder MIB;
11126 const int64_t LabelOffset = 1 * PVT.getStoreSize();
11127 const int64_t SPOffset = 2 * PVT.getStoreSize();
11128 const int64_t TOCOffset = 3 * PVT.getStoreSize();
11129 const int64_t BPOffset = 4 * PVT.getStoreSize();
11131 Register BufReg = MI.getOperand(0).getReg();
  // Reload FP (the jumped-to function may not have had a
  // frame pointer, and if so, then its r31 will be restored
  // as necessary).
11136 if (PVT == MVT::i64) {
11137 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11141 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11145 MIB.cloneMemRefs(MI);
11148 if (PVT == MVT::i64) {
11149 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11150 .addImm(LabelOffset)
11153 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11154 .addImm(LabelOffset)
11157 MIB.cloneMemRefs(MI);
11160 if (PVT == MVT::i64) {
11161 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11165 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11169 MIB.cloneMemRefs(MI);
11172 if (PVT == MVT::i64) {
11173 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11177 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11181 MIB.cloneMemRefs(MI);
11184 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11185 setUsesTOCBasePtr(*MBB->getParent());
11186 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11193 BuildMI(*MBB, MI, DL,
11194 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11195 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
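  // Taken together, the emitted 64-bit sequence is roughly:
  //   ld    r31, 0(buf)           ; frame pointer
  //   ld    tmp, LabelOffset(buf) ; jump address
  //   ld    r1,  SPOffset(buf)    ; stack pointer
  //   ld    bp,  BPOffset(buf)    ; base pointer
  //   ld    r2,  TOCOffset(buf)   ; TOC pointer (64-bit SVR4 only)
  //   mtctr tmp
  //   bctr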
  MI.eraseFromParent();

  return MBB;
}
11201 MachineBasicBlock *
11202 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11203 MachineBasicBlock *BB) const {
11204 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11205 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11206 if (Subtarget.is64BitELFABI() &&
11207 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
      // Call lowering should have added an r2 operand to indicate a dependence
      // on the TOC base pointer value. It can't, however, because there is no
      // way to mark the dependence as implicit there, and so the stackmap code
      // will confuse it with a regular operand. Instead, add the dependence
      // here.
11213 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11216 return emitPatchPoint(MI, BB);
11219 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11220 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11221 return emitEHSjLjSetJmp(MI, BB);
11222 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11223 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11224 return emitEHSjLjLongJmp(MI, BB);
11227 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11229 // To "insert" these instructions we actually have to insert their
11230 // control-flow patterns.
11231 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11232 MachineFunction::iterator It = ++BB->getIterator();
11234 MachineFunction *F = BB->getParent();
11236 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11237 MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11238 MI.getOpcode() == PPC::SELECT_I8) {
11239 SmallVector<MachineOperand, 2> Cond;
11240 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11241 MI.getOpcode() == PPC::SELECT_CC_I8)
11242 Cond.push_back(MI.getOperand(4));
11244 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11245 Cond.push_back(MI.getOperand(1));
11247 DebugLoc dl = MI.getDebugLoc();
11248 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11249 MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11250 } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11251 MI.getOpcode() == PPC::SELECT_CC_F8 ||
11252 MI.getOpcode() == PPC::SELECT_CC_F16 ||
11253 MI.getOpcode() == PPC::SELECT_CC_QFRC ||
11254 MI.getOpcode() == PPC::SELECT_CC_QSRC ||
11255 MI.getOpcode() == PPC::SELECT_CC_QBRC ||
11256 MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11257 MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11258 MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11259 MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11260 MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11261 MI.getOpcode() == PPC::SELECT_CC_SPE ||
11262 MI.getOpcode() == PPC::SELECT_F4 ||
11263 MI.getOpcode() == PPC::SELECT_F8 ||
11264 MI.getOpcode() == PPC::SELECT_F16 ||
11265 MI.getOpcode() == PPC::SELECT_QFRC ||
11266 MI.getOpcode() == PPC::SELECT_QSRC ||
11267 MI.getOpcode() == PPC::SELECT_QBRC ||
11268 MI.getOpcode() == PPC::SELECT_SPE ||
11269 MI.getOpcode() == PPC::SELECT_SPE4 ||
11270 MI.getOpcode() == PPC::SELECT_VRRC ||
11271 MI.getOpcode() == PPC::SELECT_VSFRC ||
11272 MI.getOpcode() == PPC::SELECT_VSSRC ||
11273 MI.getOpcode() == PPC::SELECT_VSRC) {
11274 // The incoming instruction knows the destination vreg to set, the
11275 // condition code register to branch on, the true/false values to
11276 // select between, and a branch opcode to use.
    //   thisMBB:
    //   ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
11284 MachineBasicBlock *thisMBB = BB;
11285 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11286 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11287 DebugLoc dl = MI.getDebugLoc();
11288 F->insert(It, copy0MBB);
11289 F->insert(It, sinkMBB);
11291 // Transfer the remainder of BB and its successor edges to sinkMBB.
11292 sinkMBB->splice(sinkMBB->begin(), BB,
11293 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11294 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11296 // Next, add the true and fallthrough blocks as its successors.
11297 BB->addSuccessor(copy0MBB);
11298 BB->addSuccessor(sinkMBB);
11300 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11301 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11302 MI.getOpcode() == PPC::SELECT_F16 ||
11303 MI.getOpcode() == PPC::SELECT_SPE4 ||
11304 MI.getOpcode() == PPC::SELECT_SPE ||
11305 MI.getOpcode() == PPC::SELECT_QFRC ||
11306 MI.getOpcode() == PPC::SELECT_QSRC ||
11307 MI.getOpcode() == PPC::SELECT_QBRC ||
11308 MI.getOpcode() == PPC::SELECT_VRRC ||
11309 MI.getOpcode() == PPC::SELECT_VSFRC ||
11310 MI.getOpcode() == PPC::SELECT_VSSRC ||
11311 MI.getOpcode() == PPC::SELECT_VSRC) {
11312 BuildMI(BB, dl, TII->get(PPC::BC))
11313 .addReg(MI.getOperand(1).getReg())
11316 unsigned SelectPred = MI.getOperand(4).getImm();
11317 BuildMI(BB, dl, TII->get(PPC::BCC))
11318 .addImm(SelectPred)
11319 .addReg(MI.getOperand(1).getReg())
    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
11328 // Update machine-CFG edges
11329 BB->addSuccessor(sinkMBB);
    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11335 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11336 .addReg(MI.getOperand(3).getReg())
11338 .addReg(MI.getOperand(2).getReg())
11340 } else if (MI.getOpcode() == PPC::ReadTB) {
11341 // To read the 64-bit time-base register on a 32-bit target, we read the
11342 // two halves. Should the counter have wrapped while it was being read, we
11343 // need to try again.
11346 // mfspr Rx,TBU # load from TBU
11347 // mfspr Ry,TB # load from TB
11348 // mfspr Rz,TBU # load from TBU
11349 // cmpw crX,Rx,Rz # check if 'old'='new'
11350 // bne readLoop # branch if they're not equal
11353 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11354 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11355 DebugLoc dl = MI.getDebugLoc();
11356 F->insert(It, readMBB);
11357 F->insert(It, sinkMBB);
11359 // Transfer the remainder of BB and its successor edges to sinkMBB.
11360 sinkMBB->splice(sinkMBB->begin(), BB,
11361 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11362 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11364 BB->addSuccessor(readMBB);
11367 MachineRegisterInfo &RegInfo = F->getRegInfo();
11368 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11369 Register LoReg = MI.getOperand(0).getReg();
11370 Register HiReg = MI.getOperand(1).getReg();
11372 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11373 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11374 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11376 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11378 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11380 .addReg(ReadAgainReg);
11381 BuildMI(BB, dl, TII->get(PPC::BCC))
11382 .addImm(PPC::PRED_NE)
11386 BB->addSuccessor(readMBB);
11387 BB->addSuccessor(sinkMBB);
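    // In C terms, this is the classic split read of a 64-bit counter:
    //   do {
    //     hi = mfspr(TBU); lo = mfspr(TB);
    //   } while (hi != mfspr(TBU));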
11388 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11389 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11390 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11391 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11392 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11393 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11394 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11395 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11397 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11398 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11399 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11400 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11401 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11402 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11403 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11404 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11406 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11407 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11408 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11409 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11410 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11411 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11412 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11413 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11415 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11416 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11417 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11418 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11419 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11420 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11421 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11422 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11424 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11425 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11426 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11427 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11428 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11429 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11430 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11431 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11433 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11434 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11435 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11436 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11437 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11438 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11439 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11440 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11442 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11443 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11444 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11445 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11446 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11447 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11448 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11449 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11451 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11452 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11453 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11454 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11455 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11456 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11457 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11458 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11460 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11461 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11462 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11463 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11464 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11465 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11466 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11467 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11469 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11470 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11471 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11472 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11473 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11474 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11475 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11476 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11478 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11479 BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11480 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11481 BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11482 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11483 BB = EmitAtomicBinary(MI, BB, 4, 0);
11484 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11485 BB = EmitAtomicBinary(MI, BB, 8, 0);
11486 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11487 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11488 (Subtarget.hasPartwordAtomics() &&
11489 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11490 (Subtarget.hasPartwordAtomics() &&
11491 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11492 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11494 auto LoadMnemonic = PPC::LDARX;
11495 auto StoreMnemonic = PPC::STDCX;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Compare and swap of unknown size");
    case PPC::ATOMIC_CMP_SWAP_I8:
      LoadMnemonic = PPC::LBARX;
      StoreMnemonic = PPC::STBCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics not supported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I16:
      LoadMnemonic = PPC::LHARX;
      StoreMnemonic = PPC::STHCX;
      assert(Subtarget.hasPartwordAtomics() &&
             "Partword atomics not supported.");
      break;
    case PPC::ATOMIC_CMP_SWAP_I32:
      LoadMnemonic = PPC::LWARX;
      StoreMnemonic = PPC::STWCX;
      break;
    case PPC::ATOMIC_CMP_SWAP_I64:
      LoadMnemonic = PPC::LDARX;
      StoreMnemonic = PPC::STDCX;
      break;
    }
11518 Register dest = MI.getOperand(0).getReg();
11519 Register ptrA = MI.getOperand(1).getReg();
11520 Register ptrB = MI.getOperand(2).getReg();
11521 Register oldval = MI.getOperand(3).getReg();
11522 Register newval = MI.getOperand(4).getReg();
11523 DebugLoc dl = MI.getDebugLoc();
11525 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11526 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11527 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11528 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11529 F->insert(It, loop1MBB);
11530 F->insert(It, loop2MBB);
11531 F->insert(It, midMBB);
11532 F->insert(It, exitMBB);
11533 exitMBB->splice(exitMBB->begin(), BB,
11534 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11535 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
    // fallthrough --> loop1MBB
11540 BB->addSuccessor(loop1MBB);
    // loop1MBB:
    //   l[bhwd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- midMBB
    // loop2MBB:
    //   st[bhwd]cx. newval, ptr
    //   bne- loop1MBB
    //   b exitBB
    // midMBB:
    //   st[bhwd]cx. dest, ptr
    // exitBB:
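    // In C-like pseudocode (illustrative only), the loop above is:
    //   do {
    //     dest = load_reserved(ptr);
    //     if (dest != oldval) { store_conditional(ptr, dest); break; }
    //   } while (!store_conditional(ptr, newval));
    // where the store on the failure path exists only to clear the
    // reservation.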
11554 BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11555 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11558 BuildMI(BB, dl, TII->get(PPC::BCC))
11559 .addImm(PPC::PRED_NE)
11562 BB->addSuccessor(loop2MBB);
11563 BB->addSuccessor(midMBB);
11566 BuildMI(BB, dl, TII->get(StoreMnemonic))
11570 BuildMI(BB, dl, TII->get(PPC::BCC))
11571 .addImm(PPC::PRED_NE)
11574 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11575 BB->addSuccessor(loop1MBB);
11576 BB->addSuccessor(exitMBB);
11579 BuildMI(BB, dl, TII->get(StoreMnemonic))
11583 BB->addSuccessor(exitMBB);
11588 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11589 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
    // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them. Other registers
    // can be 32-bit.
11593 bool is64bit = Subtarget.isPPC64();
11594 bool isLittleEndian = Subtarget.isLittleEndian();
11595 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11597 Register dest = MI.getOperand(0).getReg();
11598 Register ptrA = MI.getOperand(1).getReg();
11599 Register ptrB = MI.getOperand(2).getReg();
11600 Register oldval = MI.getOperand(3).getReg();
11601 Register newval = MI.getOperand(4).getReg();
11602 DebugLoc dl = MI.getDebugLoc();
11604 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11605 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11606 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11607 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11608 F->insert(It, loop1MBB);
11609 F->insert(It, loop2MBB);
11610 F->insert(It, midMBB);
11611 F->insert(It, exitMBB);
11612 exitMBB->splice(exitMBB->begin(), BB,
11613 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11614 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11616 MachineRegisterInfo &RegInfo = F->getRegInfo();
11617 const TargetRegisterClass *RC =
11618 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11619 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11621 Register PtrReg = RegInfo.createVirtualRegister(RC);
11622 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11623 Register ShiftReg =
11624 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11625 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11626 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11627 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11628 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11629 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11630 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11631 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11632 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11633 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11634 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11636 Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11637 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
    // fallthrough --> loop1MBB
11641 BB->addSuccessor(loop1MBB);
11643 // The 4-byte load must be aligned, while a char or short may be
11644 // anywhere in the word. Hence all this nasty bookkeeping code.
11645 // add ptr1, ptrA, ptrB [copy if ptrA==0]
11646 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11647 // xori shift, shift1, 24 [16]
11648 // rlwinm ptr, ptr1, 0, 0, 29
11649 // slw newval2, newval, shift
11650 // slw oldval2, oldval,shift
11651 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11652 // slw mask, mask2, shift
11653 // and newval3, newval2, mask
11654 // and oldval3, oldval2, mask
    //  loop1MBB:
    //   lwarx tmpDest, ptr
    //   and tmp, tmpDest, mask
    //   cmpw tmp, oldval3
    //   bne- midMBB
    //  loop2MBB:
    //   andc tmp2, tmpDest, mask
    //   or tmp4, tmp2, newval3
    //   stwcx. tmp4, ptr
    //   bne- loop1MBB
    //   b exitBB
    //  midMBB:
    //   stwcx. tmpDest, ptr
    //  exitBB:
    //   srw dest, tmpDest, shift
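    // Worked example (8-bit, big-endian): for a byte at ptr1 with
    // ptr1 & 3 == 1, shift1 = 8 and shift = 8 ^ 24 = 16, so the byte lives
    // in bits 16..23 of the aligned word. mask = 0xFF << 16 selects it, the
    // loop compares and swaps only those bits, and the final srw moves the
    // result back down to the low byte.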
11670 if (ptrA != ZeroReg) {
11671 Ptr1Reg = RegInfo.createVirtualRegister(RC);
11672 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
    // We need to use a 32-bit subregister here to avoid a register class
    // mismatch in 64-bit mode.
11681 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11682 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11685 .addImm(is8bit ? 28 : 27);
11686 if (!isLittleEndian)
11687 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11689 .addImm(is8bit ? 24 : 16);
11691 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11696 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11701 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
11704 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
11708 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11710 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11711 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11715 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11718 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
11719 .addReg(NewVal2Reg)
11721 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
11722 .addReg(OldVal2Reg)
11726 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11729 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
11730 .addReg(TmpDestReg)
11732 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
11734 .addReg(OldVal3Reg);
11735 BuildMI(BB, dl, TII->get(PPC::BCC))
11736 .addImm(PPC::PRED_NE)
11739 BB->addSuccessor(loop2MBB);
11740 BB->addSuccessor(midMBB);
11743 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11744 .addReg(TmpDestReg)
11746 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
11748 .addReg(NewVal3Reg);
11749 BuildMI(BB, dl, TII->get(PPC::STWCX))
11753 BuildMI(BB, dl, TII->get(PPC::BCC))
11754 .addImm(PPC::PRED_NE)
11757 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11758 BB->addSuccessor(loop1MBB);
11759 BB->addSuccessor(exitMBB);
11762 BuildMI(BB, dl, TII->get(PPC::STWCX))
11763 .addReg(TmpDestReg)
11766 BB->addSuccessor(exitMBB);
11771 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11774 } else if (MI.getOpcode() == PPC::FADDrtz) {
11775 // This pseudo performs an FADD with rounding mode temporarily forced
11776 // to round-to-zero. We emit this via custom inserter since the FPSCR
11777 // is not modeled at the SelectionDAG level.
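    // The emitted sequence is, in effect:
    //   mffs   f_save        ; save the FPSCR
    //   mtfsb1 31            ; RN[1] = 1
    //   mtfsb0 30            ; RN[0] = 0, i.e. RN = 0b01, round toward zero
    //   fadd   dst, src1, src2
    //   mtfsf  1, f_save     ; restore the RN field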
11778 Register Dest = MI.getOperand(0).getReg();
11779 Register Src1 = MI.getOperand(1).getReg();
11780 Register Src2 = MI.getOperand(2).getReg();
11781 DebugLoc dl = MI.getDebugLoc();
11783 MachineRegisterInfo &RegInfo = F->getRegInfo();
11784 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11786 // Save FPSCR value.
11787 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
11789 // Set rounding mode to round-to-zero.
11790 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
11791 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
11793 // Perform addition.
11794 BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
11796 // Restore FPSCR value.
11797 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
11798 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11799 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
11800 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11801 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
11802 unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
11803 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
11806 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
11807 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
11809 MachineRegisterInfo &RegInfo = F->getRegInfo();
11810 Register Dest = RegInfo.createVirtualRegister(
11811 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
11813 DebugLoc Dl = MI.getDebugLoc();
11814 BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
11815 .addReg(MI.getOperand(1).getReg())
11817 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11818 MI.getOperand(0).getReg())
11819 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
11820 } else if (MI.getOpcode() == PPC::TCHECK_RET) {
11821 DebugLoc Dl = MI.getDebugLoc();
11822 MachineRegisterInfo &RegInfo = F->getRegInfo();
11823 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11824 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
11825 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11826 MI.getOperand(0).getReg())
11828 } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
11829 DebugLoc Dl = MI.getDebugLoc();
11830 unsigned Imm = MI.getOperand(1).getImm();
11831 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
11832 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
11833 MI.getOperand(0).getReg())
11834 .addReg(PPC::CR0EQ);
11835 } else if (MI.getOpcode() == PPC::SETRNDi) {
11836 DebugLoc dl = MI.getDebugLoc();
11837 Register OldFPSCRReg = MI.getOperand(0).getReg();
11839 // Save FPSCR value.
11840 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
    // The floating point rounding mode is in bits 62:63 of the FPSCR, and
    // has the following settings:
    //   00 Round to nearest
    //   01 Round to zero
    //   10 Round to +inf
    //   11 Round to -inf
    // When the operand is an immediate, use its two least significant bits
    // to set bits 62:63 of the FPSCR.
11851 unsigned Mode = MI.getOperand(1).getImm();
11852 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
11855 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
11857 } else if (MI.getOpcode() == PPC::SETRND) {
11858 DebugLoc dl = MI.getDebugLoc();
11860 // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
11861 // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
11862 // If the target doesn't have DirectMove, we should use stack to do the
11863 // conversion, because the target doesn't have the instructions like mtvsrd
11864 // or mfvsrd to do this conversion directly.
11865 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
11866 if (Subtarget.hasDirectMove()) {
11867 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
11870 // Use stack to do the register copy.
11871 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
11872 MachineRegisterInfo &RegInfo = F->getRegInfo();
11873 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
11874 if (RC == &PPC::F8RCRegClass) {
11875 // Copy register from F8RCRegClass to G8RCRegclass.
11876 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
11877 "Unsupported RegClass.");
11879 StoreOp = PPC::STFD;
11882 // Copy register from G8RCRegClass to F8RCRegclass.
11883 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
11884 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
11885 "Unsupported RegClass.");
11888 MachineFrameInfo &MFI = F->getFrameInfo();
11889 int FrameIdx = MFI.CreateStackObject(8, 8, false);
11891 MachineMemOperand *MMOStore = F->getMachineMemOperand(
11892 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11893 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
11894 MFI.getObjectAlignment(FrameIdx));
11896 // Store the SrcReg into the stack.
11897 BuildMI(*BB, MI, dl, TII->get(StoreOp))
11900 .addFrameIndex(FrameIdx)
11901 .addMemOperand(MMOStore);
11903 MachineMemOperand *MMOLoad = F->getMachineMemOperand(
11904 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
11905 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
11906 MFI.getObjectAlignment(FrameIdx));
11908 // Load from the stack where SrcReg is stored, and save to DestReg,
11909 // so we have done the RegClass conversion from RegClass::SrcReg to
11910 // RegClass::DestReg.
11911 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
11913 .addFrameIndex(FrameIdx)
11914 .addMemOperand(MMOLoad);
11918 Register OldFPSCRReg = MI.getOperand(0).getReg();
11920 // Save FPSCR value.
11921 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
11923 // When the operand is gprc register, use two least significant bits of the
11924 // register and mtfsf instruction to set the bits 62:63 of FPSCR.
11926 // copy OldFPSCRTmpReg, OldFPSCRReg
11927 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
11928 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
11929 // copy NewFPSCRReg, NewFPSCRTmpReg
11930 // mtfsf 255, NewFPSCRReg
11931 MachineOperand SrcOp = MI.getOperand(1);
11932 MachineRegisterInfo &RegInfo = F->getRegInfo();
11933 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11935 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
11937 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11938 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11940 // The first operand of INSERT_SUBREG should be a register which has
11941 // subregisters, we only care about its RegClass, so we should use an
11942 // IMPLICIT_DEF register.
11943 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
11944 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
11949 Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
11950 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
11951 .addReg(OldFPSCRTmpReg)
11956 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
11957 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
    // The mask 255 means that bits 32:63 of NewFPSCRReg are placed into bits
    // 32:63 of the FPSCR.
11961 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
11963 .addReg(NewFPSCRReg)
11967 llvm_unreachable("Unexpected instr type to insert");
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
11974 //===----------------------------------------------------------------------===//
11975 // Target Optimization Hooks
11976 //===----------------------------------------------------------------------===//
11978 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
11979 // For the estimates, convergence is quadratic, so we essentially double the
11980 // number of digits correct after every iteration. For both FRE and FRSQRTE,
11981 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
11982 // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
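  // Worked example: starting from 2^-5, the number of correct bits doubles
  // each step: 5 -> 10 -> 20 -> 40, so three steps cover f32's 24-bit
  // significand and one more covers f64's 53 bits. With 2^-14
  // (hasRecipPrec), one step reaches 2^-28 for f32 and two reach 2^-56 for
  // f64.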
11983 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
11989 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
11990 int Enabled, int &RefinementSteps,
11991 bool &UseOneConstNR,
11992 bool Reciprocal) const {
11993 EVT VT = Operand.getValueType();
11994 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
11995 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
11996 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
11997 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
11998 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
11999 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12000 if (RefinementSteps == ReciprocalEstimate::Unspecified)
12001 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12003 // The Newton-Raphson computation with a single constant does not provide
12004 // enough accuracy on some CPUs.
12005 UseOneConstNR = !Subtarget.needsTwoConstNR();
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }

  return SDValue();
}
12011 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
12014 EVT VT = Operand.getValueType();
12015 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12016 (VT == MVT::f64 && Subtarget.hasFRE()) ||
12017 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12018 (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12019 (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12020 (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12021 if (RefinementSteps == ReciprocalEstimate::Unspecified)
12022 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }

  return SDValue();
}
12028 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12029 // Note: This functionality is used only when unsafe-fp-math is enabled, and
12030 // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12031 // enabled for division), this functionality is redundant with the default
12032 // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
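  // For example, when an estimate is available, x/d, y/d, z/d can become
  // r = 1.0/d followed by x*r, y*r, z*r: one (estimated) reciprocal and
  // three multiplies instead of three full divisions.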
  switch (Subtarget.getCPUDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}
12051 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12052 // collapsed, and so we need to look through chains of them.
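// For example, given (add (add X, 8), 16), this accumulates Base = X with
// Offset = 24 even before the adds have been folded together.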
12053 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12054 int64_t& Offset, SelectionDAG &DAG) {
12055 if (DAG.isBaseWithConstantOffset(Loc)) {
12056 Base = Loc.getOperand(0);
12057 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12059 // The base might itself be a base plus an offset, and if so, accumulate
12061 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12065 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12066 unsigned Bytes, int Dist,
12067 SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;
12071 SDValue BaseLoc = Base->getBasePtr();
12072 if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
12075 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12076 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
12077 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12078 int FS = MFI.getObjectSize(FI);
12079 int BFS = MFI.getObjectSize(BFI);
12080 if (FS != BFS || FS != (int)Bytes) return false;
12081 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12084 SDValue Base1 = Loc, Base2 = BaseLoc;
12085 int64_t Offset1 = 0, Offset2 = 0;
12086 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12087 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
    return true;
12091 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12092 const GlobalValue *GV1 = nullptr;
12093 const GlobalValue *GV2 = nullptr;
12096 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12097 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12098 if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);

  return false;
}
12103 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12104 // not enforce equality of the chain operands.
12105 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12106 unsigned Bytes, int Dist,
12107 SelectionDAG &DAG) {
12108 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12109 EVT VT = LS->getMemoryVT();
12110 SDValue Loc = LS->getBasePtr();
12111 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12114 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12116 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12117 default: return false;
12118 case Intrinsic::ppc_qpx_qvlfd:
12119 case Intrinsic::ppc_qpx_qvlfda:
12122 case Intrinsic::ppc_qpx_qvlfs:
12123 case Intrinsic::ppc_qpx_qvlfsa:
12126 case Intrinsic::ppc_qpx_qvlfcd:
12127 case Intrinsic::ppc_qpx_qvlfcda:
12130 case Intrinsic::ppc_qpx_qvlfcs:
12131 case Intrinsic::ppc_qpx_qvlfcsa:
12134 case Intrinsic::ppc_qpx_qvlfiwa:
12135 case Intrinsic::ppc_qpx_qvlfiwz:
12136 case Intrinsic::ppc_altivec_lvx:
12137 case Intrinsic::ppc_altivec_lvxl:
12138 case Intrinsic::ppc_vsx_lxvw4x:
12139 case Intrinsic::ppc_vsx_lxvw4x_be:
12142 case Intrinsic::ppc_vsx_lxvd2x:
12143 case Intrinsic::ppc_vsx_lxvd2x_be:
12146 case Intrinsic::ppc_altivec_lvebx:
12149 case Intrinsic::ppc_altivec_lvehx:
12152 case Intrinsic::ppc_altivec_lvewx:
12157 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12160 if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12162 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12163 default: return false;
12164 case Intrinsic::ppc_qpx_qvstfd:
12165 case Intrinsic::ppc_qpx_qvstfda:
12168 case Intrinsic::ppc_qpx_qvstfs:
12169 case Intrinsic::ppc_qpx_qvstfsa:
12172 case Intrinsic::ppc_qpx_qvstfcd:
12173 case Intrinsic::ppc_qpx_qvstfcda:
12176 case Intrinsic::ppc_qpx_qvstfcs:
12177 case Intrinsic::ppc_qpx_qvstfcsa:
12180 case Intrinsic::ppc_qpx_qvstfiw:
12181 case Intrinsic::ppc_qpx_qvstfiwa:
12182 case Intrinsic::ppc_altivec_stvx:
12183 case Intrinsic::ppc_altivec_stvxl:
12184 case Intrinsic::ppc_vsx_stxvw4x:
12187 case Intrinsic::ppc_vsx_stxvd2x:
12190 case Intrinsic::ppc_vsx_stxvw4x_be:
12193 case Intrinsic::ppc_vsx_stxvd2x_be:
12196 case Intrinsic::ppc_altivec_stvebx:
12199 case Intrinsic::ppc_altivec_stvehx:
12202 case Intrinsic::ppc_altivec_stvewx:
12207 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
12218 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12219 SDValue Chain = LD->getChain();
12220 EVT VT = LD->getMemoryVT();
12222 SmallSet<SDNode *, 16> LoadRoots;
12223 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12224 SmallSet<SDNode *, 16> Visited;
12226 // First, search up the chain, branching to follow all token-factor operands.
12227 // If we find a consecutive load, then we're done, otherwise, record all
12228 // nodes just above the top-level loads and token factors.
12229 while (!Queue.empty()) {
12230 SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;
12234 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12235 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12238 if (!Visited.count(ChainLD->getChain().getNode()))
12239 Queue.push_back(ChainLD->getChain().getNode());
12240 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12241 for (const SDUse &O : ChainNext->ops())
12242 if (!Visited.count(O.getNode()))
12243 Queue.push_back(O.getNode());
12245 LoadRoots.insert(ChainNext);
12248 // Second, search down the chain, starting from the top-level nodes recorded
12249 // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look through
  // all loads (just the chain uses) and token factors to find a consecutive
  // load.
12256 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12257 IE = LoadRoots.end(); I != IE; ++I) {
12258 Queue.push_back(*I);
12260 while (!Queue.empty()) {
12261 SDNode *LoadRoot = Queue.pop_back_val();
    if (!Visited.insert(LoadRoot).second)
      continue;
12265 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12266 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12269 for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12270 UE = LoadRoot->use_end(); UI != UE; ++UI)
12271 if (((isa<MemSDNode>(*UI) &&
12272 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12273 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12274 Queue.push_back(*UI);
12281 /// This function is called when we have proved that a SETCC node can be replaced
/// by subtraction (and other supporting instructions) so that the result of
/// the comparison is kept in a GPR instead of a CR. This function is purely
/// for codegen purposes and has some flags to guide the codegen process.
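/// As a worked example (illustrative): for i32 operands and an unsigned
/// a < b with Size = 64, both operands are zero-extended, and
/// sub = zext(a) - zext(b) is negative exactly when a < b, so
/// (srl sub, 63) yields the i1 result. The Complement flag handles the
/// inverted predicates and Swap handles the reversed ones.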
12285 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12286 bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12287 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12289 // Zero extend the operands to the largest legal integer. Originally, they
12290 // must be of a strictly smaller size.
12291 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12292 DAG.getConstant(Size, DL, MVT::i32));
12293 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12294 DAG.getConstant(Size, DL, MVT::i32));
  // Swap if needed, depending on the condition code.
12298 std::swap(Op0, Op1);
12300 // Subtract extended integers.
12301 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12303 // Move the sign bit to the least significant position and zero out the rest.
12304 // Now the least significant bit carries the result of original comparison.
12305 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12306 DAG.getConstant(Size - 1, DL, MVT::i32));
12307 auto Final = Shifted;
  // Complement the result if needed, based on the condition code.
12311 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12312 DAG.getConstant(1, DL, MVT::i64));
12314 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12317 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12318 DAGCombinerInfo &DCI) const {
12319 assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  // The size of the integers being compared has a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
12326 if (!DCI.isAfterLegalizeDAG())
12329 // If all users of SETCC extend its value to a legal integer type
12330 // then we replace SETCC with a subtraction
12331 for (SDNode::use_iterator UI = N->use_begin(),
12332 UE = N->use_end(); UI != UE; ++UI) {
12333 if (UI->getOpcode() != ISD::ZERO_EXTEND)
12337 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12338 auto OpSize = N->getOperand(0).getValueSizeInBits();
12340 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
  if (OpSize < Size) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }
12359 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12360 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
12364 assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12365 // If we're tracking CR bits, we need to be careful that we don't have:
12366 // trunc(binary-ops(zext(x), zext(y)))
12368 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12369 // such that we're unnecessarily moving things into GPRs when it would be
12370 // better to keep them in CR bits.
12372 // Note that trunc here can be an actual i1 trunc, or can be the effective
12373 // truncation that comes from a setcc or select_cc.
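  // For example, for i1 values a and b, trunc(xor(zext(a), zext(b))) can be
  // computed entirely in CR bits as xor(a, b), avoiding a round trip through
  // the GPRs.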
12374 if (N->getOpcode() == ISD::TRUNCATE &&
12375 N->getValueType(0) != MVT::i1)
12378 if (N->getOperand(0).getValueType() != MVT::i32 &&
12379 N->getOperand(0).getValueType() != MVT::i64)
12382 if (N->getOpcode() == ISD::SETCC ||
12383 N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
12387 cast<CondCodeSDNode>(N->getOperand(
12388 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12389 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12391 if (ISD::isSignedIntSetCC(CC)) {
12392 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12393 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12395 } else if (ISD::isUnsignedIntSetCC(CC)) {
12396 if (!DAG.MaskedValueIsZero(N->getOperand(0),
12397 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12398 !DAG.MaskedValueIsZero(N->getOperand(1),
12399 APInt::getHighBitsSet(OpBits, OpBits-1)))
12400 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12403 // This is neither a signed nor an unsigned comparison, just make sure
12404 // that the high bits are equal.
12405 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12406 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12408 // We don't really care about what is known about the first bit (if
12409 // anything), so clear it in all masks prior to comparing them.
12410 Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
12411 Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
12413 if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
12418 // We now know that the higher-order bits are irrelevant, we just need to
12419 // make sure that all of the intermediate operations are bit operations, and
12420 // all inputs are extensions.
12421 if (N->getOperand(0).getOpcode() != ISD::AND &&
12422 N->getOperand(0).getOpcode() != ISD::OR &&
12423 N->getOperand(0).getOpcode() != ISD::XOR &&
12424 N->getOperand(0).getOpcode() != ISD::SELECT &&
12425 N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12426 N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12427 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12428 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12429 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12432 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12433 N->getOperand(1).getOpcode() != ISD::AND &&
12434 N->getOperand(1).getOpcode() != ISD::OR &&
12435 N->getOperand(1).getOpcode() != ISD::XOR &&
12436 N->getOperand(1).getOpcode() != ISD::SELECT &&
12437 N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12438 N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12439 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12440 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12441 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12444 SmallVector<SDValue, 4> Inputs;
12445 SmallVector<SDValue, 8> BinOps, PromOps;
12446 SmallPtrSet<SDNode *, 16> Visited;
12448 for (unsigned i = 0; i < 2; ++i) {
12449 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12450 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12451 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12452 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12453 isa<ConstantSDNode>(N->getOperand(i)))
12454 Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }
12462 // Visit all inputs, collect all binary operations (and, or, xor and
12463 // select) that are all fed by extensions.
12464 while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;
12471 PromOps.push_back(BinOp);
12473 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12474 // The condition of the select is not promoted.
12475 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12477 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12480 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12481 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12482 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12483 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12484 isa<ConstantSDNode>(BinOp.getOperand(i))) {
12485 Inputs.push_back(BinOp.getOperand(i));
12486 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12487 BinOp.getOperand(i).getOpcode() == ISD::OR ||
12488 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12489 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12490 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12491 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12492 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12493 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12494 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12495 BinOps.push_back(BinOp.getOperand(i));
12497 // We have an input that is not an extension or another binary
12498 // operation; we'll abort this transformation.
12504 // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
12507 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12508 if (isa<ConstantSDNode>(Inputs[i]))
12511 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12512 UE = Inputs[i].getNode()->use_end();
12514 SDNode *User = *UI;
12515 if (User != N && !Visited.count(User))
12518 // Make sure that we're not going to promote the non-output-value
12519 // operand(s) or SELECT or SELECT_CC.
12520 // FIXME: Although we could sometimes handle this, and it does occur in
12521 // practice that one of the condition inputs to the select is also one of
12522 // the outputs, we currently can't deal with this.
12523 if (User->getOpcode() == ISD::SELECT) {
12524 if (User->getOperand(0) == Inputs[i])
12526 } else if (User->getOpcode() == ISD::SELECT_CC) {
12527 if (User->getOperand(0) == Inputs[i] ||
12528 User->getOperand(1) == Inputs[i])
12534 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12535 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12536 UE = PromOps[i].getNode()->use_end();
12538 SDNode *User = *UI;
12539 if (User != N && !Visited.count(User))
12542 // Make sure that we're not going to promote the non-output-value
12543 // operand(s) or SELECT or SELECT_CC.
12544 // FIXME: Although we could sometimes handle this, and it does occur in
12545 // practice that one of the condition inputs to the select is also one of
12546 // the outputs, we currently can't deal with this.
12547 if (User->getOpcode() == ISD::SELECT) {
12548 if (User->getOperand(0) == PromOps[i])
12550 } else if (User->getOpcode() == ISD::SELECT_CC) {
12551 if (User->getOperand(0) == PromOps[i] ||
12552 User->getOperand(1) == PromOps[i])
12558 // Replace all inputs with the extension operand.
12559 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12560 // Constants may have users outside the cluster of to-be-promoted nodes,
12561 // and so we need to replace those as we do the promotions.
12562 if (isa<ConstantSDNode>(Inputs[i]))
12565 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
12568 std::list<HandleSDNode> PromOpHandles;
12569 for (auto &PromOp : PromOps)
12570 PromOpHandles.emplace_back(PromOp);
12572 // Replace all operations (these are all the same, but have a different
12573 // (i1) return type). DAG.getNode will validate that the types of
12574 // a binary operator match, so go through the list in reverse so that
12575 // we've likely promoted both operands first. Any intermediate truncations or
12576 // extensions disappear.
12577 while (!PromOpHandles.empty()) {
12578 SDValue PromOp = PromOpHandles.back().getValue();
12579 PromOpHandles.pop_back();
12581 if (PromOp.getOpcode() == ISD::TRUNCATE ||
12582 PromOp.getOpcode() == ISD::SIGN_EXTEND ||
12583 PromOp.getOpcode() == ISD::ZERO_EXTEND ||
12584 PromOp.getOpcode() == ISD::ANY_EXTEND) {
12585 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
12586 PromOp.getOperand(0).getValueType() != MVT::i1) {
12587 // The operand is not yet ready (see comment below).
12588 PromOpHandles.emplace_front(PromOp);
12592 SDValue RepValue = PromOp.getOperand(0);
12593 if (isa<ConstantSDNode>(RepValue))
12594 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
12596 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
    unsigned C;
    switch (PromOp.getOpcode()) {
    default:             C = 0; break;
    case ISD::SELECT:    C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }
12607 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12608 PromOp.getOperand(C).getValueType() != MVT::i1) ||
12609 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12610 PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
12611 // The to-be-promoted operands of this node have not yet been
12612 // promoted (this should be rare because we're going through the
12613 // list backward, but if one of the operands has several users in
12614 // this cluster of to-be-promoted nodes, it is possible).
12615 PromOpHandles.emplace_front(PromOp);
12619 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12620 PromOp.getNode()->op_end());
12622 // If there are any constant inputs, make sure they're replaced now.
12623 for (unsigned i = 0; i < 2; ++i)
12624 if (isa<ConstantSDNode>(Ops[C+i]))
12625 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
12627 DAG.ReplaceAllUsesOfValueWith(PromOp,
12628 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
12631 // Now we're left with the initial truncation itself.
12632 if (N->getOpcode() == ISD::TRUNCATE)
12633 return N->getOperand(0);
12635 // Otherwise, this is a comparison. The operands to be compared have just
12636 // changed type (to i1), but everything else is the same.
12637 return SDValue(N, 0);
12640 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
12641 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
12645 // If we're tracking CR bits, we need to be careful that we don't have:
12646 // zext(binary-ops(trunc(x), trunc(y)))
12648 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
12649 // such that we're unnecessarily moving things into CR bits that can more
12650 // efficiently stay in GPRs. Note that if we're not certain that the high
12651 // bits are set as required by the final extension, we still may need to do
12652 // some masking to get the proper behavior.
12654 // This same functionality is important on PPC64 when dealing with
12655 // 32-to-64-bit extensions; these occur often when 32-bit values are used as
12656 // the return values of functions. Because it is so similar, it is handled
12659 if (N->getValueType(0) != MVT::i32 &&
12660 N->getValueType(0) != MVT::i64)
12663 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
12664 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
12667 if (N->getOperand(0).getOpcode() != ISD::AND &&
12668 N->getOperand(0).getOpcode() != ISD::OR &&
12669 N->getOperand(0).getOpcode() != ISD::XOR &&
12670 N->getOperand(0).getOpcode() != ISD::SELECT &&
12671 N->getOperand(0).getOpcode() != ISD::SELECT_CC)
12674 SmallVector<SDValue, 4> Inputs;
12675 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
12676 SmallPtrSet<SDNode *, 16> Visited;
12678 // Visit all inputs, collect all binary operations (and, or, xor and
12679 // select) that are all fed by truncations.
12680 while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;
12687 PromOps.push_back(BinOp);
12689 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12690 // The condition of the select is not promoted.
12691 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12693 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12696 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12697 isa<ConstantSDNode>(BinOp.getOperand(i))) {
12698 Inputs.push_back(BinOp.getOperand(i));
12699 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12700 BinOp.getOperand(i).getOpcode() == ISD::OR ||
12701 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12702 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12703 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
12704 BinOps.push_back(BinOp.getOperand(i));
12705 } else {
12706 // We have an input that is not a truncation or another binary
12707 // operation; we'll abort this transformation.
12708 return SDValue();
12709 }
12710 }
12711 }
12713 // The operands of a select that must be truncated when the select is
12714 // promoted because the operand is actually part of the to-be-promoted set.
12715 DenseMap<SDNode *, EVT> SelectTruncOp[2];
12717 // Make sure that this is a self-contained cluster of operations (which
12718 // is not quite the same thing as saying that everything has only one
12719 // use).
12720 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12721 if (isa<ConstantSDNode>(Inputs[i]))
12722 continue;
12724 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12725 UE = Inputs[i].getNode()->use_end();
12726 UI != UE; ++UI) {
12727 SDNode *User = *UI;
12728 if (User != N && !Visited.count(User))
12729 return SDValue();
12731 // If we're going to promote the non-output-value operand(s) or SELECT or
12732 // SELECT_CC, record them for truncation.
12733 if (User->getOpcode() == ISD::SELECT) {
12734 if (User->getOperand(0) == Inputs[i])
12735 SelectTruncOp[0].insert(std::make_pair(User,
12736 User->getOperand(0).getValueType()));
12737 } else if (User->getOpcode() == ISD::SELECT_CC) {
12738 if (User->getOperand(0) == Inputs[i])
12739 SelectTruncOp[0].insert(std::make_pair(User,
12740 User->getOperand(0).getValueType()));
12741 if (User->getOperand(1) == Inputs[i])
12742 SelectTruncOp[1].insert(std::make_pair(User,
12743 User->getOperand(1).getValueType()));
12744 }
12745 }
12746 }
12748 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12749 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12750 UE = PromOps[i].getNode()->use_end();
12751 UI != UE; ++UI) {
12752 SDNode *User = *UI;
12753 if (User != N && !Visited.count(User))
12754 return SDValue();
12756 // If we're going to promote the non-output-value operand(s) or SELECT or
12757 // SELECT_CC, record them for truncation.
12758 if (User->getOpcode() == ISD::SELECT) {
12759 if (User->getOperand(0) == PromOps[i])
12760 SelectTruncOp[0].insert(std::make_pair(User,
12761 User->getOperand(0).getValueType()));
12762 } else if (User->getOpcode() == ISD::SELECT_CC) {
12763 if (User->getOperand(0) == PromOps[i])
12764 SelectTruncOp[0].insert(std::make_pair(User,
12765 User->getOperand(0).getValueType()));
12766 if (User->getOperand(1) == PromOps[i])
12767 SelectTruncOp[1].insert(std::make_pair(User,
12768 User->getOperand(1).getValueType()));
12769 }
12770 }
12771 }
12773 unsigned PromBits = N->getOperand(0).getValueSizeInBits();
12774 bool ReallyNeedsExt = false;
12775 if (N->getOpcode() != ISD::ANY_EXTEND) {
12776 // If all of the inputs are not already sign/zero extended, then
12777 // we'll still need to do that at the end.
12778 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12779 if (isa<ConstantSDNode>(Inputs[i]))
12780 continue;
12782 unsigned OpBits =
12783 Inputs[i].getOperand(0).getValueSizeInBits();
12784 assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
12786 if ((N->getOpcode() == ISD::ZERO_EXTEND &&
12787 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
12788 APInt::getHighBitsSet(OpBits,
12789 OpBits-PromBits))) ||
12790 (N->getOpcode() == ISD::SIGN_EXTEND &&
12791 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
12792 (OpBits-(PromBits-1)))) {
12793 ReallyNeedsExt = true;
12794 break;
12795 }
12796 }
12797 }
12799 // Replace all inputs, either with the truncation operand, or a
12800 // truncation or extension to the final output type.
12801 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12802 // Constant inputs need to be replaced with the to-be-promoted nodes that
12803 // use them because they might have users outside of the cluster of
12804 // to-be-promoted nodes.
12805 if (isa<ConstantSDNode>(Inputs[i]))
12806 continue;
12808 SDValue InSrc = Inputs[i].getOperand(0);
12809 if (Inputs[i].getValueType() == N->getValueType(0))
12810 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
12811 else if (N->getOpcode() == ISD::SIGN_EXTEND)
12812 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12813 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
12814 else if (N->getOpcode() == ISD::ZERO_EXTEND)
12815 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12816 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
12818 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
12819 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
12820 }
12822 std::list<HandleSDNode> PromOpHandles;
12823 for (auto &PromOp : PromOps)
12824 PromOpHandles.emplace_back(PromOp);
12826 // Replace all operations (these are all the same, but have a different
12827 // (promoted) return type). DAG.getNode will validate that the types of
12828 // a binary operator match, so go through the list in reverse so that
12829 // we've likely promoted both operands first.
12830 while (!PromOpHandles.empty()) {
12831 SDValue PromOp = PromOpHandles.back().getValue();
12832 PromOpHandles.pop_back();
12834 unsigned C;
12835 switch (PromOp.getOpcode()) {
12836 default: C = 0; break;
12837 case ISD::SELECT: C = 1; break;
12838 case ISD::SELECT_CC: C = 2; break;
12839 }
12841 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
12842 PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
12843 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
12844 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
12845 // The to-be-promoted operands of this node have not yet been
12846 // promoted (this should be rare because we're going through the
12847 // list backward, but if one of the operands has several users in
12848 // this cluster of to-be-promoted nodes, it is possible).
12849 PromOpHandles.emplace_front(PromOp);
12850 continue;
12851 }
12853 // For SELECT and SELECT_CC nodes, we do a similar check for any
12854 // to-be-promoted comparison inputs.
12855 if (PromOp.getOpcode() == ISD::SELECT ||
12856 PromOp.getOpcode() == ISD::SELECT_CC) {
12857 if ((SelectTruncOp[0].count(PromOp.getNode()) &&
12858 PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
12859 (SelectTruncOp[1].count(PromOp.getNode()) &&
12860 PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
12861 PromOpHandles.emplace_front(PromOp);
12862 continue;
12863 }
12864 }
12866 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
12867 PromOp.getNode()->op_end());
12869 // If this node has constant inputs, then they'll need to be promoted here.
12870 for (unsigned i = 0; i < 2; ++i) {
12871 if (!isa<ConstantSDNode>(Ops[C+i]))
12872 continue;
12873 if (Ops[C+i].getValueType() == N->getValueType(0))
12874 continue;
12876 if (N->getOpcode() == ISD::SIGN_EXTEND)
12877 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12878 else if (N->getOpcode() == ISD::ZERO_EXTEND)
12879 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12881 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
12882 }
12884 // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
12885 // truncate them again to the original value type.
12886 if (PromOp.getOpcode() == ISD::SELECT ||
12887 PromOp.getOpcode() == ISD::SELECT_CC) {
12888 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
12889 if (SI0 != SelectTruncOp[0].end())
12890 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
12891 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
12892 if (SI1 != SelectTruncOp[1].end())
12893 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
12894 }
12896 DAG.ReplaceAllUsesOfValueWith(PromOp,
12897 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
12898 }
12900 // Now we're left with the initial extension itself.
12901 if (!ReallyNeedsExt)
12902 return N->getOperand(0);
12904 // To zero extend, just mask off everything except for the first bit (in the
12905 // i1 case).
12906 if (N->getOpcode() == ISD::ZERO_EXTEND)
12907 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
12908 DAG.getConstant(APInt::getLowBitsSet(
12909 N->getValueSizeInBits(0), PromBits),
12910 dl, N->getValueType(0)));
12912 assert(N->getOpcode() == ISD::SIGN_EXTEND &&
12913 "Invalid extension type");
12914 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
12915 SDValue ShiftCst =
12916 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
12917 return DAG.getNode(
12918 ISD::SRA, dl, N->getValueType(0),
12919 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
12920 ShiftCst);
12921 }
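// Editor's illustration (not from the original source): the two extension
// idioms built above, written as a minimal C++ sketch for PromBits == 1 and
// a 32-bit result type:
//   uint32_t zext1(uint32_t x) { return x & 0x1u; }                 // AND mask
//   int32_t  sext1(uint32_t x) { return (int32_t)(x << 31) >> 31; } // SHL+SRA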
12923 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
12924 DAGCombinerInfo &DCI) const {
12925 assert(N->getOpcode() == ISD::SETCC &&
12926 "Should be called with a SETCC node");
12928 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12929 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
12930 SDValue LHS = N->getOperand(0);
12931 SDValue RHS = N->getOperand(1);
12933 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
12934 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
12935 LHS.hasOneUse())
12936 std::swap(LHS, RHS);
12938 // x == 0-y --> x+y == 0
12939 // x != 0-y --> x+y != 0
12940 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
12941 RHS.hasOneUse()) {
12942 SDLoc DL(N);
12943 SelectionDAG &DAG = DCI.DAG;
12944 EVT VT = N->getValueType(0);
12945 EVT OpVT = LHS.getValueType();
12946 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
12947 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
12948 }
12949 }
12951 return DAGCombineTruncBoolExt(N, DCI);
12952 }
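// Editor's illustration (not from the original source): for EQ/NE only, the
// combine above performs
//   (setcc x, (sub 0, y), seteq)  -->  (setcc (add x, y), 0, seteq)
// replacing the subtraction-from-zero with an addition whose result is
// compared against zero.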
12954 // Is this an extending load from an f32 to an f64?
12955 static bool isFPExtLoad(SDValue Op) {
12956 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
12957 return LD->getExtensionType() == ISD::EXTLOAD &&
12958 Op.getValueType() == MVT::f64;
12959 return false;
12960 }
12962 /// Reduces the number of fp-to-int conversions when building a vector.
12963 ///
12964 /// If this vector is built out of floating to integer conversions,
12965 /// transform it to a vector built out of floating point values followed by a
12966 /// single floating to integer conversion of the vector.
12967 /// Namely (build_vector (fptosi $A), (fptosi $B), ...)
12968 /// becomes (fptosi (build_vector ($A, $B, ...)))
12969 SDValue PPCTargetLowering::
12970 combineElementTruncationToVectorTruncation(SDNode *N,
12971 DAGCombinerInfo &DCI) const {
12972 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
12973 "Should be called with a BUILD_VECTOR node");
12975 SelectionDAG &DAG = DCI.DAG;
12976 SDLoc dl(N);
12978 SDValue FirstInput = N->getOperand(0);
12979 assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
12980 "The input operand must be an fp-to-int conversion.");
12982 // This combine happens after legalization so the fp_to_[su]i nodes are
12983 // already converted to PPCISD nodes.
12984 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
12985 if (FirstConversion == PPCISD::FCTIDZ ||
12986 FirstConversion == PPCISD::FCTIDUZ ||
12987 FirstConversion == PPCISD::FCTIWZ ||
12988 FirstConversion == PPCISD::FCTIWUZ) {
12989 bool IsSplat = true;
12990 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
12991 FirstConversion == PPCISD::FCTIWUZ;
12992 EVT SrcVT = FirstInput.getOperand(0).getValueType();
12993 SmallVector<SDValue, 4> Ops;
12994 EVT TargetVT = N->getValueType(0);
12995 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
12996 SDValue NextOp = N->getOperand(i);
12997 if (NextOp.getOpcode() != PPCISD::MFVSR)
12998 return SDValue();
12999 unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13000 if (NextConversion != FirstConversion)
13001 return SDValue();
13002 // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13003 // This is not valid if the input was originally double precision. It is
13004 // also not profitable to do unless this is an extending load in which
13005 // case doing this combine will allow us to combine consecutive loads.
13006 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13007 return SDValue();
13008 if (N->getOperand(i) != FirstInput)
13009 IsSplat = false;
13010 }
13012 // If this is a splat, we leave it as-is since there will be only a single
13013 // fp-to-int conversion followed by a splat of the integer. This is better
13014 // for 32-bit and smaller ints and neutral for 64-bit ints.
13015 if (IsSplat)
13016 return SDValue();
13018 // Now that we know we have the right type of node, get its operands
13019 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13020 SDValue In = N->getOperand(i).getOperand(0);
13022 // For 32-bit values, we need to add an FP_ROUND node (if we made it
13023 // here, we know that all inputs are extending loads so this is safe).
13024 if (In.isUndef())
13025 Ops.push_back(DAG.getUNDEF(SrcVT));
13026 else if (Is32Bit) {
13027 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13028 MVT::f32, In.getOperand(0),
13029 DAG.getIntPtrConstant(1, dl));
13030 Ops.push_back(Trunc);
13031 }
13032 else
13033 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13034 }
13036 unsigned Opcode;
13037 if (FirstConversion == PPCISD::FCTIDZ ||
13038 FirstConversion == PPCISD::FCTIWZ)
13039 Opcode = ISD::FP_TO_SINT;
13040 else
13041 Opcode = ISD::FP_TO_UINT;
13043 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13044 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13045 return DAG.getNode(Opcode, dl, TargetVT, BV);
13046 }
13047 return SDValue();
13048 }
13050 /// Reduce the number of loads when building a vector.
13051 ///
13052 /// Building a vector out of multiple loads can be converted to a load
13053 /// of the vector type if the loads are consecutive. If the loads are
13054 /// consecutive but in descending order, a shuffle is added at the end
13055 /// to reorder the vector.
13056 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13057 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13058 "Should be called with a BUILD_VECTOR node");
13062 // Return early for non byte-sized types, as they can't be consecutive.
13063 if (!N->getValueType(0).getVectorElementType().isByteSized())
13064 return SDValue();
13066 bool InputsAreConsecutiveLoads = true;
13067 bool InputsAreReverseConsecutive = true;
13068 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13069 SDValue FirstInput = N->getOperand(0);
13070 bool IsRoundOfExtLoad = false;
13072 if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13073 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13074 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13075 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13076 }
13077 // Not a build vector of (possibly fp_rounded) loads.
13078 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13079 N->getNumOperands() == 1)
13080 return SDValue();
13082 for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13083 // If any inputs are fp_round(extload), they all must be.
13084 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13085 return SDValue();
13087 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13088 N->getOperand(i);
13089 if (NextInput.getOpcode() != ISD::LOAD)
13090 return SDValue();
13092 SDValue PreviousInput =
13093 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13094 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13095 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13097 // If any inputs are fp_round(extload), they all must be.
13098 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13099 return SDValue();
13101 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13102 InputsAreConsecutiveLoads = false;
13103 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13104 InputsAreReverseConsecutive = false;
13105 }
13106 // Exit early if the loads are neither consecutive nor reverse consecutive.
13107 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13108 return SDValue();
13111 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13112 "The loads cannot be both consecutive and reverse consecutive.");
13114 SDValue FirstLoadOp =
13115 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13116 SDValue LastLoadOp =
13117 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13118 N->getOperand(N->getNumOperands()-1);
13120 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13121 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13122 if (InputsAreConsecutiveLoads) {
13123 assert(LD1 && "Input needs to be a LoadSDNode.");
13124 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13125 LD1->getBasePtr(), LD1->getPointerInfo(),
13126 LD1->getAlignment());
13127 }
13128 if (InputsAreReverseConsecutive) {
13129 assert(LDL && "Input needs to be a LoadSDNode.");
13130 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13131 LDL->getBasePtr(), LDL->getPointerInfo(),
13132 LDL->getAlignment());
13133 SmallVector<int, 16> Ops;
13134 for (int i = N->getNumOperands() - 1; i >= 0; i--)
13135 Ops.push_back(i);
13137 return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13138 DAG.getUNDEF(N->getValueType(0)), Ops);
13139 }
13140 return SDValue();
13141 }
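// Editor's illustration (not from the original source): with four f32 loads
// from consecutive addresses,
//   (build_vector (load a), (load a+4), (load a+8), (load a+12))
// becomes a single v4f32 load of a; if the same loads appear in descending
// address order, the v4f32 load is instead taken from the last operand's
// address and followed by a <3,2,1,0> vector_shuffle.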
13143 // This function adds the required vector_shuffle needed to get
13144 // the elements of the vector extract in the correct position
13145 // as specified by the CorrectElems encoding.
13146 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13147 SDValue Input, uint64_t Elems,
13148 uint64_t CorrectElems) {
13149 SDLoc dl(N);
13151 unsigned NumElems = Input.getValueType().getVectorNumElements();
13152 SmallVector<int, 16> ShuffleMask(NumElems, -1);
13154 // Knowing the element indices being extracted from the original
13155 // vector and the order in which they're being inserted, just put
13156 // them at element indices required for the instruction.
13157 for (unsigned i = 0; i < N->getNumOperands(); i++) {
13158 if (DAG.getDataLayout().isLittleEndian())
13159 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13160 else
13161 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13162 CorrectElems = CorrectElems >> 8;
13163 Elems = Elems >> 8;
13164 }
13166 SDValue Shuffle =
13167 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13168 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13170 EVT Ty = N->getValueType(0);
13171 SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
13172 return BV;
13173 }
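// Editor's illustration (not from the original source): decoding the nibble
// encoding used above for 0x3074B8FC (byte -> word). One byte is consumed per
// build_vector operand, least significant byte first:
//   0xFC -> LE source index 0xC, BE source index 0xF
//   0xB8 -> LE 0x8, BE 0xB
//   0x74 -> LE 0x4, BE 0x7
//   0x30 -> LE 0x0, BE 0x3
// matching the allowed index lists documented in combineBVOfVecSExt below.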
13175 // Look for build vector patterns where input operands come from sign
13176 // extended vector_extract elements of specific indices. If the correct indices
13177 // aren't used, add a vector shuffle to fix up the indices and create a new
13178 // PPCISD::SExtVElems node which selects the vector sign extend instructions
13179 // during instruction selection.
13180 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13181 // This array encodes the indices that the vector sign extend instructions
13182 // extract from when extending from one type to another for both BE and LE.
13183 // The right nibble of each byte corresponds to the LE indices,
13184 // and the left nibble of each byte corresponds to the BE indices.
13185 // For example: 0x3074B8FC byte->word
13186 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13187 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13188 // For example: 0x000070F8 byte->double word
13189 // For LE: the allowed indices are: 0x0,0x8
13190 // For BE: the allowed indices are: 0x7,0xF
13191 uint64_t TargetElems[] = {
13192 0x3074B8FC, // b->w
13193 0x000070F8, // b->d
13194 0x10325476, // h->w
13195 0x00003074, // h->d
13196 0x00001032, // w->d
13197 };
13199 uint64_t Elems = 0;
13200 int Index;
13201 SDValue Input;
13203 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13204 if (!Op)
13205 return false;
13206 if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13207 Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13208 return false;
13210 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13211 // of the right width.
13212 SDValue Extract = Op.getOperand(0);
13213 if (Extract.getOpcode() == ISD::ANY_EXTEND)
13214 Extract = Extract.getOperand(0);
13215 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13216 return false;
13218 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13219 if (!ExtOp)
13220 return false;
13222 Index = ExtOp->getZExtValue();
13223 if (Input && Input != Extract.getOperand(0))
13224 return false;
13226 if (!Input)
13227 Input = Extract.getOperand(0);
13229 Elems = Elems << 8;
13230 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13231 Elems |= Index;
13233 return true;
13234 };
13236 // If the build vector operands aren't sign-extended vector extracts
13237 // of the same input vector, then return.
13238 for (unsigned i = 0; i < N->getNumOperands(); i++) {
13239 if (!isSExtOfVecExtract(N->getOperand(i))) {
13240 return SDValue();
13241 }
13242 }
13244 // If the vector extract indices are not correct, add the appropriate
13245 // vector_shuffle.
13246 int TgtElemArrayIdx;
13247 int InputSize = Input.getValueType().getScalarSizeInBits();
13248 int OutputSize = N->getValueType(0).getScalarSizeInBits();
13249 if (InputSize + OutputSize == 40)
13250 TgtElemArrayIdx = 0;
13251 else if (InputSize + OutputSize == 72)
13252 TgtElemArrayIdx = 1;
13253 else if (InputSize + OutputSize == 48)
13254 TgtElemArrayIdx = 2;
13255 else if (InputSize + OutputSize == 80)
13256 TgtElemArrayIdx = 3;
13257 else if (InputSize + OutputSize == 96)
13258 TgtElemArrayIdx = 4;
13259 else
13260 return SDValue();
13262 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13263 CorrectElems = DAG.getDataLayout().isLittleEndian()
13264 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13265 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13266 if (Elems != CorrectElems) {
13267 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13268 }
13270 // Regular lowering will catch cases where a shuffle is not needed.
13271 return SDValue();
13272 }
13274 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13275 DAGCombinerInfo &DCI) const {
13276 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13277 "Should be called with a BUILD_VECTOR node");
13279 SelectionDAG &DAG = DCI.DAG;
13280 SDLoc dl(N);
13282 if (!Subtarget.hasVSX())
13283 return SDValue();
13285 // The target independent DAG combiner will leave a build_vector of
13286 // float-to-int conversions intact. We can generate MUCH better code for
13287 // a float-to-int conversion of a vector of floats.
13288 SDValue FirstInput = N->getOperand(0);
13289 if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13290 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13291 if (Reduced)
13292 return Reduced;
13293 }
13295 // If we're building a vector out of consecutive loads, just load that
13296 // vector type.
13297 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13298 if (Reduced)
13299 return Reduced;
13301 // If we're building a vector out of extended elements from another vector
13302 // we have P9 vector integer extend instructions. The code assumes legal
13303 // input types (i.e. it can't handle things like v4i16) so do not run before
13304 // legalization.
13305 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13306 Reduced = combineBVOfVecSExt(N, DAG);
13307 if (Reduced)
13308 return Reduced;
13309 }
13312 if (N->getValueType(0) != MVT::v2f64)
13313 return SDValue();
13315 // Looking for:
13316 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13317 if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13318 FirstInput.getOpcode() != ISD::UINT_TO_FP)
13319 return SDValue();
13320 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13321 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13322 return SDValue();
13323 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13324 return SDValue();
13326 SDValue Ext1 = FirstInput.getOperand(0);
13327 SDValue Ext2 = N->getOperand(1).getOperand(0);
13328 if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13329 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13330 return SDValue();
13332 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13333 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13334 if (!Ext1Op || !Ext2Op)
13335 return SDValue();
13336 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13337 Ext1.getOperand(0) != Ext2.getOperand(0))
13338 return SDValue();
13340 int FirstElem = Ext1Op->getZExtValue();
13341 int SecondElem = Ext2Op->getZExtValue();
13342 int SubvecIdx;
13343 if (FirstElem == 0 && SecondElem == 1)
13344 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13345 else if (FirstElem == 2 && SecondElem == 3)
13346 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13347 else
13348 return SDValue();
13350 SDValue SrcVec = Ext1.getOperand(0);
13351 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13352 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13353 return DAG.getNode(NodeType, dl, MVT::v2f64,
13354 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13355 }
13357 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13358 DAGCombinerInfo &DCI) const {
13359 assert((N->getOpcode() == ISD::SINT_TO_FP ||
13360 N->getOpcode() == ISD::UINT_TO_FP) &&
13361 "Need an int -> FP conversion node here");
13363 if (useSoftFloat() || !Subtarget.has64BitSupport())
13364 return SDValue();
13366 SelectionDAG &DAG = DCI.DAG;
13367 SDLoc dl(N);
13368 SDValue Op(N, 0);
13370 // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13371 // from the hardware.
13372 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13373 return SDValue();
13374 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13375 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13376 return SDValue();
13378 SDValue FirstOperand(Op.getOperand(0));
13379 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13380 (FirstOperand.getValueType() == MVT::i8 ||
13381 FirstOperand.getValueType() == MVT::i16);
13382 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13383 bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13384 bool DstDouble = Op.getValueType() == MVT::f64;
13385 unsigned ConvOp = Signed ?
13386 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
13387 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13388 SDValue WidthConst =
13389 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13390 dl, false);
13391 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13392 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13393 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13394 DAG.getVTList(MVT::f64, MVT::Other),
13395 Ops, MVT::i8, LDN->getMemOperand());
13397 // For signed conversion, we need to sign-extend the value in the VSR
13398 if (Signed) {
13399 SDValue ExtOps[] = { Ld, WidthConst };
13400 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13401 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13402 } else
13403 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13404 }
13407 // For i32 intermediate values, unfortunately, the conversion functions
13408 // leave the upper 32 bits of the value undefined. Within the set of
13409 // scalar instructions, we have no method for zero- or sign-extending the
13410 // value. Thus, we cannot handle i32 intermediate values here.
13411 if (Op.getOperand(0).getValueType() == MVT::i32)
13412 return SDValue();
13414 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13415 "UINT_TO_FP is supported only with FPCVT");
13417 // If we have FCFIDS, then use it when converting to single-precision.
13418 // Otherwise, convert to double-precision and then round.
13419 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13420 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13421 : PPCISD::FCFIDS)
13422 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13423 : PPCISD::FCFID);
13424 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13425 ? MVT::f32
13426 : MVT::f64;
13428 // If we're converting from a float, to an int, and back to a float again,
13429 // then we don't need the store/load pair at all.
13430 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13431 Subtarget.hasFPCVT()) ||
13432 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13433 SDValue Src = Op.getOperand(0).getOperand(0);
13434 if (Src.getValueType() == MVT::f32) {
13435 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13436 DCI.AddToWorklist(Src.getNode());
13437 } else if (Src.getValueType() != MVT::f64) {
13438 // Make sure that we don't pick up a ppc_fp128 source value.
13439 return SDValue();
13440 }
13442 unsigned FCTOp =
13443 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13444 PPCISD::FCTIDUZ;
13446 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13447 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13449 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13450 FP = DAG.getNode(ISD::FP_ROUND, dl,
13451 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13452 DCI.AddToWorklist(FP.getNode());
13453 }
13455 return FP;
13456 }
13458 return SDValue();
13459 }
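// Editor's illustration (not from the original source): a source-level round
// trip handled by the combine above,
//   double f(double x) { return (double)(int64_t)x; }
// becomes fctidz followed by fcfid (plus an FP_ROUND when FPCVT is
// unavailable), with no store/load pair through memory. i32 intermediates are
// rejected above because their upper 32 bits would be undefined.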
13461 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13462 // builtins) into loads with swaps.
13463 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13464 DAGCombinerInfo &DCI) const {
13465 SelectionDAG &DAG = DCI.DAG;
13466 SDLoc dl(N);
13467 SDValue Chain;
13468 SDValue Base;
13469 MachineMemOperand *MMO;
13471 switch (N->getOpcode()) {
13472 default:
13473 llvm_unreachable("Unexpected opcode for little endian VSX load");
13474 case ISD::LOAD: {
13475 LoadSDNode *LD = cast<LoadSDNode>(N);
13476 Chain = LD->getChain();
13477 Base = LD->getBasePtr();
13478 MMO = LD->getMemOperand();
13479 // If the MMO suggests this isn't a load of a full vector, leave
13480 // things alone. For a built-in, we have to make the change for
13481 // correctness, so if there is a size problem that will be a bug.
13482 if (MMO->getSize() < 16)
13483 return SDValue();
13484 break;
13485 }
13486 case ISD::INTRINSIC_W_CHAIN: {
13487 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13488 Chain = Intrin->getChain();
13489 // Similarly to the store case below, Intrin->getBasePtr() doesn't get
13490 // us what we want. Get operand 2 instead.
13491 Base = Intrin->getOperand(2);
13492 MMO = Intrin->getMemOperand();
13493 break;
13494 }
13495 }
13497 MVT VecTy = N->getValueType(0).getSimpleVT();
13499 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
13500 // aligned and the type is a vector with elements up to 4 bytes
13501 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
13502 && VecTy.getScalarSizeInBits() <= 32 ) {
13503 return SDValue();
13504 }
13506 SDValue LoadOps[] = { Chain, Base };
13507 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
13508 DAG.getVTList(MVT::v2f64, MVT::Other),
13509 LoadOps, MVT::v2f64, MMO);
13511 DCI.AddToWorklist(Load.getNode());
13512 Chain = Load.getValue(1);
13513 SDValue Swap = DAG.getNode(
13514 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
13515 DCI.AddToWorklist(Swap.getNode());
13517 // Add a bitcast if the resulting load type doesn't match v2f64.
13518 if (VecTy != MVT::v2f64) {
13519 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
13520 DCI.AddToWorklist(N.getNode());
13521 // Package {bitcast value, swap's chain} to match Load's shape.
13522 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
13523 N, Swap.getValue(1));
13524 }
13526 return Swap;
13527 }
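// Editor's illustration (not from the original source): on a little-endian
// subtarget that needs swaps, a v4i32 VSX load is expanded here into
//   (v2f64 LXVD2X ptr) -> XXSWAPD -> bitcast to v4i32
// i.e. the lxvd2x/xxswapd pair that a later swap-removal pass may eliminate
// when it proves redundant.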
13529 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
13530 // builtins) into stores with swaps.
13531 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
13532 DAGCombinerInfo &DCI) const {
13533 SelectionDAG &DAG = DCI.DAG;
13534 SDLoc dl(N);
13535 SDValue Chain;
13536 SDValue Base;
13537 unsigned SrcOpnd;
13538 MachineMemOperand *MMO;
13540 switch (N->getOpcode()) {
13541 default:
13542 llvm_unreachable("Unexpected opcode for little endian VSX store");
13543 case ISD::STORE: {
13544 StoreSDNode *ST = cast<StoreSDNode>(N);
13545 Chain = ST->getChain();
13546 Base = ST->getBasePtr();
13547 MMO = ST->getMemOperand();
13548 SrcOpnd = 1;
13549 // If the MMO suggests this isn't a store of a full vector, leave
13550 // things alone. For a built-in, we have to make the change for
13551 // correctness, so if there is a size problem that will be a bug.
13552 if (MMO->getSize() < 16)
13553 return SDValue();
13554 break;
13555 }
13556 case ISD::INTRINSIC_VOID: {
13557 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
13558 Chain = Intrin->getChain();
13559 // Intrin->getBasePtr() oddly does not get what we want.
13560 Base = Intrin->getOperand(3);
13561 MMO = Intrin->getMemOperand();
13562 SrcOpnd = 3;
13563 break;
13564 }
13565 }
13567 SDValue Src = N->getOperand(SrcOpnd);
13568 MVT VecTy = Src.getValueType().getSimpleVT();
13570 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
13571 // aligned and the type is a vector with elements up to 4 bytes
13572 if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
13573 && VecTy.getScalarSizeInBits() <= 32 ) {
13574 return SDValue();
13575 }
13577 // All stores are done as v2f64 and possible bit cast.
13578 if (VecTy != MVT::v2f64) {
13579 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
13580 DCI.AddToWorklist(Src.getNode());
13581 }
13583 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
13584 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
13585 DCI.AddToWorklist(Swap.getNode());
13586 Chain = Swap.getValue(1);
13587 SDValue StoreOps[] = { Chain, Swap, Base };
13588 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
13589 DAG.getVTList(MVT::Other),
13590 StoreOps, VecTy, MMO);
13591 DCI.AddToWorklist(Store.getNode());
13592 return Store;
13593 }
13595 // Handle DAG combine for STORE (FP_TO_INT F).
13596 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
13597 DAGCombinerInfo &DCI) const {
13599 SelectionDAG &DAG = DCI.DAG;
13600 SDLoc dl(N);
13601 unsigned Opcode = N->getOperand(1).getOpcode();
13603 assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
13604 && "Not a FP_TO_INT Instruction!");
13606 SDValue Val = N->getOperand(1).getOperand(0);
13607 EVT Op1VT = N->getOperand(1).getValueType();
13608 EVT ResVT = Val.getValueType();
13610 // Floating point types smaller than 32 bits are not legal on Power.
13611 if (ResVT.getScalarSizeInBits() < 32)
13612 return SDValue();
13614 // Only perform combine for conversion to i64/i32 or power9 i16/i8.
13615 bool ValidTypeForStoreFltAsInt =
13616 (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
13617 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
13619 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
13620 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
13621 return SDValue();
13623 // Extend f32 values to f64
13624 if (ResVT.getScalarSizeInBits() == 32) {
13625 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
13626 DCI.AddToWorklist(Val.getNode());
13627 }
13629 // Set signed or unsigned conversion opcode.
13630 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
13631 PPCISD::FP_TO_SINT_IN_VSR :
13632 PPCISD::FP_TO_UINT_IN_VSR;
13634 Val = DAG.getNode(ConvOpcode,
13635 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
13636 DCI.AddToWorklist(Val.getNode());
13638 // Set number of bytes being converted.
13639 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
13640 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
13641 DAG.getIntPtrConstant(ByteSize, dl, false),
13642 DAG.getValueType(Op1VT) };
13644 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
13645 DAG.getVTList(MVT::Other), Ops,
13646 cast<StoreSDNode>(N)->getMemoryVT(),
13647 cast<StoreSDNode>(N)->getMemOperand());
13649 DCI.AddToWorklist(Val.getNode());
13650 return Val;
13651 }
13653 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
13654 LSBaseSDNode *LSBase,
13655 DAGCombinerInfo &DCI) const {
13656 assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
13657 "Not a reverse memop pattern!");
13659 auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
13660 auto Mask = SVN->getMask();
13661 int i = 0;
13662 auto I = Mask.rbegin();
13663 auto E = Mask.rend();
13665 for (; I != E; ++I) {
13666 if (*I != i)
13667 return false;
13668 i++;
13669 }
13670 return true;
13671 };
13673 SelectionDAG &DAG = DCI.DAG;
13674 EVT VT = SVN->getValueType(0);
13676 if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
13677 return SDValue();
13679 // Before P9, we have PPCVSXSwapRemoval pass to hack the element order.
13680 // See comment in PPCVSXSwapRemoval.cpp.
13681 // This transformation conflicts with that pass, so we do not do it there.
13682 if (!Subtarget.hasP9Vector())
13683 return SDValue();
13685 if (!IsElementReverse(SVN))
13686 return SDValue();
13688 if (LSBase->getOpcode() == ISD::LOAD) {
13689 SDLoc dl(LSBase);
13690 SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
13691 return DAG.getMemIntrinsicNode(
13692 PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
13693 LSBase->getMemoryVT(), LSBase->getMemOperand());
13694 }
13696 if (LSBase->getOpcode() == ISD::STORE) {
13697 SDLoc dl(LSBase);
13698 SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
13699 LSBase->getBasePtr()};
13700 return DAG.getMemIntrinsicNode(
13701 PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
13702 LSBase->getMemoryVT(), LSBase->getMemOperand());
13703 }
13705 llvm_unreachable("Expected a load or store node here");
13706 }
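// Editor's illustration (not from the original source): for v4i32, the only
// mask IsElementReverse accepts is <3,2,1,0>, so e.g.
//   (store (vector_shuffle<3,2,1,0> v, undef), ptr)
// is replaced by a single PPCISD::STORE_VEC_BE of v, eliminating the shuffle.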
13708 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
13709 DAGCombinerInfo &DCI) const {
13710 SelectionDAG &DAG = DCI.DAG;
13711 SDLoc dl(N);
13712 switch (N->getOpcode()) {
13713 default: break;
13714 case ISD::ADD:
13715 return combineADD(N, DCI);
13716 case ISD::SHL:
13717 return combineSHL(N, DCI);
13718 case ISD::SRA:
13719 return combineSRA(N, DCI);
13720 case ISD::SRL:
13721 return combineSRL(N, DCI);
13722 case ISD::MUL:
13723 return combineMUL(N, DCI);
13724 case PPCISD::SHL:
13725 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
13726 return N->getOperand(0);
13727 break;
13728 case PPCISD::SRL:
13729 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
13730 return N->getOperand(0);
13731 break;
13732 case PPCISD::SRA:
13733 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
13734 if (C->isNullValue() || // 0 >>s V -> 0.
13735 C->isAllOnesValue()) // -1 >>s V -> -1.
13736 return N->getOperand(0);
13737 }
13738 break;
13739 case ISD::SIGN_EXTEND:
13740 case ISD::ZERO_EXTEND:
13741 case ISD::ANY_EXTEND:
13742 return DAGCombineExtBoolTrunc(N, DCI);
13743 case ISD::TRUNCATE:
13744 return combineTRUNCATE(N, DCI);
13745 case ISD::SETCC:
13746 if (SDValue CSCC = combineSetCC(N, DCI))
13747 return CSCC;
13748 LLVM_FALLTHROUGH;
13749 case ISD::SELECT_CC:
13750 return DAGCombineTruncBoolExt(N, DCI);
13751 case ISD::SINT_TO_FP:
13752 case ISD::UINT_TO_FP:
13753 return combineFPToIntToFP(N, DCI);
13754 case ISD::VECTOR_SHUFFLE:
13755 if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
13756 LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
13757 return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
13758 }
13759 break;
13760 case ISD::STORE: {
13762 EVT Op1VT = N->getOperand(1).getValueType();
13763 unsigned Opcode = N->getOperand(1).getOpcode();
13765 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
13766 SDValue Val = combineStoreFPToInt(N, DCI);
13767 if (Val)
13768 return Val;
13769 }
13771 if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
13772 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
13773 SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
13774 if (Val)
13775 return Val;
13776 }
13778 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
13779 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
13780 N->getOperand(1).getNode()->hasOneUse() &&
13781 (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
13782 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
13784 // STBRX can only handle simple types and it makes no sense to store less
13785 // than two bytes in byte-reversed order.
13786 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
13787 if (mVT.isExtended() || mVT.getSizeInBits() < 16)
13788 break;
13790 SDValue BSwapOp = N->getOperand(1).getOperand(0);
13791 // Do an any-extend to 32-bits if this is a half-word input.
13792 if (BSwapOp.getValueType() == MVT::i16)
13793 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
13795 // If the type of the BSWAP operand is wider than the stored memory width
13796 // it needs to be shifted to the right side before STBRX.
13797 if (Op1VT.bitsGT(mVT)) {
13798 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
13799 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
13800 DAG.getConstant(Shift, dl, MVT::i32));
13801 // Need to truncate if this is a bswap of i64 stored as i32/i16.
13802 if (Op1VT == MVT::i64)
13803 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
13804 }
13806 SDValue Ops[] = {
13807 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
13808 };
13809 return
13810 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
13811 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
13812 cast<StoreSDNode>(N)->getMemOperand());
13813 }
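// Editor's illustration (not from the original source): a C-level source of
// this pattern, assuming __builtin_bswap32 lowers to ISD::BSWAP,
//   void store_be(uint32_t *p, uint32_t x) { *p = __builtin_bswap32(x); }
// is selected as a single stwbrx instead of a byte swap followed by stw.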
13815 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
13816 // So it can increase the chance of CSE constant construction.
13817 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
13818 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
13819 // Need to sign-extend to 64 bits to handle negative values.
13820 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
13821 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
13822 MemVT.getSizeInBits());
13823 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
13825 // DAG.getTruncStore() can't be used here because it doesn't accept
13826 // the general (base + offset) addressing mode.
13827 // So we use UpdateNodeOperands and setTruncatingStore instead.
13828 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
13829 N->getOperand(3));
13830 cast<StoreSDNode>(N)->setTruncatingStore(true);
13831 return SDValue(N, 0);
13832 }
13834 // For little endian, VSX stores require generating xxswapd/lxvd2x.
13835 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
13836 if (Op1VT.isSimple()) {
13837 MVT StoreVT = Op1VT.getSimpleVT();
13838 if (Subtarget.needsSwapsForVSXMemOps() &&
13839 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
13840 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
13841 return expandVSXStoreForLE(N, DCI);
13842 }
13843 break;
13844 }
13845 case ISD::LOAD: {
13846 LoadSDNode *LD = cast<LoadSDNode>(N);
13847 EVT VT = LD->getValueType(0);
13849 // For little endian, VSX loads require generating lxvd2x/xxswapd.
13850 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
13851 if (VT.isSimple()) {
13852 MVT LoadVT = VT.getSimpleVT();
13853 if (Subtarget.needsSwapsForVSXMemOps() &&
13854 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
13855 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
13856 return expandVSXLoadForLE(N, DCI);
13857 }
13859 // We sometimes end up with a 64-bit integer load, from which we extract
13860 // two single-precision floating-point numbers. This happens with
13861 // std::complex<float>, and other similar structures, because of the way we
13862 // canonicalize structure copies. However, if we lack direct moves,
13863 // then the final bitcasts from the extracted integer values to the
13864 // floating-point numbers turn into store/load pairs. Even with direct moves,
13865 // just loading the two floating-point numbers is likely better.
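// Editor's illustration (not from the original source): a minimal C++
// function that produces the i64-load-plus-two-bitcasts pattern handled by
// the lambda below,
//   std::complex<float> copy(const std::complex<float> *p) { return *p; }
// where the structure copy is canonicalized to one i64 load and the two f32
// halves are recovered with srl/truncate/bitcast.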
13866 auto ReplaceTwoFloatLoad = [&]() {
13867 if (VT != MVT::i64)
13868 return false;
13870 if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
13871 LD->isVolatile())
13872 return false;
13874 // We're looking for a sequence like this:
13875 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
13876 // t16: i64 = srl t13, Constant:i32<32>
13877 // t17: i32 = truncate t16
13878 // t18: f32 = bitcast t17
13879 // t19: i32 = truncate t13
13880 // t20: f32 = bitcast t19
13882 if (!LD->hasNUsesOfValue(2, 0))
13883 return false;
13885 auto UI = LD->use_begin();
13886 while (UI.getUse().getResNo() != 0) ++UI;
13887 SDNode *Trunc = *UI++;
13888 while (UI.getUse().getResNo() != 0) ++UI;
13889 SDNode *RightShift = *UI;
13890 if (Trunc->getOpcode() != ISD::TRUNCATE)
13891 std::swap(Trunc, RightShift);
13893 if (Trunc->getOpcode() != ISD::TRUNCATE ||
13894 Trunc->getValueType(0) != MVT::i32 ||
13895 !Trunc->hasOneUse())
13896 return false;
13897 if (RightShift->getOpcode() != ISD::SRL ||
13898 !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
13899 RightShift->getConstantOperandVal(1) != 32 ||
13900 !RightShift->hasOneUse())
13901 return false;
13903 SDNode *Trunc2 = *RightShift->use_begin();
13904 if (Trunc2->getOpcode() != ISD::TRUNCATE ||
13905 Trunc2->getValueType(0) != MVT::i32 ||
13906 !Trunc2->hasOneUse())
13907 return false;
13909 SDNode *Bitcast = *Trunc->use_begin();
13910 SDNode *Bitcast2 = *Trunc2->use_begin();
13912 if (Bitcast->getOpcode() != ISD::BITCAST ||
13913 Bitcast->getValueType(0) != MVT::f32)
13914 return false;
13915 if (Bitcast2->getOpcode() != ISD::BITCAST ||
13916 Bitcast2->getValueType(0) != MVT::f32)
13917 return false;
13919 if (Subtarget.isLittleEndian())
13920 std::swap(Bitcast, Bitcast2);
13922 // Bitcast has the second float (in memory-layout order) and Bitcast2
13923 // has the first one.
13925 SDValue BasePtr = LD->getBasePtr();
13926 if (LD->isIndexed()) {
13927 assert(LD->getAddressingMode() == ISD::PRE_INC &&
13928 "Non-pre-inc AM on PPC?");
13930 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
13934 auto MMOFlags =
13935 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
13936 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
13937 LD->getPointerInfo(), LD->getAlignment(),
13938 MMOFlags, LD->getAAInfo());
13939 SDValue AddPtr =
13940 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
13941 BasePtr, DAG.getIntPtrConstant(4, dl));
13942 SDValue FloatLoad2 = DAG.getLoad(
13943 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
13944 LD->getPointerInfo().getWithOffset(4),
13945 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
13947 if (LD->isIndexed()) {
13948 // Note that DAGCombine should re-form any pre-increment load(s) from
13949 // what is produced here if that makes sense.
13950 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
13951 }
13953 DCI.CombineTo(Bitcast2, FloatLoad);
13954 DCI.CombineTo(Bitcast, FloatLoad2);
13956 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
13957 SDValue(FloatLoad2.getNode(), 1));
13958 return true;
13959 };
13961 if (ReplaceTwoFloatLoad())
13962 return SDValue(N, 0);
13964 EVT MemVT = LD->getMemoryVT();
13965 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
13966 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
13967 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
13968 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
13969 if (LD->isUnindexed() && VT.isVector() &&
13970 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
13971 // P8 and later hardware should just use LOAD.
13972 !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
13973 VT == MVT::v4i32 || VT == MVT::v4f32)) ||
13974 (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
13975 LD->getAlignment() >= ScalarABIAlignment)) &&
13976 LD->getAlignment() < ABIAlignment) {
13977 // This is a type-legal unaligned Altivec or QPX load.
13978 SDValue Chain = LD->getChain();
13979 SDValue Ptr = LD->getBasePtr();
13980 bool isLittleEndian = Subtarget.isLittleEndian();
13982 // This implements the loading of unaligned vectors as described in
13983 // the venerable Apple Velocity Engine overview. Specifically:
13984 // https://developer.apple.com/hardwaredrivers/ve/alignment.html
13985 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
13987 // The general idea is to expand a sequence of one or more unaligned
13988 // loads into an alignment-based permutation-control instruction (lvsl
13989 // or lvsr), a series of regular vector loads (which always truncate
13990 // their input address to an aligned address), and a series of
13991 // permutations. The results of these permutations are the requested
13992 // loaded values. The trick is that the last "extra" load is not taken
13993 // from the address you might suspect (sizeof(vector) bytes after the
13994 // last requested load), but rather sizeof(vector) - 1 bytes after the
13995 // last requested vector. The point of this is to avoid a page fault if
13996 // the base address happened to be aligned. This works because if the
13997 // base address is aligned, then adding less than a full vector length
13998 // will cause the last vector in the sequence to be (re)loaded.
13999 // Otherwise, the next vector will be fetched as you might suspect was
14000 // necessary.
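// Editor's illustration (not from the original source): the classic
// expansion for one unaligned 16-byte Altivec load from r3 (big-endian form;
// register choices are hypothetical):
//   lvsl  v3, 0, r3       ; permute control from the low address bits
//   lvx   v1, 0, r3       ; aligned quadword containing the first byte
//   addi  r4, r3, 15      ; sizeof(vector) - 1, per the comment above
//   lvx   v2, 0, r4       ; next quadword, without risking a page fault
//   vperm v4, v1, v2, v3  ; select the 16 requested bytes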
14002 // We might be able to reuse the permutation generation from
14003 // a different base address offset from this one by an aligned amount.
14004 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14005 // optimization later.
14006 Intrinsic::ID Intr, IntrLD, IntrPerm;
14007 MVT PermCntlTy, PermTy, LDTy;
14008 if (Subtarget.hasAltivec()) {
14009 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
14010 Intrinsic::ppc_altivec_lvsl;
14011 IntrLD = Intrinsic::ppc_altivec_lvx;
14012 IntrPerm = Intrinsic::ppc_altivec_vperm;
14013 PermCntlTy = MVT::v16i8;
14014 PermTy = MVT::v4i32;
14015 LDTy = MVT::v4i32;
14016 } else {
14017 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
14018 Intrinsic::ppc_qpx_qvlpcls;
14019 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
14020 Intrinsic::ppc_qpx_qvlfs;
14021 IntrPerm = Intrinsic::ppc_qpx_qvfperm;
14022 PermCntlTy = MVT::v4f64;
14023 PermTy = MVT::v4f64;
14024 LDTy = MemVT.getSimpleVT();
14025 }
14027 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14029 // Create the new MMO for the new base load. It is like the original MMO,
14030 // but represents an area in memory almost twice the vector size centered
14031 // on the original address. If the address is unaligned, we might start
14032 // reading up to (sizeof(vector)-1) bytes below the address of the
14033 // original unaligned load.
14034 MachineFunction &MF = DAG.getMachineFunction();
14035 MachineMemOperand *BaseMMO =
14036 MF.getMachineMemOperand(LD->getMemOperand(),
14037 -(long)MemVT.getStoreSize()+1,
14038 2*MemVT.getStoreSize()-1);
14040 // Create the new base load.
14041 SDValue LDXIntID =
14042 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14043 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14044 SDValue BaseLoad =
14045 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14046 DAG.getVTList(PermTy, MVT::Other),
14047 BaseLoadOps, LDTy, BaseMMO);
14049 // Note that the value of IncOffset (which is provided to the next
14050 // load's pointer info offset value, and thus used to calculate the
14051 // alignment), and the value of IncValue (which is actually used to
14052 // increment the pointer value) are different! This is because we
14053 // require the next load to appear to be aligned, even though it
14054 // is actually offset from the base pointer by a lesser amount.
14055 int IncOffset = VT.getSizeInBits() / 8;
14056 int IncValue = IncOffset;
14058 // Walk (both up and down) the chain looking for another load at the real
14059 // (aligned) offset (the alignment of the other load does not matter in
14060 // this case). If found, then do not use the offset reduction trick, as
14061 // that will prevent the loads from being later combined (as they would
14062 // otherwise be duplicates).
14063 if (!findConsecutiveLoad(LD, DAG))
14064 --IncValue;
14066 SDValue Increment =
14067 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14068 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14070 MachineMemOperand *ExtraMMO =
14071 MF.getMachineMemOperand(LD->getMemOperand(),
14072 1, 2*MemVT.getStoreSize()-1);
14073 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14074 SDValue ExtraLoad =
14075 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14076 DAG.getVTList(PermTy, MVT::Other),
14077 ExtraLoadOps, LDTy, ExtraMMO);
14079 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14080 BaseLoad.getValue(1), ExtraLoad.getValue(1));
14082 // Because vperm has a big-endian bias, we must reverse the order
14083 // of the input vectors and complement the permute control vector
14084 // when generating little endian code. We have already handled the
14085 // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14086 // and ExtraLoad here.
14087 SDValue Perm;
14088 if (isLittleEndian)
14089 Perm = BuildIntrinsicOp(IntrPerm,
14090 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14092 Perm = BuildIntrinsicOp(IntrPerm,
14093 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14095 if (VT != PermTy)
14096 Perm = Subtarget.hasAltivec() ?
14097 DAG.getNode(ISD::BITCAST, dl, VT, Perm) :
14098 DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
14099 DAG.getTargetConstant(1, dl, MVT::i64));
14100 // second argument is 1 because this rounding
14101 // is always exact.
14103 // The output of the permutation is our loaded result, the TokenFactor is
14104 // the new chain.
14105 DCI.CombineTo(N, Perm, TF);
14106 return SDValue(N, 0);
14107 }
14108 }
14109 break;
14110 case ISD::INTRINSIC_WO_CHAIN: {
14111 bool isLittleEndian = Subtarget.isLittleEndian();
14112 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14113 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14114 : Intrinsic::ppc_altivec_lvsl);
14115 if ((IID == Intr ||
14116 IID == Intrinsic::ppc_qpx_qvlpcld ||
14117 IID == Intrinsic::ppc_qpx_qvlpcls) &&
14118 N->getOperand(1)->getOpcode() == ISD::ADD) {
14119 SDValue Add = N->getOperand(1);
14121 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
14122 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;
14124 if (DAG.MaskedValueIsZero(Add->getOperand(1),
14125 APInt::getAllOnesValue(Bits /* alignment */)
14126 .zext(Add.getScalarValueSizeInBits()))) {
14127 SDNode *BasePtr = Add->getOperand(0).getNode();
14128 for (SDNode::use_iterator UI = BasePtr->use_begin(),
14129 UE = BasePtr->use_end();
14130 UI != UE; ++UI) {
14131 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14132 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
14133 // We've found another LVSL/LVSR, and this address is an aligned
14134 // multiple of that one. The results will be the same, so use the
14135 // one we've just found instead.
14137 return SDValue(*UI, 0);
14138 }
14139 }
14140 }
14142 if (isa<ConstantSDNode>(Add->getOperand(1))) {
14143 SDNode *BasePtr = Add->getOperand(0).getNode();
14144 for (SDNode::use_iterator UI = BasePtr->use_begin(),
14145 UE = BasePtr->use_end(); UI != UE; ++UI) {
14146 if (UI->getOpcode() == ISD::ADD &&
14147 isa<ConstantSDNode>(UI->getOperand(1)) &&
14148 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14149 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14150 (1ULL << Bits) == 0) {
14151 SDNode *OtherAdd = *UI;
14152 for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14153 VE = OtherAdd->use_end(); VI != VE; ++VI) {
14154 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14155 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14156 return SDValue(*VI, 0);
14157 }
14158 }
14159 }
14160 }
14161 }
14164 // Combine vmaxsw/h/b(a, a's negation) to abs(a)
14165 // Expose the vabsduw/h/b opportunity for downstream passes.
14166 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14167 (IID == Intrinsic::ppc_altivec_vmaxsw ||
14168 IID == Intrinsic::ppc_altivec_vmaxsh ||
14169 IID == Intrinsic::ppc_altivec_vmaxsb)) {
14170 SDValue V1 = N->getOperand(1);
14171 SDValue V2 = N->getOperand(2);
14172 if ((V1.getSimpleValueType() == MVT::v4i32 ||
14173 V1.getSimpleValueType() == MVT::v8i16 ||
14174 V1.getSimpleValueType() == MVT::v16i8) &&
14175 V1.getSimpleValueType() == V2.getSimpleValueType()) {
14176 // (0-a, a)
14177 if (V1.getOpcode() == ISD::SUB &&
14178 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14179 V1.getOperand(1) == V2) {
14180 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14181 }
14182 // (a, 0-a)
14183 if (V2.getOpcode() == ISD::SUB &&
14184 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14185 V2.getOperand(1) == V1) {
14186 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14187 }
14188 // (x-y, y-x)
14189 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14190 V1.getOperand(0) == V2.getOperand(1) &&
14191 V1.getOperand(1) == V2.getOperand(0)) {
14192 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14193 }
14194 }
14195 }
14197 break;
14198 }
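// Editor's illustration (not from the original source): the folds above rely
// on the elementwise signed identities max(a, 0 - a) == abs(a) and
// max(x - y, y - x) == abs(x - y), letting the vmaxs* intrinsic collapse to
// ISD::ABS (under two's-complement wrap both sides agree even at INT_MIN).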
14199 case ISD::INTRINSIC_W_CHAIN:
14200 // For little endian, VSX loads require generating lxvd2x/xxswapd.
14201 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14202 if (Subtarget.needsSwapsForVSXMemOps()) {
14203 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14204 default:
14205 break;
14206 case Intrinsic::ppc_vsx_lxvw4x:
14207 case Intrinsic::ppc_vsx_lxvd2x:
14208 return expandVSXLoadForLE(N, DCI);
14209 }
14210 }
14211 break;
14212 case ISD::INTRINSIC_VOID:
14213 // For little endian, VSX stores require generating xxswapd/stxvd2x.
14214 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14215 if (Subtarget.needsSwapsForVSXMemOps()) {
14216 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14217 default:
14218 break;
14219 case Intrinsic::ppc_vsx_stxvw4x:
14220 case Intrinsic::ppc_vsx_stxvd2x:
14221 return expandVSXStoreForLE(N, DCI);
14222 }
14223 }
14224 break;
14225 case ISD::BSWAP:
14226 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
14227 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14228 N->getOperand(0).hasOneUse() &&
14229 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14230 (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14231 N->getValueType(0) == MVT::i64))) {
14232 SDValue Load = N->getOperand(0);
14233 LoadSDNode *LD = cast<LoadSDNode>(Load);
14234 // Create the byte-swapping load.
14235 SDValue Ops[] = {
14236 LD->getChain(), // Chain
14237 LD->getBasePtr(), // Ptr
14238 DAG.getValueType(N->getValueType(0)) // VT
14239 };
14240 SDValue BSLoad =
14241 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14242 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14243 MVT::i64 : MVT::i32, MVT::Other),
14244 Ops, LD->getMemoryVT(), LD->getMemOperand());
14246 // If this is an i16 load, insert the truncate.
14247 SDValue ResVal = BSLoad;
14248 if (N->getValueType(0) == MVT::i16)
14249 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14251 // First, combine the bswap away. This makes the value produced by the
14252 // load dead.
14253 DCI.CombineTo(N, ResVal);
14255 // Next, combine the load away, we give it a bogus result value but a real
14256 // chain result. The result value is dead because the bswap is dead.
14257 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14259 // Return N so it doesn't get rechecked!
14260 return SDValue(N, 0);
14261 }
14262 break;
14263 case PPCISD::VCMPo: {
14264 // If a VCMPo node already exists with exactly the same operands as this
14265 // node, use its result instead of this node (VCMPo computes both a CR6 and
14266 // a normal output).
14268 if (!N->getOperand(0).hasOneUse() &&
14269 !N->getOperand(1).hasOneUse() &&
14270 !N->getOperand(2).hasOneUse()) {
14272 // Scan all of the users of the LHS, looking for VCMPo's that match.
14273 SDNode *VCMPoNode = nullptr;
14275 SDNode *LHSN = N->getOperand(0).getNode();
14276 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14277 UI != E; ++UI)
14278 if (UI->getOpcode() == PPCISD::VCMPo &&
14279 UI->getOperand(1) == N->getOperand(1) &&
14280 UI->getOperand(2) == N->getOperand(2) &&
14281 UI->getOperand(0) == N->getOperand(0)) {
14282 VCMPoNode = *UI;
14283 break;
14284 }
14286 // If there is no VCMPo node, or if the flag value has a single use, don't
14287 // transform this.
14288 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
14289 return SDValue();
14291 // Look at the (necessarily single) use of the flag value. If it has a
14292 // chain, this transformation is more complex. Note that multiple things
14293 // could use the value result, which we should ignore.
14294 SDNode *FlagUser = nullptr;
14295 for (SDNode::use_iterator UI = VCMPoNode->use_begin();
14296 FlagUser == nullptr; ++UI) {
14297 assert(UI != VCMPoNode->use_end() && "Didn't find user!");
14298 SDNode *User = *UI;
14299 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
14300 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
14307 // If the user is a MFOCRF instruction, we know this is safe.
14308 // Otherwise we give up for right now.
14309 if (FlagUser->getOpcode() == PPCISD::MFOCRF)
14310 return SDValue(VCMPoNode, 0);
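  // The BRCOND case below recognizes a conditional branch on the value of
  // the loop_decrement intrinsic and turns it into PPCISD::BDNZ, i.e. a
  // hardware count-register decrement-and-branch.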
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
  }
  break;
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, pass-through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
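  // For example, a branch on (vcmpequw. a, b) == 1 becomes a single branch
  // on the appropriate CR6 bit that the record-form vector compare already
  // sets, with no intervening mfocrf.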
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  case ISD::VSELECT:
    return combineVSelect(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
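  // PPCISD::SRA_ADDZE is an arithmetic shift right (srawi/sradi) followed by
  // addze: the shift sets the carry bit iff the input was negative and
  // nonzero bits were shifted out, and adding the carry back rounds the
  // signed division result toward zero.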
  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

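// Heuristic note for the POWER directives handled below: a small loop that
// sits entirely inside one aligned 32-byte block can be brought in by a
// single instruction fetch, so 32-byte alignment is preferred for small or
// innermost loops (the final decision still happens in alignBlocks).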
Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR_FUTURE: {
    if (!ML)
      break;

    if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment,
      // so that we can decrease cache misses and branch-prediction misses.
      // Actual alignment of the loop will depend on the hotness check and
      // other logic in alignBlocks.
      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
        return Align(32);
    }

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }
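    // A loop of at most 16 bytes already fits in one aligned 32-byte block
    // when the default alignment is 16 bytes, so only the 17..32 byte range
    // gains anything from the stricter 32-byte alignment.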
    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

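// Example use from C code (hypothetical, not part of this file):
//   asm("add %0,%1,%2" : "=r"(d) : "r"(a), "b"(b));
// 'r' may select any GPR, while 'b' excludes r0 because r0 reads as zero
// when used as the base operand of many instructions.
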
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // holds 64-bit integer data.
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;
  else if (StringRef(constraint) == "ww" && type->isFloatTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::GPRCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
        if (VT == MVT::v4f64 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QFRCRegClass);
        if (VT == MVT::v4f32 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QSRCRegClass);
      }
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  // If we name a VSX register, we can't defer to the base class because it
  // will not recognize the correct register (their names will be VSL{0-31}
  // and V{0-31} so they won't match). So we match them here.
  if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
    int VSNum = atoi(Constraint.data() + 3);
    assert(VSNum >= 0 && VSNum <= 63 &&
           "Attempted to access a vsr out of range");
    if (VSNum < 32)
      return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
    return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
  }
  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                          PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    SDLoc dl(Op);
    int64_t Value = CST->getSExtValue();
    EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
                         // numbers are printed as such.
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if (Value > 0 && isPowerOf2_64(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

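// Example (hypothetical user code):
//   asm("addi %0,%1,%2" : "=r"(d) : "r"(a), "I"(42));
// succeeds because 42 fits the signed 16-bit 'I' constraint, whereas a value
// like 70000 would not match and falls through to the generic handling.
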
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // PPC does not allow r+i addressing modes for vectors!
  if (Ty->isVectorTy() && AM.BaseOffs != 0)
    return false;

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = Subtarget.isPPC64();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
        DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
                        isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

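// Stack frames on PPC are linked through a back chain: the word at offset 0
// of the stack pointer holds the caller's stack pointer, so walking Depth
// frames is simply Depth successive loads through that slot (the while loop
// below).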
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, and so we use r1. For all
  // other functions, this decision must be delayed until during PEI.
  unsigned FrameReg;
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  bool isPPC64 = Subtarget.isPPC64();
  bool IsDarwinABI = Subtarget.isDarwinABI();

  bool is64Bit = isPPC64 && VT == LLT::scalar(64);
  if (!is64Bit && VT != LLT::scalar(32))
    report_fatal_error("Invalid register global variable type");

  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
                     .Case("r2", (IsDarwinABI || isPPC64) ? Register() : PPC::R2)
                     .Case("r13", (!isPPC64 && IsDarwinABI) ? Register() :
                                    (is64Bit ? PPC::X13 : PPC::R13))
                     .Default(Register());

  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
  // The 32-bit SVR4 ABI accesses everything as got-indirect.
  if (Subtarget.is32BitELFABI())
    return true;

  // AIX accesses everything indirectly through the TOC, which is similar to
  // the GOT.
  if (Subtarget.isAIXABI())
    return true;

  CodeModel::Model CModel = getTargetMachine().getCodeModel();
  // If it is small or large code model, module locals are accessed
  // indirectly by loading their address from .toc/.got.
  if (CModel == CodeModel::Small || CModel == CodeModel::Large)
    return true;

  // JumpTable and BlockAddress are accessed as got-indirect.
  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
    return true;

  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
    return Subtarget.isGVIndirectSymbol(G->getGlobal());

  return false;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_qpx_qvlfd:
  case Intrinsic::ppc_qpx_qvlfs:
  case Intrinsic::ppc_qpx_qvlfcd:
  case Intrinsic::ppc_qpx_qvlfcs:
  case Intrinsic::ppc_qpx_qvlfiwa:
  case Intrinsic::ppc_qpx_qvlfiwz:
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvlfda:
  case Intrinsic::ppc_qpx_qvlfsa:
  case Intrinsic::ppc_qpx_qvlfcda:
  case Intrinsic::ppc_qpx_qvlfcsa:
  case Intrinsic::ppc_qpx_qvlfiwaa:
  case Intrinsic::ppc_qpx_qvlfiwza: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfd:
  case Intrinsic::ppc_qpx_qvstfs:
  case Intrinsic::ppc_qpx_qvstfcd:
  case Intrinsic::ppc_qpx_qvstfcs:
  case Intrinsic::ppc_qpx_qvstfiw:
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfd:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize()+1;
    Info.size = 2*VT.getStoreSize()-1;
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_qpx_qvstfda:
  case Intrinsic::ppc_qpx_qvstfsa:
  case Intrinsic::ppc_qpx_qvstfcda:
  case Intrinsic::ppc_qpx_qvstfcsa:
  case Intrinsic::ppc_qpx_qvstfiwa: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.size = VT.getStoreSize();
    Info.align = Align::None();
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  default:
    break;
  }

  return false;
}

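// Note on the unaligned intrinsics above: lvx/stvx ignore the low four bits
// of the effective address, so an access nominally at Ptr may actually touch
// the aligned block anywhere in [Ptr - (Size-1), Ptr + (Size-1)]; setting
// offset = -(Size-1) with width 2*Size-1 describes that range conservatively.
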
/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, the destination alignment can satisfy any constraint.
/// Similarly, if SrcAlign is zero there is no need to check it against the
/// alignment requirement, probably because the source does not need to be
/// loaded. If 'IsMemset' is true, that means it's expanding a memset. If
/// 'ZeroMemset' is true, that means it's a memset of zero. 'MemcpyStrSrc'
/// indicates whether the memcpy source is constant so it does not need to be
/// loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
    // When expanding a memset, require at least two QPX instructions to cover
    // the cost of loading the value to be stored from the constant pool.
    if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
        (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
        !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
      return MVT::v4f64;
    }

    // We should use Altivec/VSX loads and stores when available. For unaligned
    // addresses, unaligned VSX loads are only fast starting with the P8.
    if (Subtarget.hasAltivec() && Size >= 16 &&
        (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
         ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
      return MVT::v4i32;
  }

  if (Subtarget.isPPC64()) {
    return MVT::i64;
  }

  return MVT::i32;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Generally speaking, zexts are not free, but they are free when they can be
  // folded with other operations.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  // FIXME: Add other cases...
  //  - 32-bit shifts with a zext to i64
  //  - zext after ctlz, bswap, etc.
  //  - zext after and by a constant mask

  return TargetLowering::isZExtFree(Val, VT2);
}

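// The load case above is free because the PPC integer loads (lbz, lhz, lwz)
// already zero-extend into the full-width register, so no separate extend
// instruction is needed.
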
bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  if (DisablePPCUnaligned)
    return false;

  // PowerPC supports unaligned memory access for simple non-vector types.
  // Although accessing unaligned addresses is not as efficient as accessing
  // aligned addresses, it is generally more efficient than manual expansion,
  // and generally only traps for software emulation when crossing page
  // boundaries.

  if (!VT.isSimple())
    return false;

  if (VT.isFloatingPoint() && !VT.isVector() &&
      !Subtarget.allowsUnalignedFPAccess())
    return false;

  if (VT.getSimpleVT().isVector()) {
    if (Subtarget.hasVSX()) {
      if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
          VT != MVT::v4f32 && VT != MVT::v4i32)
        return false;
    } else {
      return false;
    }
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true;

  return true;
}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return (EnableQuadPrecision && Subtarget.hasP9Vector());
  default:
    break;
  }

  return false;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is a callee-save register, but we must treat it as clobbered by any call
  // site. Hence we include LR in the scratch registers, which are in turn added
  // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
  // to CTR, which is used by any indirect call.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}

unsigned PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

unsigned PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves

  if (Subtarget.hasVSX() || Subtarget.hasQPX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref || Subtarget.enableMachineScheduler())
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}

// Create a fast isel object.
FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {
  return PPC::createFastISel(FuncInfo, LibInfo);
}

void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (Subtarget.isDarwinABI()) return;
  if (!Subtarget.isPPC64()) return;

  // Update IsSplitCSR in PPCFunctionInfo.
  PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>();
  PFI->setIsSplitCSR(true);
}

void PPCTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (PPC::G8RCRegClass.contains(*I))
      RC = &PPC::G8RCRegClass;
    else if (PPC::F8RCRegClass.contains(*I))
      RC = &PPC::F8RCRegClass;
    else if (PPC::CRRCRegClass.contains(*I))
      RC = &PPC::CRRCRegClass;
    else if (PPC::VRRCRegClass.contains(*I))
      RC = &PPC::VRRCRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
             Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool PPCTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget.isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    // For FP types that are currently not supported by the PPC backend, return
    // false. Examples: f16, f80.
    return false;
  case MVT::f32:
  case MVT::f64:
  case MVT::f128:
    return Imm.isPosZero();
  }
}

// For vector shift operation op, fold
// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarSizeInBits();
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected shift operation");
  case ISD::SHL:
    TargetOpcode = PPCISD::SHL;
    break;
  case ISD::SRL:
    TargetOpcode = PPCISD::SRL;
    break;
  case ISD::SRA:
    TargetOpcode = PPCISD::SRA;
    break;
  }

  if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
      N1->getOpcode() == ISD::AND)
    if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
      if (Mask->getZExtValue() == OpSizeInBits - 1)
        return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));

  return SDValue();
}

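// Dropping the AND is safe because the Altivec/VSX vector shift instructions
// (vslw, vsrw, vsraw, and friends) already interpret each element's shift
// amount modulo the element width, making the explicit mask redundant.
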
SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  SDValue N0 = N->getOperand(0);
  ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!Subtarget.isISA3_0() ||
      N0.getOpcode() != ISD::SIGN_EXTEND ||
      N0.getOperand(0).getValueType() != MVT::i32 ||
      CN1 == nullptr || N->getValueType(0) != MVT::i64)
    return SDValue();

  // We can't save an operation here if the value is already extended, and
  // the existing shift is easier to combine.
  SDValue ExtsSrc = N0.getOperand(0);
  if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
      ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
    return SDValue();

  SDLoc DL(N0);
  SDValue ShiftBy = SDValue(CN1, 0);
  // We want the shift amount to be i32 on the extswli, but the shift could
  // have an i64 shift amount.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

  return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
                         ShiftBy);
}

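// For example, (i64 (shl (sign_extend i32 %x), 3)) becomes a single
// PPCISD::EXTSWSLI node (the ISA 3.0 extswsli instruction), folding the
// sign extension and the shift together.
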
SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;

  return SDValue();
}

// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
// Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
// When C is zero, the equation (addi Z, -C) can be simplified to Z.
// Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  if (!Subtarget.isPPC64())
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // Due to the limitations of the addi instruction,
      // -C is required to be [-32768, 32767].
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // If there is a pattern, canonicalize a zext operand to the RHS.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDLoc DL(N);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
  SDValue Cmp = RHS.getOperand(0);
  SDValue Z = Cmp.getOperand(0);
  auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));

  assert(Constant && "Constant Should not be a null pointer.");
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default: break;
  case ISD::SETNE: {
    //                                 when C == 0
    //                             --> addze X, (addic Z, -1).carry
    //                            /
    // add X, (zext(setne Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (addic (addi Z, -C), -1).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Addc.getNode(), 1));
  }
  case ISD::SETEQ: {
    //                                 when C == 0
    //                             --> addze X, (subfic Z, 0).carry
    //                            /
    // add X, (zext(sete  Z, C))--
    //                            \    when -32768 <= -C <= 32767 && C != 0
    //                             --> addze X, (subfic (addi Z, -C), 0).carry
    SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
                              DAG.getConstant(NegConstant, DL, MVT::i64));
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
                               DAG.getConstant(0, DL, MVT::i64), AddOrZ);
    return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
                       SDValue(Subc.getNode(), 1));
  }
  }

  return SDValue();
}

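// Why the carry trick above works: with Z' = Z - C, "addic Z', -1" carries
// out exactly when Z' != 0 (adding all-ones wraps unless Z' is zero), while
// "subfic Z', 0" computes 0 - Z' and carries out exactly when Z' == 0;
// addze then adds that carry, i.e. the 0/1 result of the comparison, to X.
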
SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
  if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
    return Value;

  return SDValue();
}

// Detect TRUNCATE operations on bitcasts of float128 values.
// What we are looking for here is the situation where we extract a subset
// of bits from a 128 bit float.
// This can be of two forms:
// 1) BITCAST of f128 feeding TRUNCATE
// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
// This is required because we do not have a legal i128 type, and so we
// want to prevent having to store the f128 and then reload part of it.
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // If we are using CRBits then try that first.
  if (Subtarget.useCRBits()) {
    // Check if CRBits did anything and return that if it did.
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);

  // Looking for a truncate of i128 to i64.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;

  // SRL feeding TRUNCATE.
  if (Op0.getOpcode() == ISD::SRL) {
    ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    // The right shift has to be by 64 bits.
    if (!ConstNode || ConstNode->getZExtValue() != 64)
      return SDValue();

    // Switch the element number to extract.
    EltToExtract = EltToExtract ? 0 : 1;
    // Update Op0 past the SRL.
    Op0 = Op0.getOperand(0);
  }

  // BITCAST feeding a TRUNCATE possibly via SRL.
  if (Op0.getOpcode() == ISD::BITCAST &&
      Op0.getValueType() == MVT::i128 &&
      Op0.getOperand(0).getValueType() == MVT::f128) {
    SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
    return DCI.DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
        DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
  }
  return SDValue();
}

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
  if (!ConstOpOrElement)
    return SDValue();

  // An imul is usually smaller than the alternative sequence for legal type.
  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
      isOperationLegal(ISD::MUL, N->getValueType(0)))
    return SDValue();

  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    switch (this->Subtarget.getCPUDirective()) {
    default:
      // TODO: enhance the condition for subtargets before pwr8
      return false;
    case PPC::DIR_PWR8:
      //  type        mul     add    shl
      // scalar        4       1      1
      // vector        7       2      2
      return true;
    case PPC::DIR_PWR9:
    case PPC::DIR_PWR_FUTURE:
      //  type        mul     add    shl
      // scalar        5       2      2
      // vector        7       2      2
      //
      // The cycle ratios of the related operations are shown in the table
      // above. Because mul is 5 (scalar) / 7 (vector) while add/sub/shl are
      // all 2 for both scalar and vector types, two-instruction patterns
      // (add/sub + shl, cost 4) are always profitable; but three-instruction
      // patterns like (mul x, -(2^N + 1)) => -(add (shl x, N), x)
      // (sub + add + shl, cost 6) only pay off for vector types.
      return IsAddOne && IsNeg ? VT.isVector() : true;
    }
  };

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
  bool IsNeg = MulAmt.isNegative();
  APInt MulAmtAbs = MulAmt.abs();

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)

    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
    SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);

    if (!IsNeg)
      return Res;

    return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))

    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();

    SDValue Op0 = N->getOperand(0);
    SDValue Op1 =
        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                    DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));

    if (!IsNeg)
      return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
    else
      return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
  } else {
    return SDValue();
  }
}

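// For example, (mul x, 5) becomes (add (shl x, 2), x) and (mul x, 7)
// becomes (sub (shl x, 3), x): one shift plus one add/sub in place of a
// multiply.
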
bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
  if (!Subtarget.is64BitELFABI())
    return false;

  // If not a tail call then no need to proceed.
  if (!CI->isTailCall())
    return false;

  // If sibling calls have been disabled and tail-calls aren't guaranteed
  // there is no reason to duplicate.
  auto &TM = getTargetMachine();
  if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
    return false;

  // Can't tail call a function called indirectly, or if it has variadic args.
  const Function *Callee = CI->getCalledFunction();
  if (!Callee || Callee->isVarArg())
    return false;

  // Make sure the callee and caller calling conventions are eligible for tco.
  const Function *Caller = CI->getParent()->getParent();
  if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
                                           CI->getCallingConv()))
    return false;

  // If the function is local then we have a good chance at tail-calling it.
  return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
}

bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  if (!Subtarget.hasVSX())
    return false;
  if (Subtarget.hasP9Vector() && VT == MVT::f128)
    return true;
  return VT == MVT::f32 || VT == MVT::f64 ||
         VT == MVT::v4f32 || VT == MVT::v2f64;
}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  const Value *Mask = AndI.getOperand(1);
  // If the mask is suitable for andi. or andis. we should sink the and.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    // Can't handle constants wider than 64 bits.
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }

  // For non-constant masks, we can always use the record-form and.
  return true;
}

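// Sinking the AND is beneficial because andi./andis. (and the record-form
// and.) set CR0 as a side effect, so the mask plus the compare-against-zero
// collapse into a single instruction.
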
// Transform (abs (sub (zext a), (zext b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a, b, 0)
// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a, b, 0)
// Transform (abs (sub a, b)) to (vabsd a, b, 1) if a & b are of type v4i32
SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  if (N->getOperand(0).getOpcode() == ISD::SUB) {
    // Even for signed integers, if it's known to be positive (as signed
    // integer) due to zero-extended inputs.
    unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
    unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
    if ((SubOpcd0 == ISD::ZERO_EXTEND ||
         SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
        (SubOpcd1 == ISD::ZERO_EXTEND ||
         SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(0, dl, MVT::i32));
    }

    // For type v4i32, it can be optimized with xvnegsp + vabsduw
    if (N->getOperand(0).getValueType() == MVT::v4i32 &&
        N->getOperand(0).hasOneUse()) {
      return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
                         N->getOperand(0)->getOperand(0),
                         N->getOperand(0)->getOperand(1),
                         DAG.getTargetConstant(1, dl, MVT::i32));
    }
  }

  return SDValue();
}

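// The '1' flag in the v4i32 case asks instruction selection to bias both
// inputs with xvnegsp first: flipping each word's sign bit (an XOR with
// 0x80000000) maps signed order onto unsigned order, after which vabsduw
// computes the signed absolute difference correctly.
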
// For type v4i32/v8i16/v16i8, transform
// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
SDValue PPCTargetLowering::combineVSelect(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
  assert(Subtarget.hasP9Altivec() &&
         "Only combine this when P9 altivec supported!");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue TrueOpnd = N->getOperand(1);
  SDValue FalseOpnd = N->getOperand(2);
  EVT VT = N->getOperand(1).getValueType();

  if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
      FalseOpnd.getOpcode() != ISD::SUB)
    return SDValue();

  // ABSD only available for type v4i32/v8i16/v16i8
  if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
    return SDValue();

  // At least to save one more dependent computation
  if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  // Can only handle unsigned comparison here
  switch (CC) {
  default:
    return SDValue();
  case ISD::SETUGT:
  case ISD::SETUGE:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
    std::swap(TrueOpnd, FalseOpnd);
    break;
  }

  SDValue CmpOpnd1 = Cond.getOperand(0);
  SDValue CmpOpnd2 = Cond.getOperand(1);

  // SETCC CmpOpnd1 CmpOpnd2 cond
  // TrueOpnd = CmpOpnd1 - CmpOpnd2
  // FalseOpnd = CmpOpnd2 - CmpOpnd1
  if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
      TrueOpnd.getOperand(1) == CmpOpnd2 &&
      FalseOpnd.getOperand(0) == CmpOpnd2 &&
      FalseOpnd.getOperand(1) == CmpOpnd1) {
    return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
                       CmpOpnd1, CmpOpnd2,
                       DAG.getTargetConstant(0, dl, MVT::i32));
  }

  return SDValue();
}