//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits() / vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements() / Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits by generating an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector, just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
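
// Worked example (illustrative): asking for element 5 of a v8i32 with
// vectorWidth == 128 gives ElemsPerChunk == 128 / 32 == 4 and
// NormalizedIdxVal == ((5 * 32) / 128) * 4 == 4, i.e. the index is rounded
// down to the start of the 128-bit chunk (elements 4..7) that contains it.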

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is a nop: Result is returned unchanged.
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTORS nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems / 2, DAG, dl);
}
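
// Illustrative use (hypothetical values, not from this file): concatenating
// two v4i32 nodes V1 and V2 into a v8i32 becomes two INSERT_SUBVECTOR nodes
// into an undef v8i32, which later match VINSERTF128 or plain subregister
// copies:
//   SDValue Wide = Concat128BitVectors(V1, V2, MVT::v8i32, 8, DAG, dl);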

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems / 2, DAG, dl);
}

// FIXME: This should stop caching the target machine as soon as
// we can remove resetOperationActions et al.
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM)
    : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  resetOperationActions();
}

void X86TargetLowering::resetOperationActions() {
  const TargetMachine &TM = getTargetMachine();
  static bool FirstTimeThrough = true;

  // If none of the target options have changed, then we don't need to reset
  // the operation actions.
  if (!FirstTimeThrough && TO == TM.Options) return;

  if (!FirstTimeThrough) {
    // Reinitialize the actions.
    initActions();
    FirstTimeThrough = false;
  }

  TO = TM.Options;

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);

  const X86RegisterInfo *RegInfo =
      TM.getSubtarget<X86Subtarget>().getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
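
  // Illustrative: addBypassSlowDiv(32, 8) arranges for a runtime check so
  // that a 32-bit divide whose operands actually fit in 8 bits is performed
  // with the much cheaper 8-bit divide instruction instead.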

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Set up Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // The MS runtime is weird: it exports _setjmp, but longjmp (without the
    // underscore)!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
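
  // Illustrative IR-level view of the divide case: given
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // both operations legalize to the same two-result ISD::SDIVREM node, so
  // CSE leaves a single node whose two results come from one IDIV.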

  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
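
  // Illustrative: legalizing an i64 add on 32-bit x86 splits it into an ADDC
  // for the low halves (carry out in EFLAGS, glued) feeding an ADDE for the
  // high halves, which match the ADD/ADC instruction pair.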

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32, which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
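
  // Illustrative: with Expand, the legalizer rewrites CTPOP into the classic
  // parallel bit-counting shift/mask/add sequence (as in Hacker's Delight)
  // rather than relying on a POPCNT instruction.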

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP as supported here is NOT intended to
  // support SjLj exception handling; it is a lightweight setjmp/longjmp
  // replacement to support continuations, user-level threading, and the
  // like. As a result, no other SjLj exception interfaces are implemented;
  // please don't build your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }

  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME: Use subtarget debug flags.
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
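
    // Illustrative: these custom lowerings materialize bitmask constants.
    // For f32, FABS becomes an ANDPS with 0x7fffffff (clear the sign bit)
    // and FNEG becomes an XORPS with 0x80000000 (flip the sign bit);
    // FCOPYSIGN combines both masks with an OR.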

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First, set the operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types; we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
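
    // Illustrative: a sextload from v4i8 to v4i32 can be a single 32-bit
    // scalar load followed by an in-register sign extension (e.g. PMOVSXBD
    // on SSE4.1, or unpack-and-shift sequences on plain SSE2), avoiding four
    // separate byte loads.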

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }
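
  // Illustrative: pre-AVX2 SSE has no per-element variable shifts, so the
  // custom lowering synthesizes them (e.g. a variable v4i32 shift-left can
  // be built as a multiply by per-element powers of two); with AVX2 the same
  // nodes simply match the VPSLLVD/VPSRLVD-style instructions.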

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }

    if (Subtarget->hasInt256()) {
      setOperationAction(ISD::ADD, MVT::v4i64, Legal);
      setOperationAction(ISD::ADD, MVT::v8i32, Legal);
      setOperationAction(ISD::ADD, MVT::v16i16, Legal);
      setOperationAction(ISD::ADD, MVT::v32i8, Legal);

      setOperationAction(ISD::SUB, MVT::v4i64, Legal);
      setOperationAction(ISD::SUB, MVT::v8i32, Legal);
      setOperationAction(ISD::SUB, MVT::v16i16, Legal);
      setOperationAction(ISD::SUB, MVT::v32i8, Legal);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Legal);
      setOperationAction(ISD::MUL, MVT::v16i16, Legal);
      // Don't lower v32i8 because there is no 128-bit byte mul.

      setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
      setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
      setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
      setOperationAction(ISD::MULHS, MVT::v16i16, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

      // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
      // when we have a 256-bit-wide blend with immediate.
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);

      // Only provide customized ctpop vector bit twiddling for vector types we
      // know to perform better than using the popcnt instructions on each
      // vector element. If popcnt isn't supported, always provide the custom
      // version.
      if (!Subtarget->hasPOPCNT())
        setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);

      // Custom CTPOP always performs better on natively supported v8i32.
      setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
    } else {
      setOperationAction(ISD::ADD, MVT::v4i64, Custom);
      setOperationAction(ISD::ADD, MVT::v8i32, Custom);
      setOperationAction(ISD::ADD, MVT::v16i16, Custom);
      setOperationAction(ISD::ADD, MVT::v32i8, Custom);

      setOperationAction(ISD::SUB, MVT::v4i64, Custom);
      setOperationAction(ISD::SUB, MVT::v8i32, Custom);
      setOperationAction(ISD::SUB, MVT::v16i16, Custom);
      setOperationAction(ISD::SUB, MVT::v32i8, Custom);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Custom);
      setOperationAction(ISD::MUL, MVT::v16i16, Custom);
      // Don't lower v32i8 because there is no 128-bit byte mul
    }

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v4i64, Custom);
    setOperationAction(ISD::SRL, MVT::v8i32, Custom);

    setOperationAction(ISD::SHL, MVT::v4i64, Custom);
    setOperationAction(ISD::SHL, MVT::v8i32, Custom);

    setOperationAction(ISD::SRA, MVT::v8i32, Custom);
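    // Note there is no v4i64 SRA here: AVX2 has VPSRAVD but no 64-bit
    // variable arithmetic shift; that only arrives with AVX-512.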
    // Custom lower several nodes for 256-bit types.
    for (MVT VT : MVT::vector_valuetypes()) {
      if (VT.getScalarSizeInBits() >= 32) {
        setOperationAction(ISD::MLOAD, VT, Legal);
        setOperationAction(ISD::MSTORE, VT, Legal);
      }

      // Extract subvector is special because the value type
      // (result) is 128-bit but the source is 256-bit wide.
      if (VT.is128BitVector()) {
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
      }

      // Do not attempt to custom lower other non-256-bit vectors
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    }
    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-256-bit vectors
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v4i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v4i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
    }
  }
  if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
    addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
    addRegisterClass(MVT::v8f64, &X86::VR512RegClass);

    addRegisterClass(MVT::i1, &X86::VK1RegClass);
    addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
    addRegisterClass(MVT::v16i1, &X86::VK16RegClass);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);

    setOperationAction(ISD::BR_CC, MVT::i1, Expand);
    setOperationAction(ISD::SETCC, MVT::i1, Custom);
    setOperationAction(ISD::XOR, MVT::i1, Legal);
    setOperationAction(ISD::OR, MVT::i1, Legal);
    setOperationAction(ISD::AND, MVT::i1, Legal);
    setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
    setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v16i1, Legal);

    setOperationAction(ISD::FADD, MVT::v16f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v16f32, Custom);

    setOperationAction(ISD::FADD, MVT::v8f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
    setOperationAction(ISD::FMA, MVT::v8f64, Legal);
    setOperationAction(ISD::FMA, MVT::v16f32, Legal);

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
    }
    setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);

    setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);

    setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i1, Custom);

    setOperationAction(ISD::MUL, MVT::v8i64, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v16f32, Custom);

    setOperationAction(ISD::ADD, MVT::v8i64, Legal);
    setOperationAction(ISD::ADD, MVT::v16i32, Legal);

    setOperationAction(ISD::SUB, MVT::v8i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i32, Legal);

    setOperationAction(ISD::MUL, MVT::v16i32, Legal);

    setOperationAction(ISD::SRL, MVT::v8i64, Custom);
    setOperationAction(ISD::SRL, MVT::v16i32, Custom);

    setOperationAction(ISD::SHL, MVT::v8i64, Custom);
    setOperationAction(ISD::SHL, MVT::v16i32, Custom);

    setOperationAction(ISD::SRA, MVT::v8i64, Custom);
    setOperationAction(ISD::SRA, MVT::v16i32, Custom);

    setOperationAction(ISD::AND, MVT::v8i64, Legal);
    setOperationAction(ISD::OR, MVT::v8i64, Legal);
    setOperationAction(ISD::XOR, MVT::v8i64, Legal);
    setOperationAction(ISD::AND, MVT::v16i32, Legal);
    setOperationAction(ISD::OR, MVT::v16i32, Legal);
    setOperationAction(ISD::XOR, MVT::v16i32, Legal);
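    // The 512-bit logic ops above map directly to the AVX-512F VPANDD/Q,
    // VPORD/Q and VPXORD/Q forms.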
    if (Subtarget->hasCDI()) {
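      // AVX-512 CD supplies native vector count-leading-zeros
      // (VPLZCNTD/VPLZCNTQ).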
      setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
      setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
    }
    // Custom lower several nodes.
    for (MVT VT : MVT::vector_valuetypes()) {
      unsigned EltSize = VT.getVectorElementType().getSizeInBits();

      // Extract subvector is special because the value type
      // (result) is 256/128-bit but the source is 512-bit wide.
      if (VT.is128BitVector() || VT.is256BitVector()) {
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
      }
      if (VT.getVectorElementType() == MVT::i1)
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);

      // Do not attempt to custom lower other non-512-bit vectors
      if (!VT.is512BitVector())
        continue;

      if (EltSize >= 32) {
        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::VSELECT, VT, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::MLOAD, VT, Legal);
        setOperationAction(ISD::MSTORE, VT, Legal);
      }
    }
    for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-512-bit vectors.
      if (!VT.is512BitVector())
        continue;

      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
    }
  } // has AVX-512
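  // AVX-512 BW extends the 512-bit integer ops to 8- and 16-bit elements and
  // adds the v32i1/v64i1 mask register classes.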
  if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
    addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
    addRegisterClass(MVT::v64i8, &X86::VR512RegClass);

    addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
    addRegisterClass(MVT::v64i1, &X86::VK64RegClass);

    setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
    setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
    setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
    setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
    setOperationAction(ISD::ADD, MVT::v32i16, Legal);
    setOperationAction(ISD::ADD, MVT::v64i8, Legal);
    setOperationAction(ISD::SUB, MVT::v32i16, Legal);
    setOperationAction(ISD::SUB, MVT::v64i8, Legal);
    setOperationAction(ISD::MUL, MVT::v32i16, Legal);
    for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
      const MVT VT = (MVT::SimpleValueType)i;

      const unsigned EltSize = VT.getVectorElementType().getSizeInBits();

      // Do not attempt to promote non-512-bit vectors.
      if (!VT.is512BitVector())
        continue;

      if (EltSize < 32) {
        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::VSELECT, VT, Legal);
      }
    }
  }
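  // AVX-512 VL makes the 128/256-bit forms of the AVX-512 operations
  // available and adds the v2i1/v4i1 mask register classes.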
  if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
    addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
    addRegisterClass(MVT::v2i1, &X86::VK2RegClass);

    setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);

    setOperationAction(ISD::AND, MVT::v8i32, Legal);
    setOperationAction(ISD::OR, MVT::v8i32, Legal);
    setOperationAction(ISD::XOR, MVT::v8i32, Legal);
    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
  }
  // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
  // of this type with custom code.
  for (MVT VT : MVT::vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
  // than generic legalization for 64-bit multiplication-with-overflow, though.
  for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
    MVT VT = IntVTs[i];
    // Add/Sub/Mul with overflow operations are custom lowered.
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }
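  // The custom lowering selects X86ISD arithmetic nodes whose second result
  // is EFLAGS, so the overflow bit can be consumed directly by SETCC/branch.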
  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }
  // Combine sin / cos into one node or libcall if possible.
  if (Subtarget->hasSinCos()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    if (Subtarget->isTargetDarwin()) {
      // For MacOSX, we don't want the normal expansion of a libcall to sincos.
      // We want to issue a libcall to __sincos_stret to avoid memory traffic.
      setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
      setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
    }
  }
  if (Subtarget->isTargetWin64()) {
    setOperationAction(ISD::SDIV, MVT::i128, Custom);
    setOperationAction(ISD::UDIV, MVT::i128, Custom);
    setOperationAction(ISD::SREM, MVT::i128, Custom);
    setOperationAction(ISD::UREM, MVT::i128, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
  }
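  // Win64 has no native 128-bit divide; the custom lowering emits calls to
  // the compiler-rt style helpers (e.g. __divti3), with the i128 operands
  // passed indirectly as the Win64 ABI requires.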
  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::VSELECT);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::MLOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::MSTORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget->is64Bit())
    setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::XOR);
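  // All of the nodes registered above are handled in
  // X86TargetLowering::PerformDAGCombine.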
  computeRegisterProperties();

  // On Darwin, -Os means optimize for size without hurting performance,
  // do not reduce the limit.
  MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(4); // 2^4 bytes.
  // A predictable cmov does not hurt on the in-order Atom.
  PredictableSelectIsExpensive = !Subtarget->isAtom();
  EnableExtLdPromotion = true;
  setPrefFunctionAlignment(4); // 2^4 bytes.

  verifyIntrinsicTables();
}
// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO() && Subtarget->is64Bit();
}
TargetLoweringBase::LegalizeTypeAction
X86TargetLowering::getPreferredVectorAction(EVT VT) const {
  if (ExperimentalVectorWideningLegalization &&
      VT.getVectorNumElements() != 1 &&
      VT.getVectorElementType().getSimpleVT() != MVT::i1)
    return TypeWidenVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}
EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;

  const unsigned NumElts = VT.getVectorNumElements();
  const EVT EltVT = VT.getVectorElementType();
  if (VT.is512BitVector()) {
    if (Subtarget->hasAVX512())
      if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
          EltVT == MVT::f32 || EltVT == MVT::f64)
        switch (NumElts) {
        case  8: return MVT::v8i1;
        case 16: return MVT::v16i1;
        }
    if (Subtarget->hasBWI())
      if (EltVT == MVT::i8 || EltVT == MVT::i16)
        switch (NumElts) {
        case 32: return MVT::v32i1;
        case 64: return MVT::v64i1;
        }
  }

  if (VT.is256BitVector() || VT.is128BitVector()) {
    if (Subtarget->hasVLX())
      if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
          EltVT == MVT::f32 || EltVT == MVT::f64)
        switch (NumElts) {
        case 2: return MVT::v2i1;
        case 4: return MVT::v4i1;
        case 8: return MVT::v8i1;
        }
    if (Subtarget->hasBWI() && Subtarget->hasVLX())
      if (EltVT == MVT::i8 || EltVT == MVT::i16)
        switch (NumElts) {
        case  8: return MVT::v8i1;
        case 16: return MVT::v16i1;
        case 32: return MVT::v32i1;
        }
  }

  return VT.changeVectorElementTypeToInteger();
}
/// Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
}
/// Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}
/// Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero, the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero there is no need to check it
/// against the alignment requirement, probably because the source does not
/// need to be loaded. If 'IsMemset' is true, that means it's expanding a
/// memset. If 'ZeroMemset' is true, that means it's a memset of zero.
/// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
/// not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                       unsigned DstAlign, unsigned SrcAlign,
                                       bool IsMemset, bool ZeroMemset,
                                       bool MemcpyStrSrc,
                                       MachineFunction &MF) const {
  const Function *F = MF.getFunction();
  if ((!IsMemset || ZeroMemset) &&
      !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                       Attribute::NoImplicitFloat)) {
    if (Size >= 16 &&
        (Subtarget->isUnalignedMemAccessFast() ||
         ((DstAlign == 0 || DstAlign >= 16) &&
          (SrcAlign == 0 || SrcAlign >= 16)))) {
      if (Size >= 32) {
        if (Subtarget->hasInt256())
          return MVT::v8i32;
        if (Subtarget->hasFp256())
          return MVT::v8f32;
      }
      if (Subtarget->hasSSE2())
        return MVT::v4i32;
      if (Subtarget->hasSSE1())
        return MVT::v4f32;
    } else if (!MemcpyStrSrc && Size >= 8 &&
               !Subtarget->is64Bit() &&
               Subtarget->hasSSE2()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      return MVT::f64;
    }
  }
  if (Subtarget->is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}
bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
  if (VT == MVT::f32)
    return X86ScalarSSEf32;
  else if (VT == MVT::f64)
    return X86ScalarSSEf64;
  return true;
}
bool
X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                  unsigned,
                                                  unsigned,
                                                  bool *Fast) const {
  if (Fast)
    *Fast = Subtarget->isUnalignedMemAccessFast();
  return true;
}
/// Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}
const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid, MCContext &Ctx) const {
  assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT());
  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
  // entries.
  return MCSymbolRefExpr::Create(MBB->getSymbol(),
                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
}
/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget->is64Bit())
    // This doesn't have SDLoc associated with it, but is not really the
    // same as a Register.
    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
  return Table;
}
/// This returns the relocation base for the given PIC jumptable,
/// the same as getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                             MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget->isPICStyleRIPRel())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
}
// FIXME: Why is this routine here? Move to RegInfo!
std::pair<const TargetRegisterClass*, uint8_t>
X86TargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
    break;
  case MVT::x86mmx:
    RRC = &X86::VR64RegClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
  case MVT::v4f64:
    RRC = &X86::VR128RegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}
bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                               unsigned &Offset) const {
  if (!Subtarget->isTargetLinux())
    return false;

  if (Subtarget->is64Bit()) {
    // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
    Offset = 0x28;
    if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
      AddressSpace = 256;
    else
      AddressSpace = 257;
  } else {
    // %gs:0x14 on i386.
    Offset = 0x14;
    AddressSpace = 256;
  }
  return true;
}
bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  assert(SrcAS != DestAS && "Expected different address spaces!");
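  // Address spaces 256 and up denote the x86 segment overrides (GS/FS/SS),
  // so a cast is only a no-op when neither side is segmented.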
  return SrcAS < 256 && DestAS < 256;
}
//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"
bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}
const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
  return ScratchRegs;
}
SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               SDLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  SDValue Flag;
  SmallVector<SDValue, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
                                         MVT::i16));
  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue ValToCopy = OutVals[i];
    EVT ValVT = ValToCopy.getValueType();

    // Promote values to the appropriate types.
    if (VA.getLocInfo() == CCValAssign::SExt)
      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::ZExt)
      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::AExt)
      ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::BCvt)
      ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);

    assert(VA.getLocInfo() != CCValAssign::FPExt &&
           "Unexpected FP-extend for return value.");

    // If this is x86-64, and we disabled SSE, we can't return FP values,
    // or SSE or MMX vectors.
    if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
         VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
        (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }
    // Likewise we can't return F64 values with SSE1 only. gcc does so, but
    // llvm-gcc has never done it right and no one has noticed, so this
    // should be OK for now.
    if (ValVT == MVT::f64 &&
        (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
      report_fatal_error("SSE2 register return with SSE2 disabled");
    // Returns in ST0/ST1 are handled specially: these are pushed as operands to
    // the RET instruction and handled by the FP Stackifier.
    if (VA.getLocReg() == X86::FP0 ||
        VA.getLocReg() == X86::FP1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(VA.getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }
    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
    // which is returned in RAX / RDX.
    if (Subtarget->is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
          ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                                  ValToCopy);
          // If we don't have SSE2 available, convert to v4f32 so the generated
          // register is legal.
          if (!Subtarget->hasSSE2())
            ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
        }
      }
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }
  // The x86-64 ABIs require that for returning structs by value we copy
  // the sret argument into %rax/%eax (depending on ABI) for the return.
  // Win32 requires us to put the sret argument to %eax as well.
  // We saved the argument into a virtual register in the entry block,
  // so now we copy the value out and into %rax/%eax.
  if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() &&
      (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
    MachineFunction &MF = DAG.getMachineFunction();
    X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
    unsigned Reg = FuncInfo->getSRetReturnReg();
    assert(Reg &&
           "SRetReturnReg should have been set in LowerFormalArguments().");
    SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());

    unsigned RetValReg
        = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
          X86::RAX : X86::EAX;
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
    Flag = Chain.getValue(1);

    // RAX/EAX now acts like a return value.
    RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
  }
  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
}
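// Queried by generic lowering to check whether a node's only use is the
// function's return; this is what allows e.g. a libcall to become a tail
// call.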
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() != ISD::FP_EXTEND)
    return false;

  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != X86ISD::RET_FLAG)
      return false;
    // If we are returning more than one value, we can definitely
    // not make a tail call; see PR19530.
    if (UI->getNumOperands() > 4)
      return false;
    if (UI->getNumOperands() == 4 &&
        UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}
EVT
X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                                            ISD::NodeType ExtendKind) const {
  MVT ReturnMVT;
  // TODO: Is this also valid on 32-bit?
  if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
    ReturnMVT = MVT::i8;
  else
    ReturnMVT = MVT::i32;

  EVT MinVT = getRegisterType(Context, ReturnMVT);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}
/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   SDLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool Is64Bit = Subtarget->is64Bit();
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    EVT CopyVT = VA.getValVT();

    // If this is x86-64, and we disabled SSE, we can't return FP values
    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
        ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }

    // If we prefer to use the value in xmm registers, copy it out as f80 and
    // use a truncate to move it from fp stack reg to xmm reg.
    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT()))
      CopyVT = MVT::f80;

    Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
                               CopyVT, InFlag).getValue(1);
    SDValue Val = Chain.getValue(0);

    if (CopyVT != VA.getValVT())
      Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1));

    InFlag = Chain.getValue(2);
    InVals.push_back(Val);
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  StdCall is the standard calling convention for many Windows API routines.
//  It differs from the C calling convention only slightly: the callee cleans
//  up the stack instead of the caller, and symbols are decorated in some
//  fancy way :) It doesn't support any vector arguments.
//  For info on the fast calling convention, see the Fast Calling Convention
//  (tail call) implementation LowerX86_32FastCCCallTo.
/// CallIsStructReturn - Determines whether a call uses struct return
/// semantics.
enum StructReturnType {
  NotStructReturn,
  RegStructReturn,
  StackStructReturn
};
static StructReturnType
callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  if (Outs.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg())
    return RegStructReturn;
  return StackStructReturn;
}

/// Determines whether a function uses struct return semantics.
static StructReturnType
argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
  if (Ins.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg())
    return RegStructReturn;
  return StackStructReturn;
}
/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          SDLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);

  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile*/false, /*AlwaysInline=*/true,
                       MachinePointerInfo(), MachinePointerInfo());
}
/// Return true if the calling convention is one that
/// supports tail call optimization.
static bool IsTailCallConvention(CallingConv::ID CC) {
  return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
          CC == CallingConv::HiPE);
}

/// \brief Return true if the calling convention is a C calling convention.
static bool IsCCallConvention(CallingConv::ID CC) {
  return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
          CC == CallingConv::X86_64_SysV);
}
bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
    return false;

  CallSite CS(CI);
  CallingConv::ID CalleeCC = CS.getCallingConv();
  if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
    return false;

  return true;
}
/// Return true if the function is being made into
/// a tailcall target by changing its ABI.
static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
                                   bool GuaranteedTailCallOpt) {
  return GuaranteedTailCallOpt && IsTailCallConvention(CC);
}
SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain,
                                    CallingConv::ID CallConv,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    MachineFrameInfo *MFI,
                                    unsigned i) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
  bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  EVT ValVT;

  // If value is passed by pointer we have address passed instead of the value
  // itself.
  if (VA.getLocInfo() == CCValAssign::Indirect)
    ValVT = VA.getLocVT();
  else
    ValVT = VA.getValVT();

  // FIXME: For now, all byval parameter objects are marked mutable. This can be
  // changed with more analysis.
  // In case of tail call optimization mark all arguments mutable. Since they
  // could be overwritten by lowering of arguments in case of a tail call.
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
    int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
    return DAG.getFrameIndex(FI, getPointerTy());
  } else {
    int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
                                    VA.getLocMemOffset(), isImmutable);
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    return DAG.getLoad(ValVT, dl, Chain, FIN,
                       MachinePointerInfo::getFixedStack(FI),
                       false, false, false, 0);
  }
}
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
                                                const X86Subtarget *Subtarget) {
  assert(Subtarget->is64Bit());

  if (Subtarget->isCallingConvWin64(CallConv)) {
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    };
    return makeArrayRef(std::begin(GPR64ArgRegsWin64),
                        std::end(GPR64ArgRegsWin64));
  }

  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return makeArrayRef(std::begin(GPR64ArgRegs64Bit),
                      std::end(GPR64ArgRegs64Bit));
}
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
                                                CallingConv::ID CallConv,
                                                const X86Subtarget *Subtarget) {
  assert(Subtarget->is64Bit());
  if (Subtarget->isCallingConvWin64(CallConv)) {
    // The XMM registers which might contain var arg parameters are shadowed
    // in their paired GPR. So we only need to save the GPR to their home
    // slots.
    // TODO: __vectorcall will change this.
    return None;
  }

  const Function *Fn = MF.getFunction();
  bool NoImplicitFloatOps = Fn->getAttributes().
      hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
  assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
         "SSE register cannot be used when SSE is disabled!");
  if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
      !Subtarget->hasSSE1())
    // Kernel mode asks for SSE to be disabled, so there are no XMM argument
    // registers.
    return None;

  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
}
SDValue
X86TargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv,
                                        bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg> &Ins,
                                        SDLoc dl,
                                        SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool Is64Bit = Subtarget->is64Bit();
  bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);

  assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);

  CCInfo.AnalyzeFormalArguments(Ins, CC_X86);

  unsigned LastVal = ~0U;
  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    (void)LastVal;
    LastVal = VA.getValNo();
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = &X86::GR32RegClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = &X86::GR64RegClass;
      else if (RegVT == MVT::f32)
        RC = &X86::FR32RegClass;
      else if (RegVT == MVT::f64)
        RC = &X86::FR64RegClass;
      else if (RegVT.is512BitVector())
        RC = &X86::VR512RegClass;
      else if (RegVT.is256BitVector())
        RC = &X86::VR256RegClass;
      else if (RegVT.is128BitVector())
        RC = &X86::VR128RegClass;
      else if (RegVT == MVT::x86mmx)
        RC = &X86::VR64RegClass;
      else if (RegVT == MVT::i1)
        RC = &X86::VK1RegClass;
      else if (RegVT == MVT::v8i1)
        RC = &X86::VK8RegClass;
      else if (RegVT == MVT::v16i1)
        RC = &X86::VK16RegClass;
      else if (RegVT == MVT::v32i1)
        RC = &X86::VK32RegClass;
      else if (RegVT == MVT::v64i1)
        RC = &X86::VK64RegClass;
      else
        llvm_unreachable("Unknown argument type!");
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::BCvt)
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);

      if (VA.isExtInLoc()) {
        // Handle MMX values passed in XMM regs.
        if (RegVT.isVector())
          ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
        else
          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
      }
    } else {
      assert(VA.isMemLoc());
      ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
    }
    // If value is passed via pointer - do a load.
    if (VA.getLocInfo() == CCValAssign::Indirect)
      ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
                             MachinePointerInfo(), false, false, false, 0);

    InVals.push_back(ArgValue);
  }
  if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      // The x86-64 ABIs require that for returning structs by value we copy
      // the sret argument into %rax/%eax (depending on ABI) for the return.
      // Win32 requires us to put the sret argument to %eax as well.
      // Save the argument into a virtual register so that we can access it
      // from the return points.
      if (Ins[i].Flags.isSRet()) {
        unsigned Reg = FuncInfo->getSRetReturnReg();
        if (!Reg) {
          MVT PtrTy = getPointerTy();
          Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
          FuncInfo->setSRetReturnReg(Reg);
        }
        SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
        break;
      }
    }
  }
  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
  if (FuncIsMadeTailCallSafe(CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt))
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start. We
  // can skip this if there are no va_start calls.
  if (MFI->hasVAStart() &&
      (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
                   CallConv != CallingConv::X86_ThisCall))) {
    FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(1, StackSize, true));
  }
  // Figure out if XMM registers are in use.
  assert(!(MF.getTarget().Options.UseSoftFloat &&
           Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                            Attribute::NoImplicitFloat)) &&
         "SSE register cannot be used when SSE is disabled!");
  // 64-bit calling conventions support varargs and register parameters, so we
  // have to do extra work to spill them in the prologue.
  if (Is64Bit && isVarArg && MFI->hasVAStart()) {
    // Find the first unallocated argument registers.
    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
    ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
    unsigned NumIntRegs =
        CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
    unsigned NumXMMRegs =
        CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
    assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");

    // Gather all the live in physical registers.
    SmallVector<SDValue, 6> LiveGPRs;
    SmallVector<SDValue, 8> LiveXMMRegs;
    SDValue ALVal;
    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
      unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
      LiveGPRs.push_back(
          DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
    }
    if (!ArgXMMs.empty()) {
      unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
      for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
        unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
        LiveXMMRegs.push_back(
            DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
      }
    }
    if (IsWin64) {
      const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
      // Get to the caller-allocated home save location. Add 8 to account
      // for the return address.
      int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
      FuncInfo->setRegSaveFrameIndex(
          MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
      // Fixup to set vararg frame on shadow area (4 x i64).
      if (NumIntRegs < 4)
        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
    } else {
      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
      FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
      FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
    }
    // Store the integer parameter registers.
    SmallVector<SDValue, 8> MemOps;
    SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                                      getPointerTy());
    unsigned Offset = FuncInfo->getVarArgsGPOffset();
    for (SDValue Val : LiveGPRs) {
      SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
                                DAG.getIntPtrConstant(Offset));
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN,
                       MachinePointerInfo::getFixedStack(
                           FuncInfo->getRegSaveFrameIndex(), Offset),
                       false, false, 0);
      MemOps.push_back(Store);
      Offset += 8;
    }
    if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
      // Now store the XMM (fp + vector) parameter registers.
      SmallVector<SDValue, 12> SaveXMMOps;
      SaveXMMOps.push_back(Chain);
      SaveXMMOps.push_back(ALVal);
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
          FuncInfo->getRegSaveFrameIndex()));
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
          FuncInfo->getVarArgsFPOffset()));
      SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
                        LiveXMMRegs.end());
      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
                                   MVT::Other, SaveXMMOps));
    }

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }
  if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
    // Find the largest legal vector type.
    MVT VecVT = MVT::Other;
    // FIXME: Only some x86_32 calling conventions support AVX512.
    if (Subtarget->hasAVX512() &&
        (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
                     CallConv == CallingConv::Intel_OCL_BI)))
      VecVT = MVT::v16f32;
    else if (Subtarget->hasAVX())
      VecVT = MVT::v8f32;
    else if (Subtarget->hasSSE2())
      VecVT = MVT::v4f32;

    // We forward some GPRs and some vector types.
    SmallVector<MVT, 2> RegParmTypes;
    MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
    RegParmTypes.push_back(IntVT);
    if (VecVT != MVT::Other)
      RegParmTypes.push_back(VecVT);

    // Compute the set of forwarded registers. The rest are scratch.
    SmallVectorImpl<ForwardedRegister> &Forwards =
        FuncInfo->getForwardedMustTailRegParms();
    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

    // Conservatively forward AL on x86_64, since it might be used for varargs.
    if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
      unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
    }

    // Copy all forwards from physical to virtual registers.
    for (ForwardedRegister &F : Forwards) {
      // FIXME: Can we use a less constrained schedule?
      SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
      F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
      Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
    }
  }
  // Some CCs need callee pop.
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt)) {
    FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
  } else {
    FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && !IsTailCallConvention(CallConv) &&
        !Subtarget->getTargetTriple().isOSMSVCRT() &&
        argsAreStructReturn(Ins) == StackStructReturn)
      FuncInfo->setBytesToPopOnReturn(4);
  }

  if (!Is64Bit) {
    // RegSaveFrameIndex is X86-64 only.
    FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
    if (CallConv == CallingConv::X86_FastCall ||
        CallConv == CallingConv::X86_ThisCall)
      // fastcc functions can't have varargs.
      FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
  }

  FuncInfo->setArgumentStackSize(StackSize);

  return Chain;
}
SDValue
X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    SDLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}
/// Emit a load of return address if tail call
/// optimization is performed and it is required.
SDValue
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
                                           SDValue &OutRetAddr, SDValue Chain,
                                           bool IsTailCall, bool Is64Bit,
                                           int FPDiff, SDLoc dl) const {
  // Adjust the Return address stack slot.
  EVT VT = getPointerTy();
  OutRetAddr = getReturnAddressFrameIndex(DAG);

  // Load the "old" Return address.
  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
                           false, false, false, 0);
  return SDValue(OutRetAddr.getNode(), 1);
}
/// Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff!=0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                        SDValue Chain, SDValue RetAddrFrIdx,
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, SDLoc dl) {
  // Store the return address to the appropriate stack slot.
  if (!FPDiff) return Chain;
  // Calculate the new stack slot for the return address.
  int NewReturnAddrFI =
      MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
                                           false);
  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                       MachinePointerInfo::getFixedStack(NewReturnAddrFI),
                       false, false, 0);
  return Chain;
}
SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool &isTailCall                      = CLI.IsTailCall;
  bool isVarArg                         = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool Is64Bit        = Subtarget->is64Bit();
  bool IsWin64        = Subtarget->isCallingConvWin64(CallConv);
  StructReturnType SR = callIsStructReturn(Outs);
  bool IsSibcall      = false;
  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();

  if (MF.getTarget().Options.DisableTailCalls)
    isTailCall = false;

  bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
  if (IsMustTail) {
    // Force this to be a tail call. The verifier rules are enough to ensure
    // that we can lower this successfully without moving the return address
    // around.
    isTailCall = true;
  } else if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    isVarArg, SR != NotStructReturn,
                    MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
                    Outs, OutVals, Ins, DAG);

    // Sibcalls are automatically detected tailcalls which do not require
    // ABI changes.
    if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
      IsSibcall = true;

    if (isTailCall)
      ++NumTailCalls;
  }
  assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);

  CCInfo.AnalyzeCallOperands(Outs, CC_X86);
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  if (IsSibcall)
    // This is a sibcall. The memory operands are available in caller's
    // own caller's stack.
    NumBytes = 0;
  else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
           IsTailCallConvention(CallConv))
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
  int FPDiff = 0;
  if (isTailCall && !IsSibcall && !IsMustTail) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();

    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the returnaddr stackslot.
    // But only set if delta is greater than previous delta.
    if (FPDiff < X86Info->getTCReturnAddrDelta())
      X86Info->setTCReturnAddrDelta(FPDiff);
  }
2858 unsigned NumBytesToPush = NumBytes;
2859 unsigned NumBytesToPop = NumBytes;
2861 // If we have an inalloca argument, all stack space has already been allocated
2862   // for us and will be right at the top of the stack. We don't support multiple
2863 // arguments passed in memory when using inalloca.
2864   if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2865     NumBytesToPush = 0;
2866     if (!ArgLocs.back().isMemLoc())
2867       report_fatal_error("cannot use inalloca attribute on a register "
2868                          "parameter");
2869     if (ArgLocs.back().getLocMemOffset() != 0)
2870       report_fatal_error("any parameter with the inalloca attribute must be "
2871                          "the only memory argument");
2872   }
2874   if (!IsSibcall)
2875     Chain = DAG.getCALLSEQ_START(
2876 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2878 SDValue RetAddrFrIdx;
2879 // Load return address for tail calls.
2880 if (isTailCall && FPDiff)
2881 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2882 Is64Bit, FPDiff, dl);
2884 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2885 SmallVector<SDValue, 8> MemOpChains;
2888 // Walk the register/memloc assignments, inserting copies/loads. In the case
2889   // of tail call optimization, arguments are handled later.
2890 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
2891 DAG.getSubtarget().getRegisterInfo());
2892 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2893 // Skip inalloca arguments, they have already been written.
2894 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2895     if (Flags.isInAlloca())
2896       continue;
2898 CCValAssign &VA = ArgLocs[i];
2899 EVT RegVT = VA.getLocVT();
2900 SDValue Arg = OutVals[i];
2901 bool isByVal = Flags.isByVal();
2903 // Promote the value if needed.
2904 switch (VA.getLocInfo()) {
2905 default: llvm_unreachable("Unknown loc info!");
2906 case CCValAssign::Full: break;
2907     case CCValAssign::SExt:
2908       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2909       break;
2910     case CCValAssign::ZExt:
2911       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2912       break;
2913     case CCValAssign::AExt:
2914       if (RegVT.is128BitVector()) {
2915         // Special case: passing MMX values in XMM registers.
2916         Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2917         Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2918         Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2919       } else
2920         Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2921       break;
2922     case CCValAssign::BCvt:
2923       Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2924       break;
2925     case CCValAssign::Indirect: {
2926       // Store the argument.
2927       SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2928       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2929       Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2930                            MachinePointerInfo::getFixedStack(FI),
2931                            false, false, 0);
2932       Arg = SpillSlot;
2933       break;
2934     }
2935     }
2937 if (VA.isRegLoc()) {
2938 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2939 if (isVarArg && IsWin64) {
2940 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2941 // shadow reg if callee is a varargs function.
2942 unsigned ShadowReg = 0;
2943 switch (VA.getLocReg()) {
2944 case X86::XMM0: ShadowReg = X86::RCX; break;
2945 case X86::XMM1: ShadowReg = X86::RDX; break;
2946 case X86::XMM2: ShadowReg = X86::R8; break;
2947 case X86::XMM3: ShadowReg = X86::R9; break;
2948         }
2949         if (ShadowReg)
2950           RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2951       }
2952 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2953 assert(VA.isMemLoc());
2954 if (!StackPtr.getNode())
2955         StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2956                                       getPointerTy());
2957       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2958                                              dl, DAG, VA, Flags));
2959     }
2960   }
2962 if (!MemOpChains.empty())
2963 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2965   if (Subtarget->isPICStyleGOT()) {
2966     // ELF / PIC requires the GOT pointer to be live in EBX before function
2967     // calls via the PLT.
2968     if (!isTailCall) {
2969       RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2970                DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2971     } else {
2972 // If we are tail calling and generating PIC/GOT style code load the
2973 // address of the callee into ECX. The value in ecx is used as target of
2974 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2975 // for tail calls on PIC/GOT architectures. Normally we would just put the
2976 // address of GOT into ebx and then call target@PLT. But for tail calls
2977     // ebx would be restored (since ebx is callee saved) before jumping to the
2978     // callee.
2980     // Note: The actual moving to ECX is done further down.
2981 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2982 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2983 !G->getGlobal()->hasProtectedVisibility())
2984 Callee = LowerGlobalAddress(Callee, DAG);
2985 else if (isa<ExternalSymbolSDNode>(Callee))
2986       Callee = LowerExternalSymbol(Callee, DAG);
2987     }
2988   }
2990 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2991 // From AMD64 ABI document:
2992 // For calls that may call functions that use varargs or stdargs
2993 // (prototype-less calls or calls to functions containing ellipsis (...) in
2994 // the declaration) %al is used as hidden argument to specify the number
2995 // of SSE registers used. The contents of %al do not need to match exactly
2996   // the number of registers, but must be an upper bound on the number of SSE
2997 // registers used and is in the range 0 - 8 inclusive.
2999 // Count the number of XMM registers allocated.
3000 static const MCPhysReg XMMArgRegs[] = {
3001 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3002       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3003     };
3004 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3005 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3006 && "SSE registers cannot be used when SSE is disabled");
3008 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3009                                         DAG.getConstant(NumXMMRegs, MVT::i8)));
3010   }
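  // Worked example (editor's illustration): for a variadic call such as
  // printf("%f %f", x, y) on x86-64 Linux, the two doubles are passed in XMM0
  // and XMM1, so NumXMMRegs == 2 and AL is set to 2. Any upper bound up to 8
  // would also satisfy the ABI; the exact count is simply the cheapest choice.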
3012 if (isVarArg && IsMustTail) {
3013 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3014 for (const auto &F : Forwards) {
3015 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3016       RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3017     }
3018   }
3020 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3021 // don't need this because the eligibility check rejects calls that require
3022 // shuffling arguments passed in memory.
3023 if (!IsSibcall && isTailCall) {
3024 // Force all the incoming stack arguments to be loaded from the stack
3025 // before any new outgoing arguments are stored to the stack, because the
3026 // outgoing stack slots may alias the incoming argument stack slots, and
3027 // the alias isn't otherwise explicit. This is slightly more conservative
3028 // than necessary, because it means that each store effectively depends
3029 // on every argument instead of just those arguments it would clobber.
3030 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3032     SmallVector<SDValue, 8> MemOpChains2;
3033     SDValue FIN;
3034     int FI = 0;
3035     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3036       CCValAssign &VA = ArgLocs[i];
3037       if (VA.isRegLoc())
3038         continue;
3039       assert(VA.isMemLoc());
3040 SDValue Arg = OutVals[i];
3041 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3042 // Skip inalloca arguments. They don't require any work.
3043       if (Flags.isInAlloca())
3044         continue;
3045 // Create frame index.
3046 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3047 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3048 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3049 FIN = DAG.getFrameIndex(FI, getPointerTy());
3051 if (Flags.isByVal()) {
3052 // Copy relative to framepointer.
3053 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3054 if (!StackPtr.getNode())
3055 StackPtr = DAG.getCopyFromReg(Chain, dl,
3056                                         RegInfo->getStackRegister(),
3057                                         getPointerTy());
3058         Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3060         MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3061                                                          ArgChain,
3062                                                          Flags, DAG, dl));
3063       } else {
3064 // Store relative to framepointer.
3065 MemOpChains2.push_back(
3066 DAG.getStore(ArgChain, dl, Arg, FIN,
3067                        MachinePointerInfo::getFixedStack(FI),
3068                        false, false, 0));
3069       }
3070     }
3072 if (!MemOpChains2.empty())
3073 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3075 // Store the return address to the appropriate stack slot.
3076     Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3077                                      getPointerTy(), RegInfo->getSlotSize(),
3078                                      FPDiff, dl);
3079   }
3081 // Build a sequence of copy-to-reg nodes chained together with token chain
3082   // and flag operands which copy the outgoing args into registers.
3083   SDValue InFlag;
3084 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3085 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3086 RegsToPass[i].second, InFlag);
3087     InFlag = Chain.getValue(1);
3088   }
3090 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3091 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3092 // In the 64-bit large code model, we have to make all calls
3093 // through a register, since the call instruction's 32-bit
3094     // pc-relative offset may not be large enough to hold the whole
3095     // address.
3096 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3097 // If the callee is a GlobalAddress node (quite common, every direct call
3098     // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3099     // it.
3100 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3102     // We should use an extra load for direct calls to dllimported functions.
3104 const GlobalValue *GV = G->getGlobal();
3105 if (!GV->hasDLLImportStorageClass()) {
3106 unsigned char OpFlags = 0;
3107 bool ExtraLoad = false;
3108 unsigned WrapperKind = ISD::DELETED_NODE;
3110 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3111       // external symbols must go through the PLT in PIC mode. If the symbol
3112 // has hidden or protected visibility, or if it is static or local, then
3113 // we don't need to use the PLT - we can directly call it.
3114 if (Subtarget->isTargetELF() &&
3115 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3116 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3117 OpFlags = X86II::MO_PLT;
3118 } else if (Subtarget->isPICStyleStubAny() &&
3119 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3120 (!Subtarget->getTargetTriple().isMacOSX() ||
3121 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3122 // PC-relative references to external symbols should go through $stub,
3123 // unless we're building with the leopard linker or later, which
3124 // automatically synthesizes these stubs.
3125 OpFlags = X86II::MO_DARWIN_STUB;
3126 } else if (Subtarget->isPICStyleRIPRel() &&
3127 isa<Function>(GV) &&
3128 cast<Function>(GV)->getAttributes().
3129 hasAttribute(AttributeSet::FunctionIndex,
3130 Attribute::NonLazyBind)) {
3131 // If the function is marked as non-lazy, generate an indirect call
3132 // which loads from the GOT directly. This avoids runtime overhead
3133 // at the cost of eager binding (and one extra byte of encoding).
3134 OpFlags = X86II::MO_GOTPCREL;
3135           WrapperKind = X86ISD::WrapperRIP;
3136           ExtraLoad = true;
3137         }
3139 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3140 G->getOffset(), OpFlags);
3142 // Add a wrapper if needed.
3143 if (WrapperKind != ISD::DELETED_NODE)
3144 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3145         // Add extra indirection if needed.
3146         if (ExtraLoad)
3147           Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3148 MachinePointerInfo::getGOT(),
3149                                false, false, false, 0);
3150       }
3151 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3152 unsigned char OpFlags = 0;
3154 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3155 // external symbols should go through the PLT.
3156 if (Subtarget->isTargetELF() &&
3157 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3158 OpFlags = X86II::MO_PLT;
3159 } else if (Subtarget->isPICStyleStubAny() &&
3160 (!Subtarget->getTargetTriple().isMacOSX() ||
3161 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3162 // PC-relative references to external symbols should go through $stub,
3163 // unless we're building with the leopard linker or later, which
3164 // automatically synthesizes these stubs.
3165 OpFlags = X86II::MO_DARWIN_STUB;
3168     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3169                                          OpFlags);
3170 } else if (Subtarget->isTarget64BitILP32() && Callee->getValueType(0) == MVT::i32) {
3171     // Zero-extend the 32-bit Callee address to 64 bits, per the x32 ABI.
3172     Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3173   }
3175 // Returns a chain & a flag for retval copy to use.
3176 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3177 SmallVector<SDValue, 8> Ops;
3179 if (!IsSibcall && isTailCall) {
3180 Chain = DAG.getCALLSEQ_END(Chain,
3181 DAG.getIntPtrConstant(NumBytesToPop, true),
3182 DAG.getIntPtrConstant(0, true), InFlag, dl);
3183     InFlag = Chain.getValue(1);
3184   }
3186 Ops.push_back(Chain);
3187 Ops.push_back(Callee);
3189   if (isTailCall)
3190     Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3192   // Add argument registers to the end of the list so that they are known live
3193   // into the call.
3194 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3195 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3196 RegsToPass[i].second.getValueType()));
3198 // Add a register mask operand representing the call-preserved registers.
3199 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
3200 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3201 assert(Mask && "Missing call preserved mask for calling convention");
3202 Ops.push_back(DAG.getRegisterMask(Mask));
3204 if (InFlag.getNode())
3205     Ops.push_back(InFlag);
3207   if (isTailCall) {
3208     // We used to do:
3209 //// If this is the first return lowered for this function, add the regs
3210 //// to the liveout set for the function.
3211 // This isn't right, although it's probably harmless on x86; liveouts
3212 // should be computed from returns not tail calls. Consider a void
3213 // function making a tail call to a function returning int.
3214     return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3215   }
3217 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3218 InFlag = Chain.getValue(1);
3220 // Create the CALLSEQ_END node.
3221 unsigned NumBytesForCalleeToPop;
3222 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3223 DAG.getTarget().Options.GuaranteedTailCallOpt))
3224 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3225 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3226 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3227 SR == StackStructReturn)
3228 // If this is a call to a struct-return function, the callee
3229 // pops the hidden struct pointer, so we have to push it back.
3230 // This is common for Darwin/X86, Linux & Mingw32 targets.
3231 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3232 NumBytesForCalleeToPop = 4;
3233   else
3234     NumBytesForCalleeToPop = 0;  // Callee pops nothing.
3236   // Returns a flag for retval copy to use.
3237   if (!IsSibcall) {
3238     Chain = DAG.getCALLSEQ_END(Chain,
3239                                DAG.getIntPtrConstant(NumBytesToPop, true),
3240                                DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3241                                                      true),
3242                                InFlag, dl);
3243     InFlag = Chain.getValue(1);
3244   }
3246   // Handle result values, copying them out of physregs into vregs that we
3247   // return.
3248   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3249                          Ins, dl, DAG, InVals);
3250 }
3252 //===----------------------------------------------------------------------===//
3253 // Fast Calling Convention (tail call) implementation
3254 //===----------------------------------------------------------------------===//
3256 //  Like stdcall, the callee cleans up the arguments, except that ECX is
3257 //  reserved for storing the address of the tail-called function. Only 2
3258 //  registers are free for argument passing (inreg). Tail call optimization is
3259 //  performed provided:
3260 //    * tailcallopt is enabled
3261 //    * caller/callee are fastcc
3262 //  On the X86_64 architecture, with GOT-style position-independent code, only
3263 //  local (within-module) calls are supported at the moment.
3264 //  To keep the stack aligned according to the platform ABI, the function
3265 //  GetAlignedArgumentStackSize ensures that the argument delta is always a
3266 //  multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example.)
3267 //  If a tail-called callee has more arguments than the caller, the caller
3268 //  needs to make sure that there is room to move the RETADDR to. This is
3269 // achieved by reserving an area the size of the argument delta right after the
3270 // original RETADDR, but before the saved framepointer or the spilled registers
3271 //  e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
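//  Worked example (editor's illustration, not part of the original text): if
//  caller(arg1, arg2) pushed 8 bytes of arguments but callee(arg1, arg2, arg3,
//  arg4) consumes 16, the argument delta is 8 bytes, so 8 extra bytes are
//  reserved right after the original RETADDR for the moved return address.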
3283 /// GetAlignedArgumentStackSize - Align the stack size so that, together with
3284 /// the pushed return address, it meets the platform's stack alignment, e.g. 16n + 12 for a 16-byte alignment with 4-byte slots.
3285 unsigned
3286 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3287 SelectionDAG& DAG) const {
3288 MachineFunction &MF = DAG.getMachineFunction();
3289 const TargetMachine &TM = MF.getTarget();
3290 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3291 TM.getSubtargetImpl()->getRegisterInfo());
3292 const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
3293 unsigned StackAlignment = TFI.getStackAlignment();
3294 uint64_t AlignMask = StackAlignment - 1;
3295 int64_t Offset = StackSize;
3296 unsigned SlotSize = RegInfo->getSlotSize();
3297 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3298     // The remainder already fits below the alignment boundary; just add the difference.
3299     Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3300   } else {
3301     // Mask out the lower bits and add the stack alignment once, plus the
3302     // (StackAlignment - SlotSize) tail.
3303     Offset = ((~AlignMask) & Offset) + StackAlignment + (StackAlignment-SlotSize);
3304   }
3306   return Offset;
3307 }
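// Worked example (editor's note): with StackAlignment = 16 and SlotSize = 4,
// StackSize = 20 gives (20 & 15) = 4 <= 12, so Offset = 20 + (12 - 4) = 28 =
// 16*1 + 12; StackSize = 30 gives (30 & 15) = 14 > 12, so Offset =
// (30 & ~15) + 16 + 12 = 44 = 16*2 + 12. Either way, pushing the 4-byte
// return address restores 16-byte alignment.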
3308 /// MatchingStackOffset - Return true if the given stack call argument is
3309 /// already available in the same position (relatively) of the caller's
3310 /// incoming argument stack.
3311 static
3312 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3313 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3314 const X86InstrInfo *TII) {
3315   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3316   int FI = INT_MAX;
3317   if (Arg.getOpcode() == ISD::CopyFromReg) {
3318     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3319     if (!TargetRegisterInfo::isVirtualRegister(VR))
3320       return false;
3321     MachineInstr *Def = MRI->getVRegDef(VR);
3322     if (!Def)
3323       return false;
3324     if (!Flags.isByVal()) {
3325       if (!TII->isLoadFromStackSlot(Def, FI))
3326         return false;
3327     } else {
3328       unsigned Opcode = Def->getOpcode();
3329       if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
3330           Def->getOperand(1).isFI()) {
3331         FI = Def->getOperand(1).getIndex();
3332         Bytes = Flags.getByValSize();
3333       } else
3334         return false;
3335     }
3336 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3337 if (Flags.isByVal())
3338 // ByVal argument is passed in as a pointer but it's now being
3339 // dereferenced. e.g.
3340 // define @foo(%struct.X* %A) {
3341       //   tail call @bar(%struct.X* byval %A)
3342       // }
3343       return false;
3344     SDValue Ptr = Ld->getBasePtr();
3345     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3346     if (!FINode)
3347       return false;
3348     FI = FINode->getIndex();
3349 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3350 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3351 FI = FINode->getIndex();
3352     Bytes = Flags.getByValSize();
3353   } else
3354     return false;
3356 assert(FI != INT_MAX);
3357   if (!MFI->isFixedObjectIndex(FI))
3358     return false;
3359   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3360 }
3362 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3363 /// for tail call optimization. Targets which want to do tail call
3364 /// optimization should implement this function.
3365 bool
3366 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3367                                                      CallingConv::ID CalleeCC,
3368                                                      bool isVarArg,
3369                                                      bool isCalleeStructRet,
3370                                                      bool isCallerStructRet,
3371                                                      Type *RetTy,
3372                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
3373 const SmallVectorImpl<SDValue> &OutVals,
3374 const SmallVectorImpl<ISD::InputArg> &Ins,
3375 SelectionDAG &DAG) const {
3376   if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3377     return false;
3379 // If -tailcallopt is specified, make fastcc functions tail-callable.
3380 const MachineFunction &MF = DAG.getMachineFunction();
3381 const Function *CallerF = MF.getFunction();
3383 // If the function return type is x86_fp80 and the callee return type is not,
3384 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3385 // perform a tailcall optimization here.
3386   if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3387     return false;
3389 CallingConv::ID CallerCC = CallerF->getCallingConv();
3390 bool CCMatch = CallerCC == CalleeCC;
3391 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3392 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3394 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3395     if (IsTailCallConvention(CalleeCC) && CCMatch)
3396       return true;
3397     return false;
3398   }
3400 // Look for obvious safe cases to perform tail call optimization that do not
3401 // require ABI changes. This is what gcc calls sibcall.
3403 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3404 // emit a special epilogue.
3405 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3406 DAG.getSubtarget().getRegisterInfo());
3407   if (RegInfo->needsStackRealignment(MF))
3408     return false;
3410 // Also avoid sibcall optimization if either caller or callee uses struct
3411 // return semantics.
3412   if (isCalleeStructRet || isCallerStructRet)
3413     return false;
3415   // A stdcall/thiscall caller is expected to clean up its arguments; the
3416 // callee isn't going to do that.
3417 // FIXME: this is more restrictive than needed. We could produce a tailcall
3418 // when the stack adjustment matches. For example, with a thiscall that takes
3419 // only one argument.
3420 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3421                    CallerCC == CallingConv::X86_ThisCall))
3422     return false;
3424   // Do not sibcall optimize vararg calls unless all arguments are passed via
3425   // registers.
3426 if (isVarArg && !Outs.empty()) {
3428 // Optimizing for varargs on Win64 is unlikely to be safe without
3429 // additional testing.
3430     if (IsCalleeWin64 || IsCallerWin64)
3431       return false;
3433 SmallVector<CCValAssign, 16> ArgLocs;
3434     CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3435                    *DAG.getContext());
3437     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3438 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3439       if (!ArgLocs[i].isRegLoc())
3440         return false;
3441   }
3443 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3444 // stack. Therefore, if it's not used by the call it is not safe to optimize
3445 // this into a sibcall.
3446 bool Unused = false;
3447   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3448     if (!Ins[i].Used) {
3449       Unused = true;
3450       break;
3451     }
3452   }
3453   if (Unused) {
3454 SmallVector<CCValAssign, 16> RVLocs;
3455     CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3456                    *DAG.getContext());
3457     CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3458 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3459 CCValAssign &VA = RVLocs[i];
3460       if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3461         return false;
3462     }
3463   }
3465 // If the calling conventions do not match, then we'd better make sure the
3466   // results are returned in the same way as what the caller expects.
3467   if (!CCMatch) {
3468 SmallVector<CCValAssign, 16> RVLocs1;
3469     CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3470                     *DAG.getContext());
3471     CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3473 SmallVector<CCValAssign, 16> RVLocs2;
3474     CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3475                     *DAG.getContext());
3476     CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3478     if (RVLocs1.size() != RVLocs2.size())
3479       return false;
3480 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3481       if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3482         return false;
3483       if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3484         return false;
3485       if (RVLocs1[i].isRegLoc()) {
3486         if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3487           return false;
3488       } else {
3489         if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3490           return false;
3491       }
3492     }
3493   }
3495   // If the callee takes no arguments then go on to check the results of the
3496   // call.
3497   if (!Outs.empty()) {
3498 // Check if stack adjustment is needed. For now, do not do this if any
3499 // argument is passed on the stack.
3500 SmallVector<CCValAssign, 16> ArgLocs;
3501     CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3502                    *DAG.getContext());
3504     // Allocate shadow area for Win64
3505     if (IsCalleeWin64)
3506       CCInfo.AllocateStack(32, 8);
3508 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3509 if (CCInfo.getNextStackOffset()) {
3510 MachineFunction &MF = DAG.getMachineFunction();
3511       if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3512         return false;
3514 // Check if the arguments are already laid out in the right way as
3515 // the caller's fixed stack objects.
3516 MachineFrameInfo *MFI = MF.getFrameInfo();
3517 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3518 const X86InstrInfo *TII =
3519 static_cast<const X86InstrInfo *>(DAG.getSubtarget().getInstrInfo());
3520 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3521 CCValAssign &VA = ArgLocs[i];
3522 SDValue Arg = OutVals[i];
3523 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3524         if (VA.getLocInfo() == CCValAssign::Indirect)
3525           return false;
3526 if (!VA.isRegLoc()) {
3527           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3528                                    MFI, MRI, TII))
3529             return false;
3530         }
3531       }
3532     }
3534 // If the tailcall address may be in a register, then make sure it's
3535 // possible to register allocate for it. In 32-bit, the call address can
3536 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3537 // callee-saved registers are restored. These happen to be the same
3538 // registers used to pass 'inreg' arguments so watch out for those.
3539 if (!Subtarget->is64Bit() &&
3540 ((!isa<GlobalAddressSDNode>(Callee) &&
3541 !isa<ExternalSymbolSDNode>(Callee)) ||
3542 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3543 unsigned NumInRegs = 0;
3544 // In PIC we need an extra register to formulate the address computation
3546 unsigned MaxInRegs =
3547 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3549 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3550       CCValAssign &VA = ArgLocs[i];
3551       if (!VA.isRegLoc())
3552         continue;
3553       unsigned Reg = VA.getLocReg();
3554       switch (Reg) {
3555       default: break;
3556       case X86::EAX: case X86::EDX: case X86::ECX:
3557         if (++NumInRegs == MaxInRegs)
3558           return false;
3559         break;
3560       }
3561     }
3562   }
3563   }
3565   return true;
3566 }
3568 FastISel *
3569 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3570                                   const TargetLibraryInfo *libInfo) const {
3571   return X86::createFastISel(funcInfo, libInfo);
3572 }
3574 //===----------------------------------------------------------------------===//
3575 // Other Lowering Hooks
3576 //===----------------------------------------------------------------------===//
3578 static bool MayFoldLoad(SDValue Op) {
3579 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3582 static bool MayFoldIntoStore(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3586 static bool isTargetShuffle(unsigned Opcode) {
3587   switch(Opcode) {
3588   default: return false;
3589 case X86ISD::BLENDI:
3590 case X86ISD::PSHUFB:
3591 case X86ISD::PSHUFD:
3592 case X86ISD::PSHUFHW:
3593 case X86ISD::PSHUFLW:
3595 case X86ISD::PALIGNR:
3596 case X86ISD::MOVLHPS:
3597 case X86ISD::MOVLHPD:
3598 case X86ISD::MOVHLPS:
3599 case X86ISD::MOVLPS:
3600 case X86ISD::MOVLPD:
3601 case X86ISD::MOVSHDUP:
3602 case X86ISD::MOVSLDUP:
3603 case X86ISD::MOVDDUP:
3606 case X86ISD::UNPCKL:
3607 case X86ISD::UNPCKH:
3608 case X86ISD::VPERMILPI:
3609 case X86ISD::VPERM2X128:
3610   case X86ISD::VPERMI:
3611     return true;
3612   }
3613 }
3615 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3616                                     SDValue V1, SelectionDAG &DAG) {
3617   switch(Opc) {
3618   default: llvm_unreachable("Unknown x86 shuffle node");
3619 case X86ISD::MOVSHDUP:
3620 case X86ISD::MOVSLDUP:
3621 case X86ISD::MOVDDUP:
3622     return DAG.getNode(Opc, dl, VT, V1);
3623   }
3624 }
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, unsigned TargetMask,
3628                                     SelectionDAG &DAG) {
3629   switch(Opc) {
3630   default: llvm_unreachable("Unknown x86 shuffle node");
3631 case X86ISD::PSHUFD:
3632 case X86ISD::PSHUFHW:
3633 case X86ISD::PSHUFLW:
3634 case X86ISD::VPERMILPI:
3635 case X86ISD::VPERMI:
3636     return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3637   }
3638 }
3640 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3641 SDValue V1, SDValue V2, unsigned TargetMask,
3642                                     SelectionDAG &DAG) {
3643   switch(Opc) {
3644   default: llvm_unreachable("Unknown x86 shuffle node");
3645 case X86ISD::PALIGNR:
3646 case X86ISD::VALIGN:
3648 case X86ISD::VPERM2X128:
3649 return DAG.getNode(Opc, dl, VT, V1, V2,
3650                        DAG.getConstant(TargetMask, MVT::i8));
3651   }
3652 }
3654 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3655                                     SDValue V1, SDValue V2, SelectionDAG &DAG) {
3656   switch(Opc) {
3657   default: llvm_unreachable("Unknown x86 shuffle node");
3658 case X86ISD::MOVLHPS:
3659 case X86ISD::MOVLHPD:
3660 case X86ISD::MOVHLPS:
3661 case X86ISD::MOVLPS:
3662 case X86ISD::MOVLPD:
3665 case X86ISD::UNPCKL:
3666 case X86ISD::UNPCKH:
3667     return DAG.getNode(Opc, dl, VT, V1, V2);
3668   }
3669 }
3671 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3672 MachineFunction &MF = DAG.getMachineFunction();
3673 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3674 DAG.getSubtarget().getRegisterInfo());
3675 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3676 int ReturnAddrIndex = FuncInfo->getRAIndex();
3678 if (ReturnAddrIndex == 0) {
3679 // Set up a frame object for the return address.
3680 unsigned SlotSize = RegInfo->getSlotSize();
3681     ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3682                                                            -(int64_t)SlotSize,
3683                                                            false);
3684     FuncInfo->setRAIndex(ReturnAddrIndex);
3685   }
3687   return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3688 }
3690 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3691 bool hasSymbolicDisplacement) {
3692 // Offset should fit into 32 bit immediate field.
3693   if (!isInt<32>(Offset))
3694     return false;
3696   // If we don't have a symbolic displacement - we don't have any extra
3697   // restrictions.
3698   if (!hasSymbolicDisplacement)
3699     return true;
3701 // FIXME: Some tweaks might be needed for medium code model.
3702   if (M != CodeModel::Small && M != CodeModel::Kernel)
3703     return false;
3705   // For the small code model we assume that the latest object is 16MB below
3706   // the end of the 31-bit boundary. We may also accept pretty large negative
3707   // constants, knowing that all objects are in the positive half of the address space.
3708   if (M == CodeModel::Small && Offset < 16*1024*1024)
3709     return true;
3711   // For the kernel code model we know that all objects reside in the negative
3712   // half of the 32-bit address space. We must not accept negative offsets,
3713   // since they may be just out of range; we may accept pretty large positive ones.
3714   if (M == CodeModel::Kernel && Offset >= 0)
3715     return true;
3717   return false;
3718 }
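// Worked examples (editor's note): with a symbolic displacement in the small
// code model, Offset = 10*1024*1024 is accepted (it stays below the 16MB guard
// band) while Offset = 20*1024*1024 is rejected; in the kernel code model,
// Offset = 8 is accepted but Offset = -8 is not.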
3720 /// isCalleePop - Determines whether the callee is required to pop its
3721 /// own arguments. Callee pop is necessary to support tail calls.
3722 bool X86::isCalleePop(CallingConv::ID CallingConv,
3723 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3724   switch (CallingConv) {
3725   default:
3726     return false;
3727   case CallingConv::X86_StdCall:
3728   case CallingConv::X86_FastCall:
3729   case CallingConv::X86_ThisCall:
3730     return !is64Bit;
3731   case CallingConv::Fast:
3732   case CallingConv::GHC:
3733   case CallingConv::HiPE:
3734     return TailCallOpt;
3735   }
3736 }
3740 /// \brief Return true if the condition is an unsigned comparison operation.
3741 static bool isX86CCUnsigned(unsigned X86CC) {
3742   switch (X86CC) {
3743   default: llvm_unreachable("Invalid integer condition!");
3744 case X86::COND_E: return true;
3745 case X86::COND_G: return false;
3746 case X86::COND_GE: return false;
3747 case X86::COND_L: return false;
3748 case X86::COND_LE: return false;
3749 case X86::COND_NE: return true;
3750 case X86::COND_B: return true;
3751 case X86::COND_A: return true;
3752 case X86::COND_BE: return true;
3753   case X86::COND_AE: return true;
3754   }
3755   llvm_unreachable("covered switch fell through?!");
3756 }
3758 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
3759 /// specific condition code, returning the condition code and the LHS/RHS of the
3760 /// comparison to make.
3761 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3762 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3763   if (!isFP) {
3764     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3765       if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3766         // X > -1   -> X == 0, jump !sign.
3767         RHS = DAG.getConstant(0, RHS.getValueType());
3768         return X86::COND_NS;
3769       }
3770       if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3771         // X < 0   -> X == 0, jump on sign.
3772         return X86::COND_S;
3773       }
3774       if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3775         // X < 1   -> X <= 0
3776         RHS = DAG.getConstant(0, RHS.getValueType());
3777         return X86::COND_LE;
3778       }
3779     }
3781 switch (SetCCOpcode) {
3782 default: llvm_unreachable("Invalid integer condition!");
3783 case ISD::SETEQ: return X86::COND_E;
3784 case ISD::SETGT: return X86::COND_G;
3785 case ISD::SETGE: return X86::COND_GE;
3786 case ISD::SETLT: return X86::COND_L;
3787 case ISD::SETLE: return X86::COND_LE;
3788 case ISD::SETNE: return X86::COND_NE;
3789 case ISD::SETULT: return X86::COND_B;
3790 case ISD::SETUGT: return X86::COND_A;
3791 case ISD::SETULE: return X86::COND_BE;
3792 case ISD::SETUGE: return X86::COND_AE;
3793     }
3794   }
3796   // First determine if it is required or is profitable to flip the operands.
3798 // If LHS is a foldable load, but RHS is not, flip the condition.
3799 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3800 !ISD::isNON_EXTLoad(RHS.getNode())) {
3801 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3802 std::swap(LHS, RHS);
3805   switch (SetCCOpcode) {
3806   default: break;
3807   case ISD::SETOLT:
3808   case ISD::SETOLE:
3809   case ISD::SETUGT:
3810   case ISD::SETUGE:
3811     std::swap(LHS, RHS);
3812     break;
3813   }
3815   // On a floating point condition, the flags are set as follows:
3816   //  ZF | PF | CF | op
3817   //   0 |  0 |  0 | X > Y
3818   //   0 |  0 |  1 | X < Y
3819   //   1 |  0 |  0 | X == Y
3820   //   1 |  1 |  1 | unordered
3821 switch (SetCCOpcode) {
3822 default: llvm_unreachable("Condcode should be pre-legalized away");
3823   case ISD::SETUEQ:
3824   case ISD::SETEQ:   return X86::COND_E;
3825   case ISD::SETOLT:              // flipped
3826   case ISD::SETOGT:
3827   case ISD::SETGT:   return X86::COND_A;
3828   case ISD::SETOLE:              // flipped
3829   case ISD::SETOGE:
3830   case ISD::SETGE:   return X86::COND_AE;
3831   case ISD::SETUGT:              // flipped
3832   case ISD::SETULT:
3833   case ISD::SETLT:   return X86::COND_B;
3834   case ISD::SETUGE:              // flipped
3835   case ISD::SETULE:
3836   case ISD::SETLE:   return X86::COND_BE;
3837   case ISD::SETONE:
3838   case ISD::SETNE:   return X86::COND_NE;
3839   case ISD::SETUO:   return X86::COND_P;
3840   case ISD::SETO:    return X86::COND_NP;
3841   case ISD::SETOEQ:
3842   case ISD::SETUNE:  return X86::COND_INVALID;
3843   }
3844 }
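// Illustrative examples (editor's note): an integer (setugt %a, %b) maps
// directly to X86::COND_A, while an ordered FP (setolt %x, %y) first has its
// operands swapped above and then also yields X86::COND_A, since UCOMISS /
// UCOMISD only expose >, <, == and unordered through CF, ZF and PF.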
3846 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3847 /// code. The current x86 ISA includes the following FP cmov instructions:
3848 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3849 static bool hasFPCMov(unsigned X86CC) {
3865 /// isFPImmLegal - Returns true if the target can instruction select the
3866 /// specified FP immediate natively. If false, the legalizer will
3867 /// materialize the FP immediate as a load from a constant pool.
3868 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3869 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3870     if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3871       return true;
3872   }
3873   return false;
3874 }
3876 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3877                                               ISD::LoadExtType ExtTy,
3878                                               EVT NewVT) const {
3879 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3880   // relocation must target a movq or addq instruction: don't let the load shrink.
3881 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3882 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3883 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3884       return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3885   return true;
3886 }
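// For example (editor's note): a TLS initial-exec access such as
//   movq x@GOTTPOFF(%rip), %rax
// must keep its full 64-bit width for the R_X86_64_GOTTPOFF relocation to be
// valid, so narrowing such a load is refused here.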
3888 /// \brief Returns true if it is beneficial to convert a load of a constant
3889 /// to just the constant itself.
3890 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3891                                                           Type *Ty) const {
3892 assert(Ty->isIntegerTy());
3894 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3895   if (BitSize == 0 || BitSize > 64)
3896     return false;
3897   return true;
3898 }
3900 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3901 unsigned Index) const {
3902   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3903     return false;
3905   return (Index == 0 || Index == ResVT.getVectorNumElements());
3906 }
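// Illustrative example (editor's note): extracting a v4i32 subvector from a
// v8i32 at index 0 (the low half) or index 4 (the high half) maps to a single
// 128-bit extract, so only those positions are reported as cheap.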
3908 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3909 // Speculate cttz only if we can directly use TZCNT.
3910 return Subtarget->hasBMI();
3913 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3914 // Speculate ctlz only if we can directly use LZCNT.
3915 return Subtarget->hasLZCNT();
3918 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3919 /// the specified half-open range [Low, Hi).
3920 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3921 return (Val < 0) || (Val >= Low && Val < Hi);
3924 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3925 /// specified value.
3926 static bool isUndefOrEqual(int Val, int CmpVal) {
3927 return (Val < 0 || Val == CmpVal);
3930 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3931 /// at position Pos and ending at Pos+Size, either falls within the specified
3932 /// sequential range [Low, Low+Size) or is undef.
3933 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3934 unsigned Pos, unsigned Size, int Low) {
3935 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3936     if (!isUndefOrEqual(Mask[i], Low))
3937       return false;
3939   return true;
3940 }
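// Illustrative example (editor's note): for Mask = <4, -1, 6, 7>,
// isSequentialOrUndefInRange(Mask, 0, 4, 4) is true: every defined element
// matches the sequence 4, 5, 6, 7 and the -1 (undef) entry is ignored.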
3941 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3942 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3943 /// operand - by default will match for first operand.
3944 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3945 bool TestSecondOperand = false) {
3946 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3947 VT != MVT::v2f64 && VT != MVT::v2i64)
3950 unsigned NumElems = VT.getVectorNumElements();
3951 unsigned Lo = TestSecondOperand ? NumElems : 0;
3952 unsigned Hi = Lo + NumElems;
3954 for (unsigned i = 0; i < NumElems; ++i)
3955     if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3956       return false;
3958   return true;
3959 }
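// Illustrative example (editor's note): on v4i32, Mask = <2, 1, 0, 3> is a
// valid PSHUFD of the first operand, while <0, 1, 4, 5> is not, because it
// references elements of both operands.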
3961 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3962 /// is suitable for input to PSHUFHW.
3963 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3964 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3967 // Lower quadword copied in order or undef.
3968 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3971 // Upper quadword shuffled.
3972 for (unsigned i = 4; i != 8; ++i)
3973 if (!isUndefOrInRange(Mask[i], 4, 8))
3976 if (VT == MVT::v16i16) {
3977 // Lower quadword copied in order or undef.
3978 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3981 // Upper quadword shuffled.
3982 for (unsigned i = 12; i != 16; ++i)
3983 if (!isUndefOrInRange(Mask[i], 12, 16))
3990 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3991 /// is suitable for input to PSHUFLW.
3992 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3993 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3996 // Upper quadword copied in order.
3997 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4000 // Lower quadword shuffled.
4001 for (unsigned i = 0; i != 4; ++i)
4002 if (!isUndefOrInRange(Mask[i], 0, 4))
4005 if (VT == MVT::v16i16) {
4006 // Upper quadword copied in order.
4007 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4010 // Lower quadword shuffled.
4011 for (unsigned i = 8; i != 12; ++i)
4012 if (!isUndefOrInRange(Mask[i], 8, 12))
4019 /// \brief Return true if the mask specifies a shuffle of elements that is
4020 /// suitable for input to intralane (palignr) or interlane (valign) vector
4021 /// shifts.
4022 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4023 unsigned NumElts = VT.getVectorNumElements();
4024 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4025 unsigned NumLaneElts = NumElts/NumLanes;
4027 // Do not handle 64-bit element shuffles with palignr.
4028 if (NumLaneElts == 2)
4031   for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4032     unsigned i;
4033     for (i = 0; i != NumLaneElts; ++i) {
4034       if (Mask[i+l] >= 0)
4035         break;
4036     }
4038     // Lane is all undef, go to next lane
4039     if (i == NumLaneElts)
4040       continue;
4042 int Start = Mask[i+l];
4044 // Make sure its in this lane in one of the sources
4045 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4046 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4049 // If not lane 0, then we must match lane 0
4050 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4053 // Correct second source to be contiguous with first source
4054 if (Start >= (int)NumElts)
4055 Start -= NumElts - NumLaneElts;
4057 // Make sure we're shifting in the right direction.
4058     if (Start <= (int)(i+l))
4059       return false;
4061     Start -= i;
4063 // Check the rest of the elements to see if they are consecutive.
4064 for (++i; i != NumLaneElts; ++i) {
4065 int Idx = Mask[i+l];
4067 // Make sure its in this lane
4068 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4069 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4072 // If not lane 0, then we must match lane 0
4073 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4076 if (Idx >= (int)NumElts)
4077 Idx -= NumElts - NumLaneElts;
4079       if (!isUndefOrEqual(Idx, Start+i))
4080         return false;
4081     }
4082   }
4084   return true;
4085 }
4088 /// \brief Return true if the node specifies a shuffle of elements that is
4089 /// suitable for input to PALIGNR.
4090 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4091 const X86Subtarget *Subtarget) {
4092 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4093 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4094       VT.is512BitVector())
4095     // FIXME: Add AVX512BW.
4096     return false;
4098   return isAlignrMask(Mask, VT, false);
4099 }
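// Illustrative example (editor's note): on v8i16, Mask = <1, 2, 3, 4, 5, 6,
// 7, 8> is a valid PALIGNR pattern: it shifts the concatenation of the two
// sources down by one 16-bit element (a two-byte palignr immediate).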
4101 /// \brief Return true if the node specifies a shuffle of elements that is
4102 /// suitable for input to VALIGN.
4103 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4104 const X86Subtarget *Subtarget) {
4105 // FIXME: Add AVX512VL.
4106   if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4107     return false;
4108   return isAlignrMask(Mask, VT, true);
4109 }
4111 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4112 /// the two vector operands have swapped position.
4113 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4114 unsigned NumElems) {
4115   for (unsigned i = 0; i != NumElems; ++i) {
4116     int idx = Mask[i];
4117     if (idx < 0)
4118       continue;
4119     else if (idx < (int)NumElems)
4120       Mask[i] = idx + NumElems;
4121     else
4122       Mask[i] = idx - NumElems;
4123   }
4124 }
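// Illustrative example (editor's note): with NumElems = 4, the mask
// <0, 5, -1, 7> becomes <4, 1, -1, 3>: indices below 4 are redirected to the
// second operand (+4), indices 4..7 to the first (-4), and undefs stay put.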
4126 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4127 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4128 /// SHUFPS and SHUFPD. If Commuted is true, it checks whether the sources are
4129 /// in the reverse order of what x86 shuffles want.
4130 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4132 unsigned NumElems = VT.getVectorNumElements();
4133 unsigned NumLanes = VT.getSizeInBits()/128;
4134 unsigned NumLaneElems = NumElems/NumLanes;
4136 if (NumLaneElems != 2 && NumLaneElems != 4)
4139 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4140   bool symmetricMaskRequired =
4141     (VT.getSizeInBits() >= 256) && (EltSize == 32);
4143 // VSHUFPSY divides the resulting vector into 4 chunks.
4144   // The sources are also split into 4 chunks, and each destination
4145 // chunk must come from a different source chunk.
4147 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4148   //  SRC2 =>  Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4150 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4151 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4153 // VSHUFPDY divides the resulting vector into 4 chunks.
4154   // The sources are also split into 4 chunks, and each destination
4155 // chunk must come from a different source chunk.
4157 // SRC1 => X3 X2 X1 X0
4158 // SRC2 => Y3 Y2 Y1 Y0
4160 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4162 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4163 unsigned HalfLaneElems = NumLaneElems/2;
4164 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4165 for (unsigned i = 0; i != NumLaneElems; ++i) {
4166 int Idx = Mask[i+l];
4167 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4168 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4170 // For VSHUFPSY, the mask of the second half must be the same as the
4171 // first but with the appropriate offsets. This works in the same way as
4172 // VPERMILPS works with masks.
4173       if (!symmetricMaskRequired || Idx < 0)
4174         continue;
4175       if (MaskVal[i] < 0) {
4176         MaskVal[i] = Idx - l;
4177         continue;
4178       }
4179       if ((signed)(Idx - l) != MaskVal[i])
4180         return false;
4181     }
4182   }
4184   return true;
4185 }
4187 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4188 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4189 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4190 if (!VT.is128BitVector())
4193   unsigned NumElems = VT.getVectorNumElements();
4194   if (NumElems != 4)
4195     return false;
4198   // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4199 return isUndefOrEqual(Mask[0], 6) &&
4200 isUndefOrEqual(Mask[1], 7) &&
4201 isUndefOrEqual(Mask[2], 2) &&
4202          isUndefOrEqual(Mask[3], 3);
4203 }
4205 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4206 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4207 /// <2, 3, 2, 3>.
4208 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4209 if (!VT.is128BitVector())
4212   unsigned NumElems = VT.getVectorNumElements();
4213   if (NumElems != 4)
4214     return false;
4217 return isUndefOrEqual(Mask[0], 2) &&
4218 isUndefOrEqual(Mask[1], 3) &&
4219 isUndefOrEqual(Mask[2], 2) &&
4220          isUndefOrEqual(Mask[3], 3);
4221 }
4223 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4224 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4225 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4226 if (!VT.is128BitVector())
4229 unsigned NumElems = VT.getVectorNumElements();
4231 if (NumElems != 2 && NumElems != 4)
4234 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4235 if (!isUndefOrEqual(Mask[i], i + NumElems))
4238 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4239 if (!isUndefOrEqual(Mask[i], i))
4245 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4246 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4247 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4248 if (!VT.is128BitVector())
4251 unsigned NumElems = VT.getVectorNumElements();
4253 if (NumElems != 2 && NumElems != 4)
4256 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4257 if (!isUndefOrEqual(Mask[i], i))
4260 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4261 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4267 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4268 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4269 /// i.e., all but one element come from the same vector.
4270 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4271 // TODO: Deal with AVX's VINSERTPS
4272 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4275 unsigned CorrectPosV1 = 0;
4276 unsigned CorrectPosV2 = 0;
4277 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4278     if (Mask[i] == -1) {
4279       ++CorrectPosV1;
4280       ++CorrectPosV2;
4281       continue;
4282     }
4284     if (Mask[i] == i)
4285       ++CorrectPosV1;
4286     else if (Mask[i] == i + 4)
4287       ++CorrectPosV2;
4288   }
4290   if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4291     // We have 3 elements (undefs count as elements from any vector) from one
4292     // vector, and one from another.
4293     return true;
4295   return false;
4296 }
4299 // Some special combinations that can be optimized.
4302 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4303 SelectionDAG &DAG) {
4304   MVT VT = SVOp->getSimpleValueType(0);
4305   SDLoc dl(SVOp);
4307   if (VT != MVT::v8i32 && VT != MVT::v8f32)
4308     return SDValue();
4310 ArrayRef<int> Mask = SVOp->getMask();
4312 // These are the special masks that may be optimized.
4313 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4314 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4315 bool MatchEvenMask = true;
4316 bool MatchOddMask = true;
4317 for (int i=0; i<8; ++i) {
4318 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4319 MatchEvenMask = false;
4320 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4321       MatchOddMask = false;
4322   }
4324   if (!MatchEvenMask && !MatchOddMask)
4325     return SDValue();
4327 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4329 SDValue Op0 = SVOp->getOperand(0);
4330 SDValue Op1 = SVOp->getOperand(1);
4332 if (MatchEvenMask) {
4333     // Shift the second operand right by 32 bits.
4334 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4335     Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4336   } else {
4337     // Shift the first operand left by 32 bits.
4338 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4339     Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4340   }
4341   static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4342   return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4343 }
4345 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4346 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4347 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4348 bool HasInt256, bool V2IsSplat = false) {
4350 assert(VT.getSizeInBits() >= 128 &&
4351 "Unsupported vector type for unpckl");
4353 unsigned NumElts = VT.getVectorNumElements();
4354 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4355 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4358   assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4359          "Unsupported vector type for unpckl");
4361 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4362 unsigned NumLanes = VT.getSizeInBits()/128;
4363 unsigned NumLaneElts = NumElts/NumLanes;
4365 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4366 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4367 int BitI = Mask[l+i];
4368 int BitI1 = Mask[l+i+1];
4369       if (!isUndefOrEqual(BitI, j))
4370         return false;
4371       if (V2IsSplat) {
4372         if (!isUndefOrEqual(BitI1, NumElts))
4373           return false;
4374       } else {
4375         if (!isUndefOrEqual(BitI1, j + NumElts))
4376           return false;
4377       }
4378     }
4379   }
4381   return true;
4382 }
4384 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4385 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4386 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4387 bool HasInt256, bool V2IsSplat = false) {
4388 assert(VT.getSizeInBits() >= 128 &&
4389 "Unsupported vector type for unpckh");
4391 unsigned NumElts = VT.getVectorNumElements();
4392 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4393 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4396 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4397 "Unsupported vector type for unpckh");
4399 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4400 unsigned NumLanes = VT.getSizeInBits()/128;
4401 unsigned NumLaneElts = NumElts/NumLanes;
4403 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4404 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4405 int BitI = Mask[l+i];
4406 int BitI1 = Mask[l+i+1];
4407       if (!isUndefOrEqual(BitI, j))
4408         return false;
4409       if (V2IsSplat) {
4410         if (isUndefOrEqual(BitI1, NumElts))
4411           return false;
4412       } else {
4413         if (!isUndefOrEqual(BitI1, j+NumElts))
4414           return false;
4415       }
4416     }
4417   }
4418   return true;
4419 }
4421 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4422 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4424 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4425 unsigned NumElts = VT.getVectorNumElements();
4426 bool Is256BitVec = VT.is256BitVector();
4428 if (VT.is512BitVector())
4430 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4431 "Unsupported vector type for unpckh");
4433 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4434 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4437 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4438 // FIXME: Need a better way to get rid of this, there's no latency difference
4439 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4440 // the former later. We should also remove the "_undef" special mask.
4441 if (NumElts == 4 && Is256BitVec)
4444 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4445 // independently on 128-bit lanes.
4446 unsigned NumLanes = VT.getSizeInBits()/128;
4447 unsigned NumLaneElts = NumElts/NumLanes;
4449 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4450 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4451 int BitI = Mask[l+i];
4452 int BitI1 = Mask[l+i+1];
4454       if (!isUndefOrEqual(BitI, j))
4455         return false;
4456       if (!isUndefOrEqual(BitI1, j))
4457         return false;
4458     }
4459   }
4461   return true;
4462 }
4464 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4465 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4466 /// <2, 6, 3, 7>.
4467 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4468 unsigned NumElts = VT.getVectorNumElements();
4470 if (VT.is512BitVector())
4473 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4474 "Unsupported vector type for unpckh");
4476 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4477 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4480 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4481 // independently on 128-bit lanes.
4482 unsigned NumLanes = VT.getSizeInBits()/128;
4483 unsigned NumLaneElts = NumElts/NumLanes;
4485 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4486 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4487 int BitI = Mask[l+i];
4488 int BitI1 = Mask[l+i+1];
4489       if (!isUndefOrEqual(BitI, j))
4490         return false;
4491       if (!isUndefOrEqual(BitI1, j))
4492         return false;
4493     }
4494   }
4495   return true;
4496 }
4498 // Match for INSERTI64x4 INSERTF64x4 instructions (src0[0], src1[0]) or
4499 // (src1[0], src0[1]), manipulation with 256-bit sub-vectors
4500 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4501 if (!VT.is512BitVector())
4504 unsigned NumElts = VT.getVectorNumElements();
4505 unsigned HalfSize = NumElts/2;
4506 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4507     if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4508       *Imm = 1;
4509       return true;
4510     }
4511   }
4512 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4513     if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4514       *Imm = 0;
4515       return true;
4516     }
4517   }
4519   return false;
4520 }
4521 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4522 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4523 /// MOVSD, and MOVD, i.e. setting the lowest element.
4524 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4525 if (VT.getVectorElementType().getSizeInBits() < 32)
4527 if (!VT.is128BitVector())
4530 unsigned NumElts = VT.getVectorNumElements();
4532 if (!isUndefOrEqual(Mask[0], NumElts))
4535 for (unsigned i = 1; i != NumElts; ++i)
4536 if (!isUndefOrEqual(Mask[i], i))
4542 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4543 /// as permutations between 128-bit chunks or halves. As an example: this
4544 /// shuffle below:
4545 ///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4546 /// The first half comes from the second half of V1 and the second half from
4547 /// the second half of V2.
4548 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4549 if (!HasFp256 || !VT.is256BitVector())
4552 // The shuffle result is divided into half A and half B. In total the two
4553 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4554 // B must come from C, D, E or F.
4555 unsigned HalfSize = VT.getVectorNumElements()/2;
4556 bool MatchA = false, MatchB = false;
4558 // Check if A comes from one of C, D, E, F.
4559 for (unsigned Half = 0; Half != 4; ++Half) {
4560     if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4561       MatchA = true;
4562       break;
4563     }
4564   }
4566 // Check if B comes from one of C, D, E, F.
4567 for (unsigned Half = 0; Half != 4; ++Half) {
4568     if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4569       MatchB = true;
4570       break;
4571     }
4572   }
4574 return MatchA && MatchB;
4577 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4578 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4579 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4580 MVT VT = SVOp->getSimpleValueType(0);
4582 unsigned HalfSize = VT.getVectorNumElements()/2;
4584 unsigned FstHalf = 0, SndHalf = 0;
4585 for (unsigned i = 0; i < HalfSize; ++i) {
4586     if (SVOp->getMaskElt(i) > 0) {
4587       FstHalf = SVOp->getMaskElt(i)/HalfSize;
4588       break;
4589     }
4590   }
4591 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4592     if (SVOp->getMaskElt(i) > 0) {
4593       SndHalf = SVOp->getMaskElt(i)/HalfSize;
4594       break;
4595     }
4596   }
4598   return (FstHalf | (SndHalf << 4));
4599 }
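// Worked example (editor's note): for the v8i32 shuffle mask
// <4, 5, 6, 7, 12, 13, 14, 15> quoted above, HalfSize = 4, so FstHalf = 4/4 = 1
// and SndHalf = 12/4 = 3, giving the immediate 1 | (3 << 4) = 0x31 - exactly
// what vperm2f128 $0x31 encodes.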
4601 // Symmetric in-lane mask. Each lane has 4 elements (for imm8).
4602 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4603 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4607 unsigned NumElts = VT.getVectorNumElements();
4609 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4610 for (unsigned i = 0; i != NumElts; ++i) {
4613 Imm8 |= Mask[i] << (i*2);
4618 unsigned LaneSize = 4;
4619 SmallVector<int, 4> MaskVal(LaneSize, -1);
4621 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4622 for (unsigned i = 0; i != LaneSize; ++i) {
4623 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4627 if (MaskVal[i] < 0) {
4628 MaskVal[i] = Mask[i+l] - l;
4629 Imm8 |= MaskVal[i] << (i*2);
4632 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4639 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4640 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4641 /// Note that VPERMIL mask matching differs depending on whether the underlying
4642 /// type is 32- or 64-bit. For VPERMILPS, the high half of the mask should point
4643 /// to the same elements as the low half, but relative to the high half of the
4644 /// source. In VPERMILPD the two lanes can be shuffled independently of each
4645 /// other, with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4646 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4647 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4648 if (VT.getSizeInBits() < 256 || EltSize < 32)
4650 bool symmetricMaskRequired = (EltSize == 32);
4651 unsigned NumElts = VT.getVectorNumElements();
4653 unsigned NumLanes = VT.getSizeInBits()/128;
4654 unsigned LaneSize = NumElts/NumLanes;
4655 // 2 or 4 elements in one lane
4657 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4658 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4659 for (unsigned i = 0; i != LaneSize; ++i) {
4660 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4662 if (symmetricMaskRequired) {
4663 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4664 ExpectedMaskVal[i] = Mask[i+l] - l;
4667 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4675 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
4676 /// what x86 MOVSS/MOVSD expect: the lowest element must be the lowest element
4677 /// of vector 2, and the other elements must come from vector 1 in order.
4678 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4679 bool V2IsSplat = false, bool V2IsUndef = false) {
4680 if (!VT.is128BitVector())
4683 unsigned NumOps = VT.getVectorNumElements();
4684 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4687 if (!isUndefOrEqual(Mask[0], 0))
4690 for (unsigned i = 1; i != NumOps; ++i)
4691 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4692 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4693 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4699 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4700 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4701 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4702 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4703 const X86Subtarget *Subtarget) {
4704 if (!Subtarget->hasSSE3())
4707 unsigned NumElems = VT.getVectorNumElements();
4709 if ((VT.is128BitVector() && NumElems != 4) ||
4710 (VT.is256BitVector() && NumElems != 8) ||
4711 (VT.is512BitVector() && NumElems != 16))
4714 // "i+1" is the value the indexed mask element must have
4715 for (unsigned i = 0; i != NumElems; i += 2)
4716 if (!isUndefOrEqual(Mask[i], i+1) ||
4717 !isUndefOrEqual(Mask[i+1], i+1))
4723 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4724 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4725 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4726 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4727 const X86Subtarget *Subtarget) {
4728 if (!Subtarget->hasSSE3())
4731 unsigned NumElems = VT.getVectorNumElements();
4733 if ((VT.is128BitVector() && NumElems != 4) ||
4734 (VT.is256BitVector() && NumElems != 8) ||
4735 (VT.is512BitVector() && NumElems != 16))
4738 // "i" is the value the indexed mask element must have
4739 for (unsigned i = 0; i != NumElems; i += 2)
4740 if (!isUndefOrEqual(Mask[i], i) ||
4741 !isUndefOrEqual(Mask[i+1], i))
4747 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4748 /// specifies a shuffle of elements that is suitable for input to 256-bit
4749 /// version of MOVDDUP.
4750 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4751 if (!HasFp256 || !VT.is256BitVector())
4754 unsigned NumElts = VT.getVectorNumElements();
4758 for (unsigned i = 0; i != NumElts/2; ++i)
4759 if (!isUndefOrEqual(Mask[i], 0))
4761 for (unsigned i = NumElts/2; i != NumElts; ++i)
4762 if (!isUndefOrEqual(Mask[i], NumElts/2))
4767 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4768 /// specifies a shuffle of elements that is suitable for input to 128-bit
4769 /// version of MOVDDUP.
4770 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4771 if (!VT.is128BitVector())
4774 unsigned e = VT.getVectorNumElements() / 2;
4775 for (unsigned i = 0; i != e; ++i)
4776 if (!isUndefOrEqual(Mask[i], i))
4778 for (unsigned i = 0; i != e; ++i)
4779 if (!isUndefOrEqual(Mask[e+i], i))
4784 /// isVEXTRACTIndex - Return true if the specified
4785 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4786 /// suitable for instructions that extract 128-bit or 256-bit subvectors.
4787 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4788 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4789 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4792 // The index should be aligned on a vecWidth-bit boundary.
4793 uint64_t Index =
4794 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4796 MVT VT = N->getSimpleValueType(0);
4797 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4798 bool Result = (Index * ElSize) % vecWidth == 0;
4803 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4804 /// operand specifies a subvector insert that is suitable for the 128-bit or
4805 /// 256-bit subvector insertion instructions.
4806 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4807 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4808 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4810 // The index should be aligned on a vecWidth-bit boundary.
4811 uint64_t Index =
4812 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4814 MVT VT = N->getSimpleValueType(0);
4815 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4816 bool Result = (Index * ElSize) % vecWidth == 0;
4821 bool X86::isVINSERT128Index(SDNode *N) {
4822 return isVINSERTIndex(N, 128);
4825 bool X86::isVINSERT256Index(SDNode *N) {
4826 return isVINSERTIndex(N, 256);
4829 bool X86::isVEXTRACT128Index(SDNode *N) {
4830 return isVEXTRACTIndex(N, 128);
4833 bool X86::isVEXTRACT256Index(SDNode *N) {
4834 return isVEXTRACTIndex(N, 256);
4837 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4838 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4839 /// Handles 128-bit and 256-bit.
4840 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4841 MVT VT = N->getSimpleValueType(0);
4843 assert((VT.getSizeInBits() >= 128) &&
4844 "Unsupported vector type for PSHUF/SHUFP");
4846 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4847 // independently on 128-bit lanes.
4848 unsigned NumElts = VT.getVectorNumElements();
4849 unsigned NumLanes = VT.getSizeInBits()/128;
4850 unsigned NumLaneElts = NumElts/NumLanes;
4852 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4853 "Only supports 2, 4 or 8 elements per lane");
4855 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4857 for (unsigned i = 0; i != NumElts; ++i) {
4858 int Elt = N->getMaskElt(i);
4859 if (Elt < 0) continue;
4860 Elt &= NumLaneElts - 1;
4861 unsigned ShAmt = (i << Shift) % 8;
4862 Mask |= Elt << ShAmt;
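// Worked example (illustrative): for a v4f32 mask <3, 2, 1, 0>, Shift == 1
// and the loop accumulates 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B, the
// familiar SHUFPS/PSHUFD "reverse" immediate. For 256-bit types the per-lane
// masks repeat, so <3, 2, 1, 0, 7, 6, 5, 4> also yields 0x1B.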
4868 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4869 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
4870 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4871 MVT VT = N->getSimpleValueType(0);
4873 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4874 "Unsupported vector type for PSHUFHW");
4876 unsigned NumElts = VT.getVectorNumElements();
4879 for (unsigned l = 0; l != NumElts; l += 8) {
4880 // 8 elements per lane, but we only care about the last 4.
4881 for (unsigned i = 0; i < 4; ++i) {
4882 int Elt = N->getMaskElt(l+i+4);
4883 if (Elt < 0) continue;
4884 Elt &= 0x3; // only 2-bits.
4885 Mask |= Elt << (i * 2);
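// Worked example (illustrative): for a v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4>,
// only elements 4..7 matter; 7, 6, 5, 4 reduce (mod 4) to 3, 2, 1, 0, so the
// immediate is 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B, i.e. pshufhw $0x1B
// reverses the high four words.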
4892 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4893 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4894 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4895 MVT VT = N->getSimpleValueType(0);
4897 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4898 "Unsupported vector type for PSHUFHW");
4900 unsigned NumElts = VT.getVectorNumElements();
4903 for (unsigned l = 0; l != NumElts; l += 8) {
4904 // 8 elements per lane, but we only care about the first 4.
4905 for (unsigned i = 0; i < 4; ++i) {
4906 int Elt = N->getMaskElt(l+i);
4907 if (Elt < 0) continue;
4908 Elt &= 0x3; // only 2-bits
4909 Mask |= Elt << (i * 2);
4916 /// \brief Return the appropriate immediate to shuffle the specified
4917 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4918 /// VALIGN (if InterLane is true) instructions.
4919 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4921 MVT VT = SVOp->getSimpleValueType(0);
4922 unsigned EltSize = InterLane ? 1 :
4923 VT.getVectorElementType().getSizeInBits() >> 3;
4925 unsigned NumElts = VT.getVectorNumElements();
4926 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4927 unsigned NumLaneElts = NumElts/NumLanes;
4931 for (i = 0; i != NumElts; ++i) {
4932 Val = SVOp->getMaskElt(i);
4936 if (Val >= (int)NumElts)
4937 Val -= NumElts - NumLaneElts;
4939 assert(Val - i > 0 && "PALIGNR imm should be positive");
4940 return (Val - i) * EltSize;
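// Worked example (illustrative): a v8i16 mask <1, 2, 3, 4, 5, 6, 7, 8> picks
// elements 1..7 of the first source followed by element 0 of the second.
// EltSize == 2 and the first defined element is Val == 1 at i == 0, so the
// immediate is (1 - 0) * 2 == 2, a two-byte PALIGNR.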
4943 /// \brief Return the appropriate immediate to shuffle the specified
4944 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4945 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4946 return getShuffleAlignrImmediate(SVOp, false);
4949 /// \brief Return the appropriate immediate to shuffle the specified
4950 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4951 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4952 return getShuffleAlignrImmediate(SVOp, true);
4956 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4957 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4958 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4959 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4961 uint64_t Index =
4962 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4964 MVT VecVT = N->getOperand(0).getSimpleValueType();
4965 MVT ElVT = VecVT.getVectorElementType();
4967 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4968 return Index / NumElemsPerChunk;
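// Worked example (illustrative): extracting a v4i32 subvector starting at
// element 4 of a v8i32 source with vecWidth == 128 gives NumElemsPerChunk ==
// 128 / 32 == 4, so the immediate is 4 / 4 == 1, selecting the upper 128-bit
// lane (e.g. vextractf128 $1).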
4971 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4972 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4973 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4974 llvm_unreachable("Illegal insert subvector for VINSERT");
4976 uint64_t Index =
4977 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4979 MVT VecVT = N->getSimpleValueType(0);
4980 MVT ElVT = VecVT.getVectorElementType();
4982 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4983 return Index / NumElemsPerChunk;
4986 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4987 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4988 /// and VEXTRACTI128 instructions.
4989 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4990 return getExtractVEXTRACTImmediate(N, 128);
4993 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4994 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4995 /// and VEXTRACTI64x4 instructions.
4996 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4997 return getExtractVEXTRACTImmediate(N, 256);
5000 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5001 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5002 /// and VINSERTI128 instructions.
5003 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5004 return getInsertVINSERTImmediate(N, 128);
5007 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5008 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5009 /// and VINSERTI64x4 instructions.
5010 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5011 return getInsertVINSERTImmediate(N, 256);
5014 /// isZero - Returns true if V is a constant integer zero.
5015 static bool isZero(SDValue V) {
5016 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5017 return C && C->isNullValue();
5020 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
5021 /// constant +0.0.
5022 bool X86::isZeroNode(SDValue Elt) {
5025 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5026 return CFP->getValueAPF().isPosZero();
5030 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5031 /// match movhlps. The lower half elements should come from the upper half of
5032 /// V1 (and in order), and the upper half elements should come from the upper
5033 /// half of V2 (and in order).
5034 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5035 if (!VT.is128BitVector())
5037 if (VT.getVectorNumElements() != 4)
5039 for (unsigned i = 0, e = 2; i != e; ++i)
5040 if (!isUndefOrEqual(Mask[i], i+2))
5042 for (unsigned i = 2; i != 4; ++i)
5043 if (!isUndefOrEqual(Mask[i], i+4))
5048 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5049 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5050 /// required.
5051 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5052 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5054 N = N->getOperand(0).getNode();
5055 if (!ISD::isNON_EXTLoad(N))
5058 *LD = cast<LoadSDNode>(N);
5062 // Test whether the given value is a vector value which will be legalized
5063 // into a load.
5064 static bool WillBeConstantPoolLoad(SDNode *N) {
5065 if (N->getOpcode() != ISD::BUILD_VECTOR)
5068 // Check for any non-constant elements.
5069 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5070 switch (N->getOperand(i).getNode()->getOpcode()) {
5072 case ISD::ConstantFP:
5079 // Vectors of all-zeros and all-ones are materialized with special
5080 // instructions rather than being loaded.
5081 return !ISD::isBuildVectorAllZeros(N) &&
5082 !ISD::isBuildVectorAllOnes(N);
5085 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5086 /// match movlp{s|d}. The lower half elements should come from lower half of
5087 /// V1 (and in order), and the upper half elements should come from the upper
5088 /// half of V2 (and in order). And since V1 will become the source of the
5089 /// MOVLP, it must be either a vector load or a scalar load to vector.
5090 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5091 ArrayRef<int> Mask, MVT VT) {
5092 if (!VT.is128BitVector())
5095 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5097 // If V2 is a vector load, don't do this transformation. We will try to use
5098 // the load-folding shufps op instead.
5099 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5102 unsigned NumElems = VT.getVectorNumElements();
5104 if (NumElems != 2 && NumElems != 4)
5106 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5107 if (!isUndefOrEqual(Mask[i], i))
5109 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5110 if (!isUndefOrEqual(Mask[i], i+NumElems))
5115 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5116 /// to a zero vector.
5117 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5118 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5119 SDValue V1 = N->getOperand(0);
5120 SDValue V2 = N->getOperand(1);
5121 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5122 for (unsigned i = 0; i != NumElems; ++i) {
5123 int Idx = N->getMaskElt(i);
5124 if (Idx >= (int)NumElems) {
5125 unsigned Opc = V2.getOpcode();
5126 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5128 if (Opc != ISD::BUILD_VECTOR ||
5129 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5131 } else if (Idx >= 0) {
5132 unsigned Opc = V1.getOpcode();
5133 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5135 if (Opc != ISD::BUILD_VECTOR ||
5136 !X86::isZeroNode(V1.getOperand(Idx)))
5143 /// getZeroVector - Returns a vector of specified type with all zero elements.
5145 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5146 SelectionDAG &DAG, SDLoc dl) {
5147 assert(VT.isVector() && "Expected a vector type");
5149 // Always build SSE zero vectors as <4 x i32> bitcasted
5150 // to their dest type. This ensures they get CSE'd.
5152 if (VT.is128BitVector()) { // SSE
5153 if (Subtarget->hasSSE2()) { // SSE2
5154 SDValue Cst = DAG.getConstant(0, MVT::i32);
5155 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5157 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5158 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5160 } else if (VT.is256BitVector()) { // AVX
5161 if (Subtarget->hasInt256()) { // AVX2
5162 SDValue Cst = DAG.getConstant(0, MVT::i32);
5163 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5164 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5166 // 256-bit logic and arithmetic instructions in AVX are all
5167 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5168 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5169 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5170 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5172 } else if (VT.is512BitVector()) { // AVX-512
5173 SDValue Cst = DAG.getConstant(0, MVT::i32);
5174 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5175 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5176 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5177 } else if (VT.getScalarType() == MVT::i1) {
5178 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5179 SDValue Cst = DAG.getConstant(0, MVT::i1);
5180 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5181 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5183 llvm_unreachable("Unexpected vector type");
5185 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
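// Illustrative note: because every SSE2 128-bit zero is built as the same
// BUILD_VECTOR of v4i32 zeros, requesting zero for, say, v2i64 and v4f32
// produces one shared node in the DAG; only the final BITCAST back to the
// requested type differs, which is what makes the CSE mentioned above work.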
5188 /// getOnesVector - Returns a vector of specified type with all bits set.
5189 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5190 /// no AVX2 support, use two <4 x i32> inserted into an <8 x i32> appropriately.
5191 /// Then bitcast to their original type, ensuring they get CSE'd.
5192 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5194 assert(VT.isVector() && "Expected a vector type");
5196 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5198 if (VT.is256BitVector()) {
5199 if (HasInt256) { // AVX2
5200 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5201 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5203 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5204 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5206 } else if (VT.is128BitVector()) {
5207 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5209 llvm_unreachable("Unexpected vector type");
5211 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5214 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5215 /// that point to V2 point to its first element.
5216 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5217 for (unsigned i = 0; i != NumElems; ++i) {
5218 if (Mask[i] > (int)NumElems) {
5224 /// getMOVL - Returns a vector_shuffle node for a movs{s|d} or movd
5225 /// operation of the specified width.
5226 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5228 unsigned NumElems = VT.getVectorNumElements();
5229 SmallVector<int, 8> Mask;
5230 Mask.push_back(NumElems);
5231 for (unsigned i = 1; i != NumElems; ++i)
5233 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5236 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5237 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5239 unsigned NumElems = VT.getVectorNumElements();
5240 SmallVector<int, 8> Mask;
5241 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5243 Mask.push_back(i + NumElems);
5245 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5248 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5249 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5251 unsigned NumElems = VT.getVectorNumElements();
5252 SmallVector<int, 8> Mask;
5253 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5254 Mask.push_back(i + Half);
5255 Mask.push_back(i + NumElems + Half);
5257 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
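// Worked example (illustrative): for v4i32, getUnpackl builds the mask
// <0, 4, 1, 5> (matching PUNPCKLDQ) and getUnpackh builds <2, 6, 3, 7>
// (matching PUNPCKHDQ), interleaving the low and high halves of V1 and V2
// respectively.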
5260 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5261 // a generic shuffle instruction because the target has no such instructions.
5262 // Generate shuffles which repeat i16 and i8 several times until they can be
5263 // represented by v4f32 and then be manipulated by target supported shuffles.
5264 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5265 MVT VT = V.getSimpleValueType();
5266 int NumElems = VT.getVectorNumElements();
5269 while (NumElems > 4) {
5270 if (EltNo < NumElems/2) {
5271 V = getUnpackl(DAG, dl, VT, V, V);
5273 V = getUnpackh(DAG, dl, VT, V, V);
5274 EltNo -= NumElems/2;
5281 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5282 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5283 MVT VT = V.getSimpleValueType();
5286 if (VT.is128BitVector()) {
5287 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5288 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5289 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5291 } else if (VT.is256BitVector()) {
5292 // To use VPERMILPS to splat scalars, the second half of indices must
5293 // refer to the higher part, which is a duplication of the lower one,
5294 // because VPERMILPS can only handle in-lane permutations.
5295 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5296 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5298 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5299 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5302 llvm_unreachable("Vector size not supported");
5304 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5307 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5308 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5309 MVT SrcVT = SV->getSimpleValueType(0);
5310 SDValue V1 = SV->getOperand(0);
5313 int EltNo = SV->getSplatIndex();
5314 int NumElems = SrcVT.getVectorNumElements();
5315 bool Is256BitVec = SrcVT.is256BitVector();
5317 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5318 "Unknown how to promote splat for type");
5320 // Extract the 128-bit part containing the splat element and update
5321 // the splat element index when it refers to the higher register.
5323 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5324 if (EltNo >= NumElems/2)
5325 EltNo -= NumElems/2;
5328 // i16 and i8 vector types can't be used directly by a generic shuffle
5329 // instruction because the target has no such instruction. Generate shuffles
5330 // which repeat i16 and i8 several times until they fit in i32, and then can
5331 // be manipulated by target supported shuffles.
5332 MVT EltVT = SrcVT.getVectorElementType();
5333 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5334 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5336 // Recreate the 256-bit vector and place the same 128-bit vector
5337 // into the low and high part. This is necessary because we want
5338 // to use VPERM* to shuffle the vectors
5340 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5343 return getLegalSplat(DAG, V1, EltNo);
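// Worked example (illustrative): splatting element 5 of a v8f32 extracts the
// high 128-bit half (EltNo becomes 1), concatenates that half into both lanes
// of a new 256-bit value, and then getLegalSplat emits the VPERMILPS-friendly
// mask <1, 1, 1, 1, 5, 5, 5, 5>.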
5346 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5347 /// vector against a zero or undef vector. This produces a shuffle where the low
5348 /// element of V2 is swizzled into the zero/undef vector, landing at element
5349 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5350 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5352 const X86Subtarget *Subtarget,
5353 SelectionDAG &DAG) {
5354 MVT VT = V2.getSimpleValueType();
5356 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5357 unsigned NumElems = VT.getVectorNumElements();
5358 SmallVector<int, 16> MaskVec;
5359 for (unsigned i = 0; i != NumElems; ++i)
5360 // If this is the insertion idx, put the low elt of V2 here.
5361 MaskVec.push_back(i == Idx ? NumElems : i);
5362 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5365 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5366 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5367 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5368 /// shuffles which use a single input multiple times, and in those cases it will
5369 /// adjust the mask to only have indices within that single input.
5370 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5371 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5372 unsigned NumElems = VT.getVectorNumElements();
5376 bool IsFakeUnary = false;
5377 switch(N->getOpcode()) {
5378 case X86ISD::BLENDI:
5379 ImmN = N->getOperand(N->getNumOperands()-1);
5380 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5383 ImmN = N->getOperand(N->getNumOperands()-1);
5384 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5385 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5387 case X86ISD::UNPCKH:
5388 DecodeUNPCKHMask(VT, Mask);
5389 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5391 case X86ISD::UNPCKL:
5392 DecodeUNPCKLMask(VT, Mask);
5393 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5395 case X86ISD::MOVHLPS:
5396 DecodeMOVHLPSMask(NumElems, Mask);
5397 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5399 case X86ISD::MOVLHPS:
5400 DecodeMOVLHPSMask(NumElems, Mask);
5401 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5403 case X86ISD::PALIGNR:
5404 ImmN = N->getOperand(N->getNumOperands()-1);
5405 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5407 case X86ISD::PSHUFD:
5408 case X86ISD::VPERMILPI:
5409 ImmN = N->getOperand(N->getNumOperands()-1);
5410 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5413 case X86ISD::PSHUFHW:
5414 ImmN = N->getOperand(N->getNumOperands()-1);
5415 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5418 case X86ISD::PSHUFLW:
5419 ImmN = N->getOperand(N->getNumOperands()-1);
5420 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5423 case X86ISD::PSHUFB: {
5425 SDValue MaskNode = N->getOperand(1);
5426 while (MaskNode->getOpcode() == ISD::BITCAST)
5427 MaskNode = MaskNode->getOperand(0);
5429 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5430 // If we have a build-vector, then things are easy.
5431 EVT VT = MaskNode.getValueType();
5432 assert(VT.isVector() &&
5433 "Can't produce a non-vector with a build_vector!");
5434 if (!VT.isInteger())
5437 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5439 SmallVector<uint64_t, 32> RawMask;
5440 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5441 SDValue Op = MaskNode->getOperand(i);
5442 if (Op->getOpcode() == ISD::UNDEF) {
5443 RawMask.push_back((uint64_t)SM_SentinelUndef);
5446 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5449 APInt MaskElement = CN->getAPIntValue();
5451 // We now have to decode the element which could be any integer size and
5452 // extract each byte of it.
5453 for (int j = 0; j < NumBytesPerElement; ++j) {
5454 // Note that this is x86 and so always little endian: the low byte is
5455 // the first byte of the mask.
5456 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5457 MaskElement = MaskElement.lshr(8);
5460 DecodePSHUFBMask(RawMask, Mask);
5464 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5468 SDValue Ptr = MaskLoad->getBasePtr();
5469 if (Ptr->getOpcode() == X86ISD::Wrapper)
5470 Ptr = Ptr->getOperand(0);
5472 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5473 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5476 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5477 DecodePSHUFBMask(C, Mask);
5485 case X86ISD::VPERMI:
5486 ImmN = N->getOperand(N->getNumOperands()-1);
5487 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5491 case X86ISD::MOVSD: {
5492 // The index 0 always comes from the first element of the second source,
5493 // this is why MOVSS and MOVSD are used in the first place. The other
5494 // elements come from the other positions of the first source vector
5495 Mask.push_back(NumElems);
5496 for (unsigned i = 1; i != NumElems; ++i) {
5501 case X86ISD::VPERM2X128:
5502 ImmN = N->getOperand(N->getNumOperands()-1);
5503 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5504 if (Mask.empty()) return false;
5506 case X86ISD::MOVSLDUP:
5507 DecodeMOVSLDUPMask(VT, Mask);
5509 case X86ISD::MOVSHDUP:
5510 DecodeMOVSHDUPMask(VT, Mask);
5512 case X86ISD::MOVDDUP:
5513 case X86ISD::MOVLHPD:
5514 case X86ISD::MOVLPD:
5515 case X86ISD::MOVLPS:
5516 // Not yet implemented
5518 default: llvm_unreachable("unknown target shuffle node");
5521 // If we have a fake unary shuffle, the shuffle mask is spread across two
5522 // inputs that are actually the same node. Re-map the mask to always point
5523 // into the first input.
5526 if (M >= (int)Mask.size())
5532 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5533 /// element of the result of the vector shuffle.
5534 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5537 return SDValue(); // Limit search depth.
5539 SDValue V = SDValue(N, 0);
5540 EVT VT = V.getValueType();
5541 unsigned Opcode = V.getOpcode();
5543 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5544 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5545 int Elt = SV->getMaskElt(Index);
5548 return DAG.getUNDEF(VT.getVectorElementType());
5550 unsigned NumElems = VT.getVectorNumElements();
5551 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5552 : SV->getOperand(1);
5553 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5556 // Recurse into target specific vector shuffles to find scalars.
5557 if (isTargetShuffle(Opcode)) {
5558 MVT ShufVT = V.getSimpleValueType();
5559 unsigned NumElems = ShufVT.getVectorNumElements();
5560 SmallVector<int, 16> ShuffleMask;
5563 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5566 int Elt = ShuffleMask[Index];
5568 return DAG.getUNDEF(ShufVT.getVectorElementType());
5570 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5572 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5576 // Actual nodes that may contain scalar elements
5577 if (Opcode == ISD::BITCAST) {
5578 V = V.getOperand(0);
5579 EVT SrcVT = V.getValueType();
5580 unsigned NumElems = VT.getVectorNumElements();
5582 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5586 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5587 return (Index == 0) ? V.getOperand(0)
5588 : DAG.getUNDEF(VT.getVectorElementType());
5590 if (V.getOpcode() == ISD::BUILD_VECTOR)
5591 return V.getOperand(Index);
5596 /// getNumOfConsecutiveZeros - Return the number of consecutive elements of a
5597 /// vector shuffle operation which come from zero. The search can start in two
5598 /// different directions, from left or right.
5599 /// We count undefs as zeros until PreferredNum is reached.
5600 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5601 unsigned NumElems, bool ZerosFromLeft,
5602 SelectionDAG &DAG,
5603 unsigned PreferredNum = -1U) {
5604 unsigned NumZeros = 0;
5605 for (unsigned i = 0; i != NumElems; ++i) {
5606 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5607 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5611 if (X86::isZeroNode(Elt))
5613 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5614 NumZeros = std::min(NumZeros + 1, PreferredNum);
5622 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5623 /// correspond consecutively to elements from one of the vector operands,
5624 /// starting from its index OpIdx. Also set OpNum to the source vector operand used.
5626 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5627 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5628 unsigned NumElems, unsigned &OpNum) {
5629 bool SeenV1 = false;
5630 bool SeenV2 = false;
5632 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5633 int Idx = SVOp->getMaskElt(i);
5634 // Ignore undef indices.
5638 if (Idx < (int)NumElems)
5643 // Only accept consecutive elements from the same vector
5644 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5648 OpNum = SeenV1 ? 0 : 1;
5652 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5653 /// logical right shift of a vector.
5654 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5655 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5656 unsigned NumElems =
5657 SVOp->getSimpleValueType(0).getVectorNumElements();
5658 unsigned NumZeros = getNumOfConsecutiveZeros(
5659 SVOp, NumElems, false /* check zeros from right */, DAG,
5660 SVOp->getMaskElt(0));
5666 // Considering the elements in the mask that are not consecutive zeros,
5667 // check if they consecutively come from only one of the source vectors.
5669 //   V1 = {X, A, B, C}
5670 //   V2 = {Y, Y, Y, Y}
5671 //   vector_shuffle V1, V2 <1, 2, 3, X>
5673 if (!isShuffleMaskConsecutive(SVOp,
5674 0, // Mask Start Index
5675 NumElems-NumZeros, // Mask End Index(exclusive)
5676 NumZeros, // Where to start looking in the src vector
5677 NumElems, // Number of elements in vector
5678 OpSrc)) // Which source operand ?
5683 ShVal = SVOp->getOperand(OpSrc);
5687 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5688 /// logical left shift of a vector.
5689 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5690 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5691 unsigned NumElems =
5692 SVOp->getSimpleValueType(0).getVectorNumElements();
5693 unsigned NumZeros = getNumOfConsecutiveZeros(
5694 SVOp, NumElems, true /* check zeros from left */, DAG,
5695 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5701 // Considering the elements in the mask that are not consecutive zeros,
5702 // check if they consecutively come from only one of the source vectors.
5704 //   V2 = {A, B, X, X}
5705 //   V1 = {X, X, C, D}
5706 //   vector_shuffle V1, V2 <X, X, 4, 5>
5708 if (!isShuffleMaskConsecutive(SVOp,
5709 NumZeros, // Mask Start Index
5710 NumElems, // Mask End Index(exclusive)
5711 0, // Where to start looking in the src vector
5712 NumElems, // Number of elements in vector
5713 OpSrc)) // Which source operand ?
5718 ShVal = SVOp->getOperand(OpSrc);
5722 /// isVectorShift - Returns true if the shuffle can be implemented as a
5723 /// logical left or right shift of a vector.
5724 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5725 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5726 // Although the logic below supports any bitwidth size, there are no
5727 // shift instructions which handle more than 128-bit vectors.
5728 if (!SVOp->getSimpleValueType(0).is128BitVector())
5731 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5732 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
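// Worked example (illustrative): for a v4i32 shuffle <4, 0, 1, 2> where V2 is
// all zeros, one zero enters from the left and elements 0..2 of V1 follow in
// order, so this is a logical left shift by one element; it lowers to a
// whole-vector byte shift (pslldq $4).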
5738 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5740 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5741 unsigned NumNonZero, unsigned NumZero,
5743 const X86Subtarget* Subtarget,
5744 const TargetLowering &TLI) {
5751 for (unsigned i = 0; i < 16; ++i) {
5752 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5753 if (ThisIsNonZero && First) {
5755 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5757 V = DAG.getUNDEF(MVT::v8i16);
5762 SDValue ThisElt, LastElt;
5763 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5764 if (LastIsNonZero) {
5765 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5766 MVT::i16, Op.getOperand(i-1));
5768 if (ThisIsNonZero) {
5769 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5770 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5771 ThisElt, DAG.getConstant(8, MVT::i8));
5773 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5777 if (ThisElt.getNode())
5778 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5779 DAG.getIntPtrConstant(i/2));
5783 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5786 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5788 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5789 unsigned NumNonZero, unsigned NumZero,
5791 const X86Subtarget* Subtarget,
5792 const TargetLowering &TLI) {
5799 for (unsigned i = 0; i < 8; ++i) {
5800 bool isNonZero = (NonZeros & (1 << i)) != 0;
5804 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5806 V = DAG.getUNDEF(MVT::v8i16);
5809 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5810 MVT::v8i16, V, Op.getOperand(i),
5811 DAG.getIntPtrConstant(i));
5818 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5819 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5820 const X86Subtarget *Subtarget,
5821 const TargetLowering &TLI) {
5822 // Find all zeroable elements.
5823 bool Zeroable[4];
5824 for (int i=0; i < 4; ++i) {
5825 SDValue Elt = Op->getOperand(i);
5826 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5828 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5829 [](bool M) { return !M; }) > 1 &&
5830 "We expect at least two non-zero elements!");
5832 // We only know how to deal with build_vector nodes where elements are either
5833 // zeroable or extract_vector_elt with constant index.
5834 SDValue FirstNonZero;
5835 unsigned FirstNonZeroIdx;
5836 for (unsigned i=0; i < 4; ++i) {
5839 SDValue Elt = Op->getOperand(i);
5840 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5841 !isa<ConstantSDNode>(Elt.getOperand(1)))
5843 // Make sure that this node is extracting from a 128-bit vector.
5844 MVT VT = Elt.getOperand(0).getSimpleValueType();
5845 if (!VT.is128BitVector())
5847 if (!FirstNonZero.getNode()) {
5849 FirstNonZeroIdx = i;
5853 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5854 SDValue V1 = FirstNonZero.getOperand(0);
5855 MVT VT = V1.getSimpleValueType();
5857 // See if this build_vector can be lowered as a blend with zero.
5859 unsigned EltMaskIdx, EltIdx;
5861 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5862 if (Zeroable[EltIdx]) {
5863 // The zero vector will be on the right hand side.
5864 Mask[EltIdx] = EltIdx+4;
5868 Elt = Op->getOperand(EltIdx);
5869 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
5870 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5871 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5873 Mask[EltIdx] = EltIdx;
5877 // Let the shuffle legalizer deal with blend operations.
5878 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5879 if (V1.getSimpleValueType() != VT)
5880 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5881 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5884 // See if we can lower this build_vector to an INSERTPS.
5885 if (!Subtarget->hasSSE41())
5888 SDValue V2 = Elt.getOperand(0);
5889 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5892 bool CanFold = true;
5893 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5897 SDValue Current = Op->getOperand(i);
5898 SDValue SrcVector = Current->getOperand(0);
5901 CanFold = SrcVector == V1 &&
5902 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5908 assert(V1.getNode() && "Expected at least two non-zero elements!");
5909 if (V1.getSimpleValueType() != MVT::v4f32)
5910 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5911 if (V2.getSimpleValueType() != MVT::v4f32)
5912 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5914 // Ok, we can emit an INSERTPS instruction.
5916 for (int i = 0; i < 4; ++i)
5920 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5921 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5922 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5923 DAG.getIntPtrConstant(InsertPSMask));
5924 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
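// Illustrative note on the INSERTPS immediate built above: bits [7:6] select
// the source element of V2 (EltMaskIdx), bits [5:4] select the destination
// position (EltIdx), and bits [3:0] are the zero mask. For example, taking
// element 2 of V2 into position 1 while zeroing element 3 would be
// (2 << 6) | (1 << 4) | 0b1000 = 0x98.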
5927 /// getVShift - Return a vector logical shift node.
5929 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5930 unsigned NumBits, SelectionDAG &DAG,
5931 const TargetLowering &TLI, SDLoc dl) {
5932 assert(VT.is128BitVector() && "Unknown type for VShift");
5933 EVT ShVT = MVT::v2i64;
5934 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5935 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5936 return DAG.getNode(ISD::BITCAST, dl, VT,
5937 DAG.getNode(Opc, dl, ShVT, SrcOp,
5938 DAG.getConstant(NumBits,
5939 TLI.getScalarShiftAmountTy(SrcOp.getValueType()))));
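// Worked example (illustrative; the bit count is converted to bytes later in
// instruction selection): getVShift(/*isLeft=*/true, MVT::v4i32, X,
// /*NumBits=*/32, ...) bitcasts X to v2i64, emits X86ISD::VSHLDQ by 32 bits
// (a 4-byte pslldq), and bitcasts the result back to v4i32.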
5942 static SDValue
5943 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5945 // Check if the scalar load can be widened into a vector load. And if
5946 // the address is "base + cst" see if the cst can be "absorbed" into
5947 // the shuffle mask.
5948 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5949 SDValue Ptr = LD->getBasePtr();
5950 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5952 EVT PVT = LD->getValueType(0);
5953 if (PVT != MVT::i32 && PVT != MVT::f32)
5958 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5959 FI = FINode->getIndex();
5961 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5962 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5963 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5964 Offset = Ptr.getConstantOperandVal(1);
5965 Ptr = Ptr.getOperand(0);
5970 // FIXME: 256-bit vector instructions don't require a strict alignment,
5971 // improve this code to support it better.
5972 unsigned RequiredAlign = VT.getSizeInBits()/8;
5973 SDValue Chain = LD->getChain();
5974 // Make sure the stack object alignment is at least 16 or 32.
5975 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5976 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5977 if (MFI->isFixedObjectIndex(FI)) {
5978 // Can't change the alignment. FIXME: It's possible to compute
5979 // the exact stack offset and reference FI + adjust offset instead.
5980 // If someone *really* cares about this. That's the way to implement it.
5983 MFI->setObjectAlignment(FI, RequiredAlign);
5987 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5988 // Ptr + (Offset & ~15).
5991 if ((Offset % RequiredAlign) & 3)
5993 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5995 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5996 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5998 int EltNo = (Offset - StartOffset) >> 2;
5999 unsigned NumElems = VT.getVectorNumElements();
6001 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6002 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6003 LD->getPointerInfo().getWithOffset(StartOffset),
6004 false, false, false, 0);
6006 SmallVector<int, 8> Mask;
6007 for (unsigned i = 0; i != NumElems; ++i)
6008 Mask.push_back(EltNo);
6010 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6016 /// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
6017 /// vector of type 'VT', see if the elements can be replaced by a single large
6018 /// load which has the same value as a build_vector whose operands are 'elts'.
6020 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6022 /// FIXME: we'd also like to handle the case where the last elements are zero
6023 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6024 /// There's even a handy isZeroNode for that purpose.
6025 static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
6026 SDLoc &DL, SelectionDAG &DAG,
6027 bool isAfterLegalize) {
6028 EVT EltVT = VT.getVectorElementType();
6029 unsigned NumElems = Elts.size();
6031 LoadSDNode *LDBase = nullptr;
6032 unsigned LastLoadedElt = -1U;
6034 // For each element in the initializer, see if we've found a load or an undef.
6035 // If we don't find an initial load element, or later load elements are
6036 // non-consecutive, bail out.
6037 for (unsigned i = 0; i < NumElems; ++i) {
6038 SDValue Elt = Elts[i];
6040 if (!Elt.getNode() ||
6041 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6044 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6046 LDBase = cast<LoadSDNode>(Elt.getNode());
6050 if (Elt.getOpcode() == ISD::UNDEF)
6053 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6054 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
6059 // If we have found an entire vector of loads and undefs, then return a large
6060 // load of the entire vector width starting at the base pointer. If we found
6061 // consecutive loads for the low half, generate a vzext_load node.
6062 if (LastLoadedElt == NumElems - 1) {
6064 if (isAfterLegalize &&
6065 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6068 SDValue NewLd = SDValue();
6070 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6071 LDBase->getPointerInfo(), LDBase->isVolatile(),
6072 LDBase->isNonTemporal(), LDBase->isInvariant(),
6073 LDBase->getAlignment());
6075 if (LDBase->hasAnyUseOfValue(1)) {
6076 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6078 SDValue(NewLd.getNode(), 1));
6079 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6080 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6081 SDValue(NewLd.getNode(), 1));
6087 // TODO: The code below fires only for loading the low v2i32 / v2f32
6088 //of a v4i32 / v4f32. It's probably worth generalizing.
6089 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6090 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6091 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6092 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6094 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6095 LDBase->getPointerInfo(),
6096 LDBase->getAlignment(),
6097 false/*isVolatile*/, true/*ReadMem*/,
6100 // Make sure the newly-created LOAD is in the same position as LDBase in
6101 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6102 // update uses of LDBase's output chain to use the TokenFactor.
6103 if (LDBase->hasAnyUseOfValue(1)) {
6104 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6105 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6106 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6107 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6108 SDValue(ResNode.getNode(), 1));
6111 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6116 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6117 /// to generate a splat value for the following cases:
6118 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6119 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6120 /// a scalar load, or a constant.
6121 /// The VBROADCAST node is returned when a pattern is found,
6122 /// or SDValue() otherwise.
6123 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6124 SelectionDAG &DAG) {
6125 // VBROADCAST requires AVX.
6126 // TODO: Splats could be generated for non-AVX CPUs using SSE
6127 // instructions, but there's less potential gain for only 128-bit vectors.
6128 if (!Subtarget->hasAVX())
6131 MVT VT = Op.getSimpleValueType();
6134 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6135 "Unsupported vector type for broadcast.");
6140 switch (Op.getOpcode()) {
6142 // Unknown pattern found.
6145 case ISD::BUILD_VECTOR: {
6146 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6147 BitVector UndefElements;
6148 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6150 // We need a splat of a single value to use broadcast, and it doesn't
6151 // make any sense if the value is only in one element of the vector.
6152 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6156 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6157 Ld.getOpcode() == ISD::ConstantFP);
6159 // Make sure that all of the users of a non-constant load are from the
6160 // BUILD_VECTOR node.
6161 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6166 case ISD::VECTOR_SHUFFLE: {
6167 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6169 // Shuffles must have a splat mask where the first element is
6170 // broadcasted.
6171 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6174 SDValue Sc = Op.getOperand(0);
6175 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6176 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6178 if (!Subtarget->hasInt256())
6181 // Use the register form of the broadcast instruction available on AVX2.
6182 if (VT.getSizeInBits() >= 256)
6183 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6184 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6187 Ld = Sc.getOperand(0);
6188 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6189 Ld.getOpcode() == ISD::ConstantFP);
6191 // The scalar_to_vector node and the suspected
6192 // load node must have exactly one user.
6193 // Constants may have multiple users.
6195 // AVX-512 has a register version of the broadcast.
6196 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6197 Ld.getValueType().getSizeInBits() >= 32;
6198 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6205 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6206 bool IsGE256 = (VT.getSizeInBits() >= 256);
6208 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6209 // instruction to save 8 or more bytes of constant pool data.
6210 // TODO: If multiple splats are generated to load the same constant,
6211 // it may be detrimental to overall size. There needs to be a way to detect
6212 // that condition to know if this is truly a size win.
6213 const Function *F = DAG.getMachineFunction().getFunction();
6214 bool OptForSize = F->getAttributes().
6215 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
6217 // Handle broadcasting a single constant scalar from the constant pool
6219 // On Sandybridge (no AVX2), it is still better to load a constant vector
6220 // from the constant pool and not to broadcast it from a scalar.
6221 // But override that restriction when optimizing for size.
6222 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6223 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6224 EVT CVT = Ld.getValueType();
6225 assert(!CVT.isVector() && "Must not broadcast a vector type");
6227 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6228 // For size optimization, also splat v2f64 and v2i64, and for size opt
6229 // with AVX2, also splat i8 and i16.
6230 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6231 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6232 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6233 const Constant *C = nullptr;
6234 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6235 C = CI->getConstantIntValue();
6236 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6237 C = CF->getConstantFPValue();
6239 assert(C && "Invalid constant type");
6241 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6242 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6243 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6244 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6245 MachinePointerInfo::getConstantPool(),
6246 false, false, false, Alignment);
6248 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6252 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6254 // Handle AVX2 in-register broadcasts.
6255 if (!IsLoad && Subtarget->hasInt256() &&
6256 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6257 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6259 // The scalar source must be a normal load.
6263 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6264 (Subtarget->hasVLX() && ScalarSize == 64))
6265 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6267 // The integer check is needed for the 64-bit-into-128-bit case so it doesn't
6268 // match double, since there is no vbroadcastsd for xmm.
6269 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6270 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6271 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6274 // Unsupported broadcast.
6278 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6279 /// underlying vector and index.
6281 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6282 /// index.
6283 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6285 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6286 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6289 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6291 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6293 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6294 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6297 // In this case the vector is the extract_subvector expression and the index
6298 // is 2, as specified by the shuffle.
6299 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6300 SDValue ShuffleVec = SVOp->getOperand(0);
6301 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6302 assert(ShuffleVecVT.getVectorElementType() ==
6303 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6305 int ShuffleIdx = SVOp->getMaskElt(Idx);
6306 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6307 ExtractedFromVec = ShuffleVec;
6313 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6314 MVT VT = Op.getSimpleValueType();
6316 // Skip if insert_vec_elt is not supported.
6317 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6318 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6322 unsigned NumElems = Op.getNumOperands();
6326 SmallVector<unsigned, 4> InsertIndices;
6327 SmallVector<int, 8> Mask(NumElems, -1);
6329 for (unsigned i = 0; i != NumElems; ++i) {
6330 unsigned Opc = Op.getOperand(i).getOpcode();
6332 if (Opc == ISD::UNDEF)
6335 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6336 // Quit if more than 1 element needs inserting.
6337 if (InsertIndices.size() > 1)
6340 InsertIndices.push_back(i);
6344 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6345 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6346 // Quit if non-constant index.
6347 if (!isa<ConstantSDNode>(ExtIdx))
6349 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6351 // Quit if extracted from vector of different type.
6352 if (ExtractedFromVec.getValueType() != VT)
6355 if (!VecIn1.getNode())
6356 VecIn1 = ExtractedFromVec;
6357 else if (VecIn1 != ExtractedFromVec) {
6358 if (!VecIn2.getNode())
6359 VecIn2 = ExtractedFromVec;
6360 else if (VecIn2 != ExtractedFromVec)
6361 // Quit if more than 2 vectors to shuffle
6365 if (ExtractedFromVec == VecIn1)
6367 else if (ExtractedFromVec == VecIn2)
6368 Mask[i] = Idx + NumElems;
6371 if (!VecIn1.getNode())
6374 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6375 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6376 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6377 unsigned Idx = InsertIndices[i];
6378 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6379 DAG.getIntPtrConstant(Idx));
6385 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6387 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6389 MVT VT = Op.getSimpleValueType();
6390 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6391 "Unexpected type in LowerBUILD_VECTORvXi1!");
6394 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6395 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6396 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6397 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6400 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6401 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6402 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6403 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6406 bool AllConstants = true;
6407 uint64_t Immediate = 0;
6408 int NonConstIdx = -1;
6409 bool IsSplat = true;
6410 unsigned NumNonConsts = 0;
6411 unsigned NumConsts = 0;
6412 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6413 SDValue In = Op.getOperand(idx);
    if (In.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(In)) {
      AllConstants = false;
      NonConstIdx = idx;
      NumNonConsts++;
    } else {
      NumConsts++;
      if (cast<ConstantSDNode>(In)->getZExtValue())
        Immediate |= (1ULL << idx);
    }
    if (In != Op.getOperand(0))
      IsSplat = false;
  }
  if (AllConstants) {
    SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6431 DAG.getConstant(Immediate, MVT::i16));
6432 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6433 DAG.getIntPtrConstant(0));
  }

  if (NumNonConsts == 1 && NonConstIdx != 0) {
    SDValue DstVec;
    if (NumConsts) {
      SDValue VecAsImm = DAG.getConstant(Immediate,
6440 MVT::getIntegerVT(VT.getSizeInBits()));
6441 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
    } else
      DstVec = DAG.getUNDEF(VT);
6445 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6446 Op.getOperand(NonConstIdx),
6447 DAG.getIntPtrConstant(NonConstIdx));
  }

  if (!IsSplat && (NonConstIdx != 0))
    llvm_unreachable("Unsupported BUILD_VECTOR operation");

  MVT SelectVT = (VT == MVT::v16i1) ? MVT::i16 : MVT::i8;
  SDValue Select;
  if (IsSplat)
    Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6455 DAG.getConstant(-1, SelectVT),
6456 DAG.getConstant(0, SelectVT));
  else
    Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6459 DAG.getConstant((Immediate | 1), SelectVT),
6460 DAG.getConstant(Immediate, SelectVT));
6461 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6464 /// \brief Return true if \p N implements a horizontal binop and return the
6465 /// operands for the horizontal binop into V0 and V1.
6467 /// This is a helper function of PerformBUILD_VECTORCombine.
6468 /// This function checks that the build_vector \p N in input implements a
6469 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6470 /// operation to match.
6471 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6472 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6473 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6476 /// This function only analyzes elements of \p N whose indices are
6477 /// in range [BaseIdx, LastIdx).
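///
/// For example (an illustrative sketch, with A and B standing for arbitrary
/// v4f32 values), the build_vector
///   (fadd (extract A, 0), (extract A, 1)),
///   (fadd (extract A, 2), (extract A, 3)),
///   (fadd (extract B, 0), (extract B, 1)),
///   (fadd (extract B, 2), (extract B, 3))
/// matches with Opcode == ISD::FADD, yielding V0 == A and V1 == B.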
6478 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
                              SelectionDAG &DAG,
                              unsigned BaseIdx, unsigned LastIdx,
6481 SDValue &V0, SDValue &V1) {
6482 EVT VT = N->getValueType(0);
6484 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6485 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6486 "Invalid Vector in input!");
6488 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6489 bool CanFold = true;
6490 unsigned ExpectedVExtractIdx = BaseIdx;
6491 unsigned NumElts = LastIdx - BaseIdx;
6492 V0 = DAG.getUNDEF(VT);
6493 V1 = DAG.getUNDEF(VT);
6495 // Check if N implements a horizontal binop.
6496 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6497 SDValue Op = N->getOperand(i + BaseIdx);
6500 if (Op->getOpcode() == ISD::UNDEF) {
6501 // Update the expected vector extract index.
6502 if (i * 2 == NumElts)
6503 ExpectedVExtractIdx = BaseIdx;
      ExpectedVExtractIdx += 2;
      continue;
    }
    CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
    if (!CanFold)
      break;
6513 SDValue Op0 = Op.getOperand(0);
6514 SDValue Op1 = Op.getOperand(1);
6516 // Try to match the following pattern:
6517 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6518 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6519 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6520 Op0.getOperand(0) == Op1.getOperand(0) &&
6521 isa<ConstantSDNode>(Op0.getOperand(1)) &&
               isa<ConstantSDNode>(Op1.getOperand(1)));
    if (!CanFold)
      break;
6526 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6527 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6529 if (i * 2 < NumElts) {
6530 if (V0.getOpcode() == ISD::UNDEF)
6531 V0 = Op0.getOperand(0);
    } else {
      if (V1.getOpcode() == ISD::UNDEF)
6534 V1 = Op0.getOperand(0);
6535 if (i * 2 == NumElts)
6536 ExpectedVExtractIdx = BaseIdx;
    }

    SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6540 if (I0 == ExpectedVExtractIdx)
6541 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6542 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6543 // Try to match the following dag sequence:
6544 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6545 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
    } else
      CanFold = false;

    ExpectedVExtractIdx += 2;
  }

  return CanFold;
}
6555 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6556 /// a concat_vector.
6558 /// This is a helper function of PerformBUILD_VECTORCombine.
6559 /// This function expects two 256-bit vectors called V0 and V1.
6560 /// At first, each vector is split into two separate 128-bit vectors.
6561 /// Then, the resulting 128-bit vectors are used to implement two
6562 /// horizontal binary operations.
6564 /// The kind of horizontal binary operation is defined by \p X86Opcode.
/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
/// the two new horizontal binops.
/// When Mode is set, the first horizontal binop dag node takes as input the
/// lower 128 bits of V0 and the upper 128 bits of V0. The second
/// horizontal binop dag node takes as input the lower 128 bits of V1
/// and the upper 128 bits of V1.
6573 /// HADD V0_LO, V0_HI
6574 /// HADD V1_LO, V1_HI
/// Otherwise, the first horizontal binop dag node takes as input the lower
/// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal
/// binop dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
6580 /// HADD V0_LO, V1_LO
6581 /// HADD V0_HI, V1_HI
6583 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6584 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6585 /// the upper 128-bits of the result.
6586 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6587 SDLoc DL, SelectionDAG &DAG,
6588 unsigned X86Opcode, bool Mode,
6589 bool isUndefLO, bool isUndefHI) {
6590 EVT VT = V0.getValueType();
6591 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6592 "Invalid nodes in input!");
6594 unsigned NumElts = VT.getVectorNumElements();
6595 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6596 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6597 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6598 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6599 EVT NewVT = V0_LO.getValueType();
6601 SDValue LO = DAG.getUNDEF(NewVT);
6602 SDValue HI = DAG.getUNDEF(NewVT);
  if (Mode) {
    // Don't emit a horizontal binop if the result is expected to be UNDEF.
    if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
    if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
      HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
  } else {
    // Don't emit a horizontal binop if the result is expected to be UNDEF.
    if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
                       V1_LO->getOpcode() != ISD::UNDEF))
      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);

    if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
                       V1_HI->getOpcode() != ISD::UNDEF))
      HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
  }
6621 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6624 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6625 /// sequence of 'vadd + vsub + blendi'.
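///
/// For example (a sketch; A and B are arbitrary v4f32 values), the
/// build_vector
///   (fsub (extract A, 0), (extract B, 0)),
///   (fadd (extract A, 1), (extract B, 1)),
///   (fsub (extract A, 2), (extract B, 2)),
///   (fadd (extract A, 3), (extract B, 3))
/// folds to (X86ISD::ADDSUB A, B): subtract in even lanes, add in odd lanes.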
6626 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6627 const X86Subtarget *Subtarget) {
  SDLoc DL(BV);
  EVT VT = BV->getValueType(0);
6630 unsigned NumElts = VT.getVectorNumElements();
6631 SDValue InVec0 = DAG.getUNDEF(VT);
6632 SDValue InVec1 = DAG.getUNDEF(VT);
6634 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6635 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6637 // Odd-numbered elements in the input build vector are obtained from
6638 // adding two integer/float elements.
6639 // Even-numbered elements in the input build vector are obtained from
6640 // subtracting two integer/float elements.
6641 unsigned ExpectedOpcode = ISD::FSUB;
6642 unsigned NextExpectedOpcode = ISD::FADD;
6643 bool AddFound = false;
6644 bool SubFound = false;
6646 for (unsigned i = 0, e = NumElts; i != e; i++) {
6647 SDValue Op = BV->getOperand(i);
6649 // Skip 'undef' values.
6650 unsigned Opcode = Op.getOpcode();
6651 if (Opcode == ISD::UNDEF) {
6652 std::swap(ExpectedOpcode, NextExpectedOpcode);
6656 // Early exit if we found an unexpected opcode.
6657 if (Opcode != ExpectedOpcode)
6660 SDValue Op0 = Op.getOperand(0);
6661 SDValue Op1 = Op.getOperand(1);
6663 // Try to match the following pattern:
6664 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6665 // Early exit if we cannot match that sequence.
6666 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6667 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6668 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6669 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6670 Op0.getOperand(1) != Op1.getOperand(1))
    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (I0 != i)
      return SDValue();

    // We found a valid add/sub node. Update the information accordingly.
    if (i & 1)
      AddFound = true;
    else
      SubFound = true;
6683 // Update InVec0 and InVec1.
6684 if (InVec0.getOpcode() == ISD::UNDEF)
6685 InVec0 = Op0.getOperand(0);
6686 if (InVec1.getOpcode() == ISD::UNDEF)
6687 InVec1 = Op1.getOperand(0);
6689 // Make sure that operands in input to each add/sub node always
    // come from the same pair of vectors.
6691 if (InVec0 != Op0.getOperand(0)) {
      if (ExpectedOpcode == ISD::FSUB)
        return SDValue();
6695 // FADD is commutable. Try to commute the operands
6696 // and then test again.
6697 std::swap(Op0, Op1);
      if (InVec0 != Op0.getOperand(0))
        return SDValue();
    }

    if (InVec1 != Op1.getOperand(0))
      return SDValue();
6705 // Update the pair of expected opcodes.
6706 std::swap(ExpectedOpcode, NextExpectedOpcode);
6709 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6710 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6711 InVec1.getOpcode() != ISD::UNDEF)
    return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);

  return SDValue();
}
6717 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6718 const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
6721 unsigned NumElts = VT.getVectorNumElements();
6722 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6723 SDValue InVec0, InVec1;
6725 // Try to match an ADDSUB.
6726 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6727 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6728 SDValue Value = matchAddSub(BV, DAG, Subtarget);
    if (Value.getNode())
      return Value;
  }
6733 // Try to match horizontal ADD/SUB.
6734 unsigned NumUndefsLO = 0;
6735 unsigned NumUndefsHI = 0;
6736 unsigned Half = NumElts/2;
6738 // Count the number of UNDEF operands in the build_vector in input.
6739 for (unsigned i = 0, e = Half; i != e; ++i)
    if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
      NumUndefsLO++;
6743 for (unsigned i = Half, e = NumElts; i != e; ++i)
    if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
      NumUndefsHI++;
6747 // Early exit if this is either a build_vector of all UNDEFs or all the
6748 // operands but one are UNDEF.
  if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
    return SDValue();
6752 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6753 // Try to match an SSE3 float HADD/HSUB.
6754 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6755 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6757 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6758 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6759 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6760 // Try to match an SSSE3 integer HADD/HSUB.
6761 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6762 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6764 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6765 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
  if (!Subtarget->hasAVX())
    return SDValue();
6771 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6772 // Try to match an AVX horizontal add/sub of packed single/double
6773 // precision floating point values from 256-bit vectors.
6774 SDValue InVec2, InVec3;
6775 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6776 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6777 ((InVec0.getOpcode() == ISD::UNDEF ||
6778 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6779 ((InVec1.getOpcode() == ISD::UNDEF ||
6780 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6781 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6783 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6784 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6785 ((InVec0.getOpcode() == ISD::UNDEF ||
6786 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6787 ((InVec1.getOpcode() == ISD::UNDEF ||
6788 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6789 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6790 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6791 // Try to match an AVX2 horizontal add/sub of signed integers.
    SDValue InVec2, InVec3;
    unsigned X86Opcode;
    bool CanFold = true;
6796 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6797 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6798 ((InVec0.getOpcode() == ISD::UNDEF ||
6799 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6800 ((InVec1.getOpcode() == ISD::UNDEF ||
6801 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6802 X86Opcode = X86ISD::HADD;
6803 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6804 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6805 ((InVec0.getOpcode() == ISD::UNDEF ||
6806 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6807 ((InVec1.getOpcode() == ISD::UNDEF ||
6808 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      X86Opcode = X86ISD::HSUB;
    else
      CanFold = false;

    if (CanFold) {
6814 // Fold this build_vector into a single horizontal add/sub.
6815 // Do this only if the target has AVX2.
6816 if (Subtarget->hasAVX2())
6817 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6819 // Do not try to expand this build_vector into a pair of horizontal
6820 // add/sub if we can emit a pair of scalar add/sub.
      if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
        return SDValue();
      // Convert this build_vector into a pair of horizontal binops followed
      // by a concat vector.
6826 bool isUndefLO = NumUndefsLO == Half;
6827 bool isUndefHI = NumUndefsHI == Half;
6828 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
                                   isUndefLO, isUndefHI);
    }
  }
6833 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
       VT == MVT::v16i16) && Subtarget->hasAVX()) {
    unsigned X86Opcode;
6836 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6837 X86Opcode = X86ISD::HADD;
6838 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6839 X86Opcode = X86ISD::HSUB;
6840 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6841 X86Opcode = X86ISD::FHADD;
6842 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::FHSUB;
    else
      return SDValue();
6847 // Don't try to expand this build_vector into a pair of horizontal add/sub
6848 // if we can simply emit a pair of scalar add/sub.
    if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
      return SDValue();
    // Convert this build_vector into two horizontal add/sub followed by
    // a concat vector.
6854 bool isUndefLO = NumUndefsLO == Half;
6855 bool isUndefHI = NumUndefsHI == Half;
6856 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
                                 isUndefLO, isUndefHI);
  }

  return SDValue();
}
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
6868 MVT ExtVT = VT.getVectorElementType();
6869 unsigned NumElems = Op.getNumOperands();
6871 // Generate vectors for predicate vectors.
6872 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6873 return LowerBUILD_VECTORvXi1(Op, DAG);
6875 // Vectors containing all zeros can be matched by pxor and xorps later
6876 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6877 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6878 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
    if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
      return Op;
6882 return getZeroVector(VT, Subtarget, DAG, dl);
6885 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6886 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6887 // vpcmpeqd on 256-bit vectors.
6888 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
    if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
      return Op;
6892 if (!VT.is512BitVector())
6893 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6896 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
  if (Broadcast.getNode())
    return Broadcast;
6900 unsigned EVTBits = ExtVT.getSizeInBits();
6902 unsigned NumZero = 0;
6903 unsigned NumNonZero = 0;
6904 unsigned NonZeros = 0;
6905 bool IsAllConstants = true;
6906 SmallSet<SDValue, 8> Values;
6907 for (unsigned i = 0; i < NumElems; ++i) {
6908 SDValue Elt = Op.getOperand(i);
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    Values.insert(Elt);
6912 if (Elt.getOpcode() != ISD::Constant &&
6913 Elt.getOpcode() != ISD::ConstantFP)
6914 IsAllConstants = false;
    if (X86::isZeroNode(Elt))
      NumZero++;
    else {
      NonZeros |= (1 << i);
      NumNonZero++;
    }
  }
6923 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6924 if (NumNonZero == 0)
6925 return DAG.getUNDEF(VT);
6927 // Special case for single non-zero, non-undef, element.
6928 if (NumNonZero == 1) {
6929 unsigned Idx = countTrailingZeros(NonZeros);
6930 SDValue Item = Op.getOperand(Idx);
6932 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6933 // the value are obviously zero, truncate the value to i32 and do the
6934 // insertion that way. Only do this if the value is non-constant or if the
6935 // value is a constant being inserted into element 0. It is cheaper to do
6936 // a constant pool load than it is to do a movd + shuffle.
6937 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6938 (!IsAllConstants || Idx == 0)) {
6939 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6941 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6942 EVT VecVT = MVT::v4i32;
6943 unsigned VecElts = 4;
6945 // Truncate the value (which may itself be a constant) to i32, and
6946 // convert it to a vector with movd (S2V+shuffle to zero extend).
6947 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6948 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6950 // If using the new shuffle lowering, just directly insert this.
6951 if (ExperimentalVectorShuffleLowering)
          return DAG.getNode(
              ISD::BITCAST, dl, VT,
6954 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6956 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6958 // Now we have our 32-bit value zero extended in the low element of
6959 // a vector. If Idx != 0, swizzle it into place.
      if (Idx != 0) {
        SmallVector<int, 4> Mask;
        Mask.push_back(Idx);
        for (unsigned i = 1; i != VecElts; ++i)
          Mask.push_back(i);
        Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
                                    &Mask[0]);
      }
      return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6972 // If we have a constant or non-constant insertion into the low element of
6973 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6974 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6975 // depending on what the source datatype is.
    if (Idx == 0) {
      if (NumZero == 0)
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6980 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6981 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6982 if (VT.is256BitVector() || VT.is512BitVector()) {
6983 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6984 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6985 Item, DAG.getIntPtrConstant(0));
        }
        assert(VT.is128BitVector() && "Expected an SSE value type!");
6988 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6989 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6990 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6993 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
6994 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
6995 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
6996 if (VT.is256BitVector()) {
6997 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
6998 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
        } else {
          assert(VT.is128BitVector() && "Expected an SSE value type!");
          Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
        }
        return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7007 // Is it a vector logical left shift?
7008 if (NumElems == 2 && Idx == 1 &&
7009 X86::isZeroNode(Op.getOperand(0)) &&
7010 !X86::isZeroNode(Op.getOperand(1))) {
7011 unsigned NumBits = VT.getSizeInBits();
7012 return getVShift(true, VT,
7013 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7014 VT, Op.getOperand(1)),
7015 NumBits/2, DAG, *this, dl);
    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDValue();
7021 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7022 // is a non-constant being inserted into an element other than the low one,
7023 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7024 // movd/movss) to move this into the low element, then shuffle it into
    // place.
    if (EVTBits == 32) {
7027 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7029 // If using the new shuffle lowering, just directly insert this.
7030 if (ExperimentalVectorShuffleLowering)
7031 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7033 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7034 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7035 SmallVector<int, 8> MaskVec;
7036 for (unsigned i = 0; i != NumElems; ++i)
7037 MaskVec.push_back(i == Idx ? 0 : 1);
7038 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7042 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7043 if (Values.size() == 1) {
7044 if (EVTBits == 32) {
7045 // Instead of a shuffle like this:
7046 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7047 // Check if it's possible to issue this instead.
7048 // shuffle (vload ptr)), undef, <1, 1, 1, 1>
7049 unsigned Idx = countTrailingZeros(NonZeros);
7050 SDValue Item = Op.getOperand(Idx);
7051 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7052 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7057 // A vector full of immediates; various special cases are already
  // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDValue();
7062 // For AVX-length vectors, see if we can use a vector load to get all of the
7063 // elements, otherwise build the individual 128-bit pieces and use
7064 // shuffles to put them in place.
7065 if (VT.is256BitVector() || VT.is512BitVector()) {
7066 SmallVector<SDValue, 64> V;
7067 for (unsigned i = 0; i != NumElems; ++i)
7068 V.push_back(Op.getOperand(i));
7070 // Check for a build vector of consecutive loads.
    if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
      return LD;
7074 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7076 // Build both the lower and upper subvector.
7077 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7078 makeArrayRef(&V[0], NumElems/2));
7079 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7080 makeArrayRef(&V[NumElems / 2], NumElems/2));
7082 // Recreate the wider vector with the lower and upper part.
7083 if (VT.is256BitVector())
7084 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7085 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7088 // Let legalizer expand 2-wide build_vectors.
7089 if (EVTBits == 64) {
7090 if (NumNonZero == 1) {
7091 // One half is zero or undef.
7092 unsigned Idx = countTrailingZeros(NonZeros);
7093 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7094 Op.getOperand(Idx));
      return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
    }
    return SDValue();
  }
7100 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7101 if (EVTBits == 8 && NumElems == 16) {
7102 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7104 if (V.getNode()) return V;
7107 if (EVTBits == 16 && NumElems == 8) {
7108 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7110 if (V.getNode()) return V;
7113 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7114 if (EVTBits == 32 && NumElems == 4) {
    SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
    if (V.getNode())
      return V;
  }
7120 // If element VT is == 32 bits, turn it into a number of shuffles.
7121 SmallVector<SDValue, 8> V(NumElems);
7122 if (NumElems == 4 && NumZero > 0) {
7123 for (unsigned i = 0; i < 4; ++i) {
7124 bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, Subtarget, DAG, dl);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    }
7131 for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
        default: break;
        case 0: V[i] = V[i*2]; break; // Must be a zero vector.
        case 1: V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); break;
        case 2: V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); break;
        case 3: V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); break;
      }
    }
7149 bool Reverse1 = (NonZeros & 0x3) == 2;
7150 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    int MaskVec[] = {
      Reverse1 ? 1 : 0,
      Reverse1 ? 0 : 1,
      static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
      static_cast<int>(Reverse2 ? NumElems : NumElems+1)
    };
7157 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7160 if (Values.size() > 1 && VT.is128BitVector()) {
7161 // Check for a build vector of consecutive loads.
7162 for (unsigned i = 0; i < NumElems; ++i)
7163 V[i] = Op.getOperand(i);
7165 // Check for elements which are consecutive loads.
    SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
    if (LD.getNode())
      return LD;
7170 // Check for a build vector from mostly shuffle plus few inserting.
    SDValue Sh = buildFromShuffleMostly(Op, DAG);
    if (Sh.getNode())
      return Sh;
7175 // For SSE 4.1, use insertps to put the high elements into the low element.
  if (getSubtarget()->hasSSE41()) {
    SDValue Result;
7178 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7179 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
    else
      Result = DAG.getUNDEF(VT);
7183 for (unsigned i = 1; i < NumElems; ++i) {
7184 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7185 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
                           Op.getOperand(i), DAG.getIntPtrConstant(i));
    }
    return Result;
  }
7191 // Otherwise, expand into a number of unpckl*, start by extending each of
7192 // our (non-undef) elements to the full vector width with the element in the
7193 // bottom slot of the vector (which generates no code for SSE).
7194 for (unsigned i = 0; i < NumElems; ++i) {
7195 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7196 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7198 V[i] = DAG.getUNDEF(VT);
7201 // Next, we iteratively mix elements, e.g. for v4f32:
7202 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7203 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7204 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7205 unsigned EltStride = NumElems >> 1;
7206 while (EltStride != 0) {
7207 for (unsigned i = 0; i < EltStride; ++i) {
7208 // If V[i+EltStride] is undef and this is the first round of mixing,
7209 // then it is safe to just drop this shuffle: V[i] is already in the
7210 // right place, the one element (since it's the first round) being
7211 // inserted as undef can be dropped. This isn't safe for successive
7212 // rounds because they will permute elements within both vectors.
7213 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
          EltStride == NumElems/2)
        continue;
      V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
    }
    EltStride >>= 1;
  }
  return V[0];
}
7226 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7227 // to create 256-bit vectors from two other 128-bit ones.
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT ResVT = Op.getSimpleValueType();
7232 assert((ResVT.is256BitVector() ||
7233 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7235 SDValue V1 = Op.getOperand(0);
7236 SDValue V2 = Op.getOperand(1);
7237 unsigned NumElems = ResVT.getVectorNumElements();
  if (ResVT.is256BitVector())
7239 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7241 if (Op.getNumOperands() == 4) {
7242 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7243 ResVT.getVectorNumElements()/2);
7244 SDValue V3 = Op.getOperand(2);
7245 SDValue V4 = Op.getOperand(3);
    return Concat256BitVectors(
        Concat128BitVectors(V1, V2, HalfVT, NumElems / 2, DAG, dl),
        Concat128BitVectors(V3, V4, HalfVT, NumElems / 2, DAG, dl), ResVT,
        NumElems, DAG, dl);
7249 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7252 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7253 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7254 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7255 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7256 Op.getNumOperands() == 4)));
7258 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7259 // from two other 128-bit ones.
7261 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7262 return LowerAVXCONCAT_VECTORS(Op, DAG);
7266 //===----------------------------------------------------------------------===//
7267 // Vector shuffle lowering
7269 // This is an experimental code path for lowering vector shuffles on x86. It is
7270 // designed to handle arbitrary vector shuffles and blends, gracefully
7271 // degrading performance as necessary. It works hard to recognize idiomatic
7272 // shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// patterns.
7275 //===----------------------------------------------------------------------===//
7277 /// \brief Tiny helper function to identify a no-op mask.
7279 /// This is a somewhat boring predicate function. It checks whether the mask
7280 /// array input, which is assumed to be a single-input shuffle mask of the kind
7281 /// used by the X86 shuffle instructions (not a fully general
7282 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7283 /// in-place shuffle are 'no-op's.
7284 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7285 for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] != -1 && Mask[i] != i)
      return false;

  return true;
}
7291 /// \brief Helper function to classify a mask as a single-input mask.
7293 /// This isn't a generic single-input test because in the vector shuffle
7294 /// lowering we canonicalize single inputs to be the first input operand. This
7295 /// means we can more quickly test for a single input by only checking whether
/// an input from the second operand exists. We also assume that the size of
/// the mask corresponds to the size of the input vectors, which isn't true in
/// the fully general case.
7299 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
  for (int M : Mask)
    if (M >= (int)Mask.size())
      return false;
  return true;
}
/// \brief Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
7309 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7310 /// and we routinely test for these.
7311 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7312 int LaneSize = 128 / VT.getScalarSizeInBits();
7313 int Size = Mask.size();
7314 for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      return true;
  return false;
}
7320 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7322 /// This checks a shuffle mask to see if it is performing the same
7323 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7324 /// that it is also not lane-crossing. It may however involve a blend from the
7325 /// same lane of a second vector.
7327 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7328 /// non-trivial to compute in the face of undef lanes. The representation is
7329 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7330 /// entries from both V1 and V2 inputs to the wider mask.
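///
/// For example (a sketch), the v8f32 mask <0, 8, 2, 10, 4, 12, 6, 14> repeats
/// the same in-lane pattern in both 128-bit lanes and populates
/// \p RepeatedMask with <0, 8, 2, 10>, where entries of 8 or more denote
/// elements taken from the second input.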
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7333 SmallVectorImpl<int> &RepeatedMask) {
7334 int LaneSize = 128 / VT.getScalarSizeInBits();
7335 RepeatedMask.resize(LaneSize, -1);
7336 int Size = Mask.size();
7337 for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;
7344 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7345 if (RepeatedMask[i % LaneSize] == -1)
7346 // This is the first non-undef entry in this slot of a 128-bit lane.
7347 RepeatedMask[i % LaneSize] =
7348 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7349 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
      // Found a mismatch with the repeated mask.
      return false;
  }

  return true;
}
7356 // Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
// 2013 will allow us to use it as a non-type template parameter.
namespace {
7360 /// \brief Implementation of the \c isShuffleEquivalent variadic functor.
7362 /// See its documentation for details.
7363 bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
  if (Mask.size() != Args.size())
    return false;
7366 for (int i = 0, e = Mask.size(); i < e; ++i) {
7367 assert(*Args[i] >= 0 && "Arguments must be positive integers!");
    if (Mask[i] != -1 && Mask[i] != *Args[i])
      return false;
  }
  return true;
}
} // namespace
7376 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7379 /// This is a fast way to test a shuffle mask against a fixed pattern:
7381 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7383 /// It returns true if the mask is exactly as wide as the argument list, and
7384 /// each element of the mask is either -1 (signifying undef) or the value given
7385 /// in the argument.
7386 static const VariadicFunction1<
7387 bool, ArrayRef<int>, int, isShuffleEquivalentImpl> isShuffleEquivalent = {};
7389 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7391 /// This helper function produces an 8-bit shuffle immediate corresponding to
7392 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for
/// example.
7396 /// NB: We rely heavily on "undef" masks preserving the input lane.
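///
/// For example, the reversal mask <3, 2, 1, 0> encodes as 0b00011011 (0x1B):
/// two bits per result lane, with lane 0 occupying the lowest-order bit pair.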
7397 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7398 SelectionDAG &DAG) {
7399 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7400 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7401 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7402 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7403 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
  unsigned Imm = 0;
  Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7407 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7408 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7409 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7410 return DAG.getConstant(Imm, MVT::i8);
7413 /// \brief Try to emit a blend instruction for a shuffle.
7415 /// This doesn't do any checks for the availability of instructions for blending
7416 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7417 /// be matched in the backend with the type given. What it does check for is
7418 /// that the shuffle mask is in fact a blend.
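///
/// For example (a sketch), a v4f32 shuffle with mask <0, 5, 2, 7> keeps
/// elements 0 and 2 from V1 and takes elements 1 and 3 from V2, so it is a
/// blend with immediate 0b1010.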
7419 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7420 SDValue V2, ArrayRef<int> Mask,
7421 const X86Subtarget *Subtarget,
7422 SelectionDAG &DAG) {
7424 unsigned BlendMask = 0;
7425 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7426 if (Mask[i] >= Size) {
7427 if (Mask[i] != i + Size)
7428 return SDValue(); // Shuffled V2 input!
      BlendMask |= 1u << i;
      continue;
    }
7432 if (Mask[i] >= 0 && Mask[i] != i)
7433 return SDValue(); // Shuffled V1 input!
7435 switch (VT.SimpleTy) {
  case MVT::v2f64:
  case MVT::v4f32:
  case MVT::v4f64:
  case MVT::v8f32:
    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7441 DAG.getConstant(BlendMask, MVT::i8));
  case MVT::v4i64:
  case MVT::v8i32:
    assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
    // FALLTHROUGH
  case MVT::v2i64:
  case MVT::v4i32:
    // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7450 // that instruction.
7451 if (Subtarget->hasAVX2()) {
7452 // Scale the blend by the number of 32-bit dwords per element.
7453 int Scale = VT.getScalarSizeInBits() / 32;
      BlendMask = 0;
      for (int i = 0, Size = Mask.size(); i < Size; ++i)
7456 if (Mask[i] >= Size)
7457 for (int j = 0; j < Scale; ++j)
7458 BlendMask |= 1u << (i * Scale + j);
7460 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7461 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7462 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7463 return DAG.getNode(ISD::BITCAST, DL, VT,
7464 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7465 DAG.getConstant(BlendMask, MVT::i8)));
    }
    // FALLTHROUGH
  case MVT::v8i16: {
    // For integer shuffles we need to expand the mask and cast the inputs to
7470 // v8i16s prior to blending.
7471 int Scale = 8 / VT.getVectorNumElements();
    BlendMask = 0;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
7474 if (Mask[i] >= Size)
7475 for (int j = 0; j < Scale; ++j)
7476 BlendMask |= 1u << (i * Scale + j);
7478 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7479 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7480 return DAG.getNode(ISD::BITCAST, DL, VT,
7481 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7482 DAG.getConstant(BlendMask, MVT::i8)));
  }

  case MVT::v16i16: {
    assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7487 SmallVector<int, 8> RepeatedMask;
7488 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7489 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7490 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
      BlendMask = 0;
      for (int i = 0; i < 8; ++i)
7493 if (RepeatedMask[i] >= 16)
7494 BlendMask |= 1u << i;
7495 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7496 DAG.getConstant(BlendMask, MVT::i8));
    }
  }
    // FALLTHROUGH
  case MVT::v16i8:
  case MVT::v32i8: {
    assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7502 // Scale the blend by the number of bytes per element.
7503 int Scale = VT.getScalarSizeInBits() / 8;
7504 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7506 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7507 // mix of LLVM's code generator and the x86 backend. We tell the code
7508 // generator that boolean values in the elements of an x86 vector register
7509 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7510 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7511 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7512 // of the element (the remaining are ignored) and 0 in that high bit would
7513 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7514 // the LLVM model for boolean values in vector elements gets the relevant
7515 // bit set, it is set backwards and over constrained relative to x86's
    // actual model.
    SDValue VSELECTMask[32];
7518 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7519 for (int j = 0; j < Scale; ++j)
7520 VSELECTMask[Scale * i + j] =
7521 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7522 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7524 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7525 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
7528 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7529 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
7534 llvm_unreachable("Not a supported integer vector type!");
7538 /// \brief Generic routine to lower a shuffle and blend as a decomposed set of
7539 /// unblended shuffles followed by an unshuffled blend.
7541 /// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations.
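///
/// For example (a sketch), a v4f32 shuffle with mask <0, 6, 3, 5> becomes a
/// shuffle of V1 by <0, u, 3, u>, a shuffle of V2 by <u, 2, u, 1>, and a
/// final blend with mask <0, 5, 2, 7>.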
7544 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7548 SelectionDAG &DAG) {
7549 // Shuffle the input elements into the desired positions in V1 and V2 and
7550 // blend them together.
7551 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7552 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7553 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7554 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7555 if (Mask[i] >= 0 && Mask[i] < Size) {
      V1Mask[i] = Mask[i];
      BlendMask[i] = i;
7558 } else if (Mask[i] >= Size) {
7559 V2Mask[i] = Mask[i] - Size;
7560 BlendMask[i] = i + Size;
7563 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7564 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7565 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7568 /// \brief Try to lower a vector shuffle as a byte rotation.
7570 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7571 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7572 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
7574 /// does not check for the profitability of lowering either as PALIGNR or
7575 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7576 /// This matches shuffle vectors that look like:
7578 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7580 /// Essentially it concatenates V1 and V2, shifts right by some number of
7581 /// elements, and takes the low elements as the result. Note that while this is
7582 /// specified as a *right shift* because x86 is little-endian, it is a *left
7583 /// rotate* of the vector lanes.
7585 /// Note that this only handles 128-bit vector widths currently.
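///
/// For example (a sketch), the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2] above
/// is a rotation by 3 elements; with 2 bytes per element that becomes a
/// PALIGNR with an immediate of 6 on SSSE3 targets.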
7586 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7589 const X86Subtarget *Subtarget,
7590 SelectionDAG &DAG) {
7591 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7593 // We need to detect various ways of spelling a rotation:
7594 // [11, 12, 13, 14, 15, 0, 1, 2]
7595 // [-1, 12, 13, 14, -1, -1, 1, -1]
7596 // [-1, -1, -1, -1, -1, -1, 1, 2]
7597 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7598 // [-1, 4, 5, 6, -1, -1, 9, -1]
7599 // [-1, 4, 5, 6, -1, -1, -1, -1]
  int Rotation = 0;
  SDValue Lo, Hi;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] == -1)
      continue;
    assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
7607 // Based on the mod-Size value of this mask element determine where
7608 // a rotated vector would have started.
7609 int StartIdx = i - (Mask[i] % Size);
    // The identity rotation isn't interesting, stop.
    if (StartIdx == 0)
      return SDValue();
7614 // If we found the tail of a vector the rotation must be the missing
7615 // front. If we found the head of a vector, it must be how much of the head.
7616 int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
    if (Rotation == 0)
      Rotation = CandidateRotation;
7620 else if (Rotation != CandidateRotation)
      // The rotations don't match, so we can't match this mask.
      return SDValue();
7624 // Compute which value this mask is pointing at.
7625 SDValue MaskV = Mask[i] < Size ? V1 : V2;
7627 // Compute which of the two target values this index should be assigned to.
    // This reflects whether the high elements are remaining or the low
    // elements are.
7630 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7632 // Either set up this value if we've not encountered it before, or check
7633 // that it remains consistent.
    if (!TargetV)
      TargetV = MaskV;
    else if (TargetV != MaskV)
7637 // This may be a rotation, but it pulls from the inputs in some
      // unsupported interleaving.
      return SDValue();
  }
7642 // Check that we successfully analyzed the mask, and normalize the results.
7643 assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  if (!Lo)
    Lo = Hi;
  else if (!Hi)
    Hi = Lo;
7650 assert(VT.getSizeInBits() == 128 &&
7651 "Rotate-based lowering only supports 128-bit lowering!");
7652 assert(Mask.size() <= 16 &&
7653 "Can shuffle at most 16 bytes in a 128-bit vector!");
7655 // The actual rotate instruction rotates bytes, so we need to scale the
7656 // rotation based on how many bytes are in the vector.
7657 int Scale = 16 / Mask.size();
7659 // SSSE3 targets can use the palignr instruction
7660 if (Subtarget->hasSSSE3()) {
7661 // Cast the inputs to v16i8 to match PALIGNR.
7662 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
7663 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
7665 return DAG.getNode(ISD::BITCAST, DL, VT,
7666 DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
7667 DAG.getConstant(Rotation * Scale, MVT::i8)));
7670 // Default SSE2 implementation
7671 int LoByteShift = 16 - Rotation * Scale;
7672 int HiByteShift = Rotation * Scale;
7674 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7675 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7676 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7678 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7679 DAG.getConstant(8 * LoByteShift, MVT::i8));
7680 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7681 DAG.getConstant(8 * HiByteShift, MVT::i8));
7682 return DAG.getNode(ISD::BITCAST, DL, VT,
7683 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7686 /// \brief Compute whether each element of a shuffle is zeroable.
7688 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7689 /// Either it is an undef element in the shuffle mask, the element of the input
7690 /// referenced is undef, or the element of the input referenced is known to be
7691 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
/// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
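///
/// For example (a sketch), if V2 is a build_vector of all zeros, then a
/// v4i32 shuffle with mask <0, 4, 1, 7> has elements 1 and 3 zeroable.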
7694 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7695 SDValue V1, SDValue V2) {
7696 SmallBitVector Zeroable(Mask.size(), false);
7698 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7699 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7701 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    int M = Mask[i];

    // Handle the easy cases.
    if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
      Zeroable[i] = true;
      continue;
    }
    // If this is an index into a build_vector node, dig out the input value
    // and check it.
7711 SDValue V = M < Size ? V1 : V2;
    if (V.getOpcode() != ISD::BUILD_VECTOR)
      continue;
7715 SDValue Input = V.getOperand(M % Size);
7716 // The UNDEF opcode check really should be dead code here, but not quite
7717 // worth asserting on (it isn't invalid, just unexpected).
    if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
      Zeroable[i] = true;
  }

  return Zeroable;
}
7725 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7727 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
7728 /// byte-shift instructions. The mask must consist of a shifted sequential
7729 /// shuffle from one of the input vectors and zeroable elements for the
7730 /// remaining 'shifted in' elements.
7732 /// Note that this only handles 128-bit vector widths currently.
7733 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7734 SDValue V2, ArrayRef<int> Mask,
7735 SelectionDAG &DAG) {
7736 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7738 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7740 int Size = Mask.size();
7741 int Scale = 16 / Size;
7743 for (int Shift = 1; Shift < Size; Shift++) {
7744 int ByteShift = Shift * Scale;
7746 // PSRLDQ : (little-endian) right byte shift
7747 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7748 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7749 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7750 bool ZeroableRight = true;
7751 for (int i = Size - Shift; i < Size; i++) {
7752 ZeroableRight &= Zeroable[i];
7755 if (ZeroableRight) {
7756 bool ValidShiftRight1 =
7757 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
7758 bool ValidShiftRight2 =
7759 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);
7761 if (ValidShiftRight1 || ValidShiftRight2) {
7762 // Cast the inputs to v2i64 to match PSRLDQ.
7763 SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
7764 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7765 SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
7766 DAG.getConstant(ByteShift * 8, MVT::i8));
7767 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7771 // PSLLDQ : (little-endian) left byte shift
7772 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7773 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7774 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7775 bool ZeroableLeft = true;
7776 for (int i = 0; i < Shift; i++) {
7777 ZeroableLeft &= Zeroable[i];
    }

    if (ZeroableLeft) {
      bool ValidShiftLeft1 =
7782 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
7783 bool ValidShiftLeft2 =
7784 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);
7786 if (ValidShiftLeft1 || ValidShiftLeft2) {
7787 // Cast the inputs to v2i64 to match PSLLDQ.
7788 SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
7789 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7790 SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
7791 DAG.getConstant(ByteShift * 8, MVT::i8));
        return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
      }
    }
  }

  return SDValue();
}
7800 /// \brief Lower a vector shuffle as a zero or any extension.
7802 /// Given a specific number of elements, element bit width, and extension
7803 /// stride, produce either a zero or any extension based on the available
7804 /// features of the subtarget.
7805 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7806 SDLoc DL, MVT VT, int NumElements, int Scale, bool AnyExt, SDValue InputV,
7807 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7808 assert(Scale > 1 && "Need a scale to extend.");
7809 int EltBits = VT.getSizeInBits() / NumElements;
7810 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7811 "Only 8, 16, and 32 bit elements can be extended.");
7812 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7814 // Found a valid zext mask! Try various lowering strategies based on the
7815 // input type and available ISA extensions.
7816 if (Subtarget->hasSSE41()) {
7817 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7818 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7819 NumElements / Scale);
7820 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7821 return DAG.getNode(ISD::BITCAST, DL, VT,
7822 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7825 // For any extends we can cheat for larger element sizes and use shuffle
7826 // instructions that can fold with a load and/or copy.
7827 if (AnyExt && EltBits == 32) {
7828 int PSHUFDMask[4] = {0, -1, 1, -1};
7830 ISD::BITCAST, DL, VT,
7831 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7832 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7833 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
7835 if (AnyExt && EltBits == 16 && Scale > 2) {
7836 int PSHUFDMask[4] = {0, -1, 0, -1};
7837 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7838 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7839 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
7840 int PSHUFHWMask[4] = {1, -1, -1, -1};
7842 ISD::BITCAST, DL, VT,
7843 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
7844 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
7845 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
7848 // If this would require more than 2 unpack instructions to expand, use
7849 // pshufb when available. We can only use more than 2 unpack instructions
7850 // when zero extending i8 elements which also makes it easier to use pshufb.
7851 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
7852 assert(NumElements == 16 && "Unexpected byte vector width!");
7853 SDValue PSHUFBMask[16];
7854 for (int i = 0; i < 16; ++i)
7856 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
7857 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
7858 return DAG.getNode(ISD::BITCAST, DL, VT,
7859 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
7860 DAG.getNode(ISD::BUILD_VECTOR, DL,
7861 MVT::v16i8, PSHUFBMask)));
7864 // Otherwise emit a sequence of unpacks.
  do {
    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7867 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
7868 : getZeroVector(InputVT, Subtarget, DAG, DL);
7869 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7870 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
7875 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
/// \brief Try to lower a vector shuffle as a zero extension on any microarch.
7880 /// This routine will try to do everything in its power to cleverly lower
7881 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
7882 /// check for the profitability of this lowering, it tries to aggressively
7883 /// match this pattern. It will use all of the micro-architectural details it
7884 /// can to emit an efficient lowering. It handles both blends with all-zero
7885 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
7886 /// masking out later).
7888 /// The reason we have dedicated lowering for zext-style shuffles is that they
7889 /// are both incredibly common and often quite performance sensitive.
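///
/// For example (a sketch), a v8i16 shuffle with mask <0, z, 1, z, 2, z, 3, z>
/// (where each 'z' lane is zeroable) is a zero extension of the low four i16
/// elements of V1 to i32, and can become a single PMOVZXWD on SSE4.1.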
7890 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
7891 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
7892 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7893 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7895 int Bits = VT.getSizeInBits();
7896 int NumElements = Mask.size();
  // Define a helper function to check a particular ext-scale and lower to it
  // if valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    for (int i = 0; i < NumElements; ++i) {
      if (Mask[i] == -1)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements needs to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We are no longer in the anyext case.
        AnyExt = false;
        continue;
      }
7916 // Each of the base elements needs to be consecutive indices into the
7917 // same input vector.
7918 SDValue V = Mask[i] < NumElements ? V1 : V2;
      if (!InputV)
        InputV = V;
      else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.
7924 if (Mask[i] % NumElements != i / Scale)
        return SDValue(); // Non-consecutive strided elements.
7928 // If we fail to find an input, we have a zero-shuffle which should always
7929 // have already been handled.
7930 // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7935 DL, VT, NumElements, Scale, AnyExt, InputV, Subtarget, DAG);
7938 // The widest scale possible for extending is to a 64-bit integer.
7939 assert(Bits % 64 == 0 &&
7940 "The number of bits in a vector must be divisible by 64 on x86!");
7941 int NumExtElements = Bits / 64;
  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
7945 for (; NumExtElements < NumElements; NumExtElements *= 2) {
7946 assert(NumElements % NumExtElements == 0 &&
7947 "The input vector size must be divisble by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }
  // No viable ext lowering found.
  return SDValue();
}
7956 /// \brief Try to get a scalar value for a specific element of a vector.
7958 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
7959 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
7960 SelectionDAG &DAG) {
7961 MVT VT = V.getSimpleValueType();
7962 MVT EltVT = VT.getVectorElementType();
7963 while (V.getOpcode() == ISD::BITCAST)
7964 V = V.getOperand(0);
  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();
7971 if (V.getOpcode() == ISD::BUILD_VECTOR ||
7972 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
    return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));

  return SDValue();
}
/// \brief Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  return ISD::isNON_EXTLoad(V.getNode());
}
/// \brief Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
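///
/// For example, with a zeroable V1 the v4f32 mask <4, z, z, z> inserts V2's
/// low element into an otherwise zero vector, which maps to a single
/// VZEXT_MOVL (a MOVSS/MOVQ-style zeroing move).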
static SDValue lowerVectorShuffleAsElementInsertion(
    MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();

  int V2Index = std::find_if(Mask.begin(), Mask.end(),
                             [&Mask](int M) { return M >= (int)Mask.size(); }) -
                Mask.begin();
  bool IsV1Zeroable = true;
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }

  // Check for a single input from a SCALAR_TO_VECTOR node.
  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
  // all the smarts here sunk into that routine. However, the current
  // lowering of BUILD_VECTOR makes that nearly impossible until the old
  // vector shuffle lowering is dead.
  if (SDValue V2S = getScalarValueForVectorElement(
          V2, Mask[V2Index] - Mask.size(), DAG)) {
    // We need to zext the scalar if it is smaller than an i32.
    V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
    if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // insertions.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::v4i32;
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
             EltVT == MVT::i16) {
    // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }

  if (!IsV1Zeroable) {
    // If V1 can't be treated as a zero vector we have fewer options to lower
    // this. We can't support integer vectors or non-zero targets cheaply, and
    // the V1 elements can't be permuted in any way.
    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
    V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
    // This is essentially a special case blend operation, but if we have
    // general purpose blend operations, they are always faster. Bail and let
    // the rest of the lowering handle these as blends.
    if (Subtarget->hasSSE41())
      return SDValue();

    // Otherwise, use MOVSD or MOVSS.
    assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
           "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       ExtVT, V1, V2);
  }

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);

  if (V2Index != 0) {
    // If we have 4 or fewer lanes we can cheaply shuffle the element into
    // the desired position. Otherwise it is more efficient to do a vector
    // shift left. We know that we can do a vector shift left because all
    // the inputs are zero.
    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
      V2Shuffle[V2Index] = 0;
      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
    } else {
      // VSHLDQ (PSLLDQ) shifts by bytes, so scale the bit width down by 8.
      V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
      V2 = DAG.getNode(
          X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
          DAG.getConstant(
              V2Index * EltVT.getSizeInBits() / 8,
              DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
      V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
    }
  }
  return V2;
}
/// \brief Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
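///
/// For example, with AVX2 the v4i32 mask <0, 0, 0, 0> lowers to a single
/// VPBROADCASTD, and on AVX1 a splat of a loaded f32 scalar becomes a
/// VBROADCASTSS that folds the load.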
static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
                                             ArrayRef<int> Mask,
                                             const X86Subtarget *Subtarget,
                                             SelectionDAG &DAG) {
  if (!Subtarget->hasAVX())
    return SDValue();
  if (VT.isInteger() && !Subtarget->hasAVX2())
    return SDValue();

  // Check that the mask is a broadcast.
  int BroadcastIdx = -1;
  for (int M : Mask)
    if (M >= 0 && BroadcastIdx == -1)
      BroadcastIdx = M;
    else if (M >= 0 && M != BroadcastIdx)
      return SDValue();

  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to try and find a scalar load that
  // we can combine with the broadcast.
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::CONCAT_VECTORS: {
      int OperandSize = Mask.size() / V.getNumOperands();
      V = V.getOperand(BroadcastIdx / OperandSize);
      BroadcastIdx %= OperandSize;
      continue;
    }

    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      if (!ConstantIdx)
        break;

      int BeginIdx = (int)ConstantIdx->getZExtValue();
      int EndIdx =
          BeginIdx + (int)VInner.getValueType().getVectorNumElements();
      if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
        BroadcastIdx -= BeginIdx;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
    V = V.getOperand(BroadcastIdx);

    // If the scalar isn't a load we can't broadcast from it in AVX1, only with
    // AVX2.
    if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
    // We can't broadcast from a vector register w/o AVX2, and we can only
    // broadcast from the zero-element of a vector register.
    return SDValue();
  }

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
}
// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
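//
// For example, the v4f32 mask <0, 5, 2, 3> keeps V1's lanes 0, 2, and 3 in
// place and inserts V2's element 1 into lane 1, giving an INSERTPS immediate
// of 0x50 (source index 1 << 6, destination index 1 << 4, empty zero mask).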
static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
                                            ArrayRef<int> Mask,
                                            SelectionDAG &DAG) {
  assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

  unsigned ZMask = 0;
  int V1DstIndex = -1;
  int V2DstIndex = -1;
  bool V1UsedInPlace = false;

  for (int i = 0; i < 4; i++) {
    // Synthesize a zero mask from the zeroable elements (includes undefs).
    if (Zeroable[i]) {
      ZMask |= 1 << i;
      continue;
    }

    // Flag if we use any V1 inputs in place.
    if (i == Mask[i]) {
      V1UsedInPlace = true;
      continue;
    }

    // We can only insert a single non-zeroable element.
    if (V1DstIndex != -1 || V2DstIndex != -1)
      return SDValue();

    if (Mask[i] < 4) {
      // V1 input out of place for insertion.
      V1DstIndex = i;
    } else {
      // V2 input for insertion.
      V2DstIndex = i;
    }
  }

  // Don't bother if we have no (non-zeroable) element for insertion.
  if (V1DstIndex == -1 && V2DstIndex == -1)
    return SDValue();

  // Determine element insertion src/dst indices. The src index is from the
  // start of the inserted vector, not the start of the concatenated vector.
  unsigned V2SrcIndex = 0;
  if (V1DstIndex != -1) {
    // If we have a V1 input out of place, we use V1 as the V2 element insertion
    // and don't use the original V2 at all.
    V2SrcIndex = Mask[V1DstIndex];
    V2DstIndex = V1DstIndex;
    V2 = V1;
  } else {
    V2SrcIndex = Mask[V2DstIndex] - 4;
  }

  // If no V1 inputs are used in place, then the result is created only from
  // the zero mask and the V2 insertion - so remove V1 dependency.
  if (!V1UsedInPlace)
    V1 = DAG.getUNDEF(MVT::v4f32);

  unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
  assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");

  // Insert the V2 element into the desired position.
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getConstant(InsertPSMask, MVT::i8));
}
/// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
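///
/// For example, the single-input mask <1, 1> duplicates the high element and
/// becomes SHUFPD (or VPERMILPD on AVX) with immediate 3.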
static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (isSingleInputShuffleMask(Mask)) {
    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget->hasAVX()) {
      // If we have AVX, we can use VPERMILPD which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getConstant(SHUFPDMask, MVT::i8));
    }

    return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
  assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
  assert(Mask[1] >= 2 && "Non-canonicalized blend!");

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 2))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 3))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);

  // If we have a single input, insert that into V1 if we can do so cheaply.
  if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
    // Try inverting the insertion since for v2 masks it is easy to do and we
    // can't reliably sort the mask one way or the other.
    int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                          Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
          DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
                     DAG.getConstant(SHUFPDMask, MVT::i8));
}
/// \brief Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
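///
/// For example, the single-input mask <1, 1> is widened to the v4i32 mask
/// <2, 3, 2, 3> and emitted as a single PSHUFD.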
static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (isSingleInputShuffleMask(Mask)) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
    V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
    int WidenedMask[4] = {
        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getNode(
        ISD::BITCAST, DL, MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
  }

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v2i64, V1, V2, Mask, DAG))
    return Shift;

  // If we have a single input from V2 insert that into V1 if we can do so
  // cheaply.
  if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
    // Try inverting the insertion since for v2 masks it is easy to do and we
    // can't reliably sort the mask one way or the other.
    int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                          Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 2))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 3))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget->hasSSSE3())
    if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
      return Rotate;

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles here.
  V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
  V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
  return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
                     DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
/// \brief Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
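///
/// SHUFPS selects the two low result lanes from its first operand and the two
/// high result lanes from its second operand, so e.g. the mask <0, 1, 4, 5>
/// is a single SHUFPS with immediate 0x44.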
static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
                                            ArrayRef<int> Mask, SDValue V1,
                                            SDValue V2, SelectionDAG &DAG) {
  SDValue LowV = V1, HighV = V2;
  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 1) {
    int V2Index =
        std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
        Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
    int V2AdjIndex = V2Index ^ 1;

    if (Mask[V2AdjIndex] == -1) {
      // Handles all the cases where we have a single V2 element and an undef.
      // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
      // To make this work, blend them together as the first step.
      int V1Index = V2AdjIndex;
      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
                       getV4X86ShuffleImm8ForMask(BlendMask, DAG));

      // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
    if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
      // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
      // trying to place elements directly, just blend them and set up the final
      // shuffle to place them.

      // The first two blend mask elements are for V1, the second two are for
      // V2.
      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
                          Mask[2] < 4 ? Mask[2] : Mask[3],
                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                       getV4X86ShuffleImm8ForMask(BlendMask, DAG));

      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // a blend.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
      NewMask[1] = Mask[0] < 4 ? 2 : 0;
      NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DAG));
}
/// \brief Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
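///
/// For example, the single-input mask <3, 2, 1, 0> becomes VPERMILPS (or a
/// SHUFPS of the input with itself) with immediate 0x1B.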
static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    if (Subtarget->hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DAG));
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
    // input vector to both operands to simulate this with a SHUFPS.
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
  if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);

  // There are special ways we can lower some single-element blends. However, we
  // have custom ways we can lower more complex single-element blends below that
  // we defer to if both this and BLENDPS fail to match, so restrict this to
  // when the V2 input is targeting element 0 of the mask -- that is the fast
  // case here.
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  if (Subtarget->hasSSE41()) {
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
      return V;
  }

  // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
/// \brief Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
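///
/// For example, the single-input mask <2, 3, 0, 1> swaps the low and high
/// 64-bit halves and is emitted as PSHUFD with immediate 0x4E.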
static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
                                                         Mask, Subtarget, DAG))
    return ZExt;

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We coerce the shuffle pattern to be compatible with UNPCK instructions
    // but we aren't actually going to use the UNPCK instruction because doing
    // so prevents folding a load into this instruction or making a copy.
    const int UnpackLoMask[] = {0, 0, 1, 1};
    const int UnpackHiMask[] = {2, 2, 3, 3};
    if (isShuffleEquivalent(Mask, 0, 0, 1, 1))
      Mask = UnpackLoMask;
    else if (isShuffleEquivalent(Mask, 2, 2, 3, 3))
      Mask = UnpackHiMask;

    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
  if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget->hasSSSE3())
    if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
      return Rotate;

  // We implement this with SHUFPS because it can blend from two vectors.
  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // relevant.
  return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
                     DAG.getVectorShuffle(
                         MVT::v4f32, DL,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
}
/// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
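///
/// As a simple example, the mask <0, 0, 1, 1, 6, 6, 7, 7> has all its inputs
/// already in the correct half: a PSHUFLW forms the <0, 0, 1, 1> pairs in the
/// low half, a PSHUFHW forms the <6, 6, 7, 7> pairs in the high half, and no
/// cross-half PSHUFD is needed at all.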
static SDValue lowerV8I16SingleInputVectorShuffle(
    SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
  MutableArrayRef<int> HiMask = Mask.slice(4, 4);

  SmallVector<int, 4> LoInputs;
  std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
               [](int M) { return M >= 0; });
  std::sort(LoInputs.begin(), LoInputs.end());
  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
  SmallVector<int, 4> HiInputs;
  std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
               [](int M) { return M >= 0; });
  std::sort(HiInputs.begin(), HiInputs.end());
  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL =
      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
  int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH =
      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
  int NumHToH = HiInputs.size() - NumLToH;
  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v8i16, V, V, Mask, DAG))
    return Shift;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 0, 1, 1, 2, 2, 3, 3))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
  if (isShuffleEquivalent(Mask, 4, 4, 5, 5, 6, 6, 7, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
    return Rotate;
  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
  // such inputs we can swap two of the dwords across the half mark and end up
  // with <=2 inputs to each half in each half. Once there, we can fall through
  // to the generic code below. For example:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
  //
  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
  // and an existing 2-into-2 on the other half. In this case we may have to
  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
  // because any other situation (including a 3-into-1 or 1-into-3 in the other
  // half than the one we target for fixing) will be fixed when we re-enter this
  // path. Any sequence of PSHUFD instructions that results will also be
  // combined away into a single instruction. Here is an example of the tricky
  // case:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
  //
  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
  //
  // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
  //
  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
  //
  // The result is fine to be handled by the generic logic.
  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
                          int AOffset, int BOffset) {
    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
           "Must call this with A having 3 or 1 inputs from the A half.");
    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
           "Must call this with B having 1 or 3 inputs from the B half.");
    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");

    // Compute the index of dword with only one word among the three inputs in
    // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // input.
    int ADWord, BDWord;
    int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
    int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
    int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
    ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
    int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
    int TripleNonInputIdx =
        TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
    TripleDWord = TripleNonInputIdx / 2;

    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
    OneInputDWord = (OneInput / 2) ^ 1;

    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
    // and BToA inputs. If there is also such a problem with the BToB and AToB
    // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
    // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
    // is essential that we don't *create* a 3<-1 as then we might oscillate.
    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
      int NumFlippedAToBInputs =
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
      int NumFlippedBToBInputs =
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
      if ((NumFlippedAToBInputs == 1 &&
           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
          (NumFlippedBToBInputs == 1 &&
           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
        // We choose whether to fix the A half or B half based on whether that
        // half has zero flipped inputs. At zero, we may not be able to fix it
        // with that half. We also bias towards fixing the B half because that
        // will more commonly be the high half, and we have to bias one way.
        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
                                                       ArrayRef<int> Inputs) {
          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
          bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                         PinnedIdx ^ 1) != Inputs.end();
          // Determine whether the free index is in the flipped dword or the
          // unflipped dword based on where the pinned index is. We use this bit
          // in an xor to conditionally select the adjacent dword.
          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
          bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                             FixFreeIdx) != Inputs.end();
          if (IsFixIdxInput == IsFixFreeIdxInput)
            FixFreeIdx += 1;
          IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                        FixFreeIdx) != Inputs.end();
          assert(IsFixIdxInput != IsFixFreeIdxInput &&
                 "We need to be changing the number of flipped inputs!");
          int PSHUFHalfMask[] = {0, 1, 2, 3};
          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
          V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
                          MVT::v8i16, V,
                          getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));

          for (int &M : Mask)
            if (M != -1 && M == FixIdx)
              M = FixFreeIdx;
            else if (M != -1 && M == FixFreeIdx)
              M = FixIdx;
        };
        if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
        } else {
          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
          int APinnedIdx =
              AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
        }
      }
    }
    int PSHUFDMask[] = {0, 1, 2, 3};
    PSHUFDMask[ADWord] = BDWord;
    PSHUFDMask[BDWord] = ADWord;
    V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                    DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                                DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
                                getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));

    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M != -1 && M / 2 == ADWord)
        M = 2 * BDWord + M % 2;
      else if (M != -1 && M / 2 == BDWord)
        M = 2 * ADWord + M % 2;

    // Recurse back into this routine to re-compute state now that this isn't
    // a 3 and 1 problem.
    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                Mask);
  };
  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
  else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
  // At this point there are at most two inputs to the low and high halves from
  // each half. That means the inputs can always be grouped into dwords and
  // those dwords can then be moved to the correct half with a dword shuffle.
  // We use at most one low and one high word shuffle to collect these paired
  // inputs into dwords, and finally a dword shuffle to place them.
  int PSHUFLMask[4] = {-1, -1, -1, -1};
  int PSHUFHMask[4] = {-1, -1, -1, -1};
  int PSHUFDMask[4] = {-1, -1, -1, -1};

  // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
  auto fixInPlaceInputs =
      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
                    MutableArrayRef<int> SourceHalfMask,
                    MutableArrayRef<int> HalfMask, int HalfOffset) {
    if (InPlaceInputs.empty())
      return;
    if (InPlaceInputs.size() == 1) {
      SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
          InPlaceInputs[0] - HalfOffset;
      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
      return;
    }
    if (IncomingInputs.empty()) {
      // Just fix all of the in place inputs.
      for (int Input : InPlaceInputs) {
        SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
        PSHUFDMask[Input / 2] = Input / 2;
      }
      return;
    }

    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
    SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
        InPlaceInputs[0] - HalfOffset;
    // Put the second input next to the first so that they are packed into
    // a dword. We find the adjacent index by toggling the low bit.
    int AdjIndex = InPlaceInputs[0] ^ 1;
    SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
    std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
  };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
  // Now gather the cross-half inputs and place them into a free dword of
  // their target half.
  // FIXME: This operation could almost certainly be simplified dramatically to
  // look more like the 3-1 fixing operation.
  auto moveInputsToRightHalf = [&PSHUFDMask](
      MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
      MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
      MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
      int DestOffset) {
    auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
      return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
    };
    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
                                               int Word) {
      int LowWord = Word & ~1;
      int HighWord = Word | 1;
      return isWordClobbered(SourceHalfMask, LowWord) ||
             isWordClobbered(SourceHalfMask, HighWord);
    };

    if (IncomingInputs.empty())
      return;

    if (ExistingInputs.empty()) {
      // Map any dwords with inputs from them into the right half.
      for (int Input : IncomingInputs) {
        // If the source half mask maps over the inputs, turn those into
        // swaps and use the swapped lane.
        if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
          if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
            SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
                Input - SourceOffset;
            // We have to swap the uses in our half mask in one sweep.
            for (int &M : HalfMask)
              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
                M = Input;
              else if (M == Input)
                M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
          } else {
            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
                       Input - SourceOffset &&
                   "Previous placement doesn't match!");
          }
          // Note that this correctly re-maps both when we do a swap and when
          // we observe the other side of the swap above. We rely on that to
          // avoid swapping the members of the input list directly.
          Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
        }

        // Map the input's dword into the correct half.
        if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
          PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
        else
          assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
                     Input / 2 &&
                 "Previous placement doesn't match!");
      }

      // And just directly shift any other-half mask elements to be same-half
      // as we will have mirrored the dword containing the element into the
      // same position within that half.
      for (int &M : HalfMask)
        if (M >= SourceOffset && M < SourceOffset + 4) {
          M = M - SourceOffset + DestOffset;
          assert(M >= 0 && "This should never wrap below zero!");
        }
      return;
    }
    // Ensure we have the input in a viable dword of its current half. This
    // is particularly tricky because the original position may be clobbered
    // by inputs being moved and *staying* in that half.
    if (IncomingInputs.size() == 1) {
      if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        int InputFixed = std::find(std::begin(SourceHalfMask),
                                   std::end(SourceHalfMask), -1) -
                         std::begin(SourceHalfMask) + SourceOffset;
        SourceHalfMask[InputFixed - SourceOffset] =
            IncomingInputs[0] - SourceOffset;
        std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
                     InputFixed);
        IncomingInputs[0] = InputFixed;
      }
    } else if (IncomingInputs.size() == 2) {
      if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
          isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        // We have two non-adjacent or clobbered inputs we need to extract from
        // the source half. To do this, we need to map them into some adjacent
        // dword slot in the source mask.
        int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
                              IncomingInputs[1] - SourceOffset};

        // If there is a free slot in the source half mask adjacent to one of
        // the inputs, place the other input in it. We use (Index XOR 1) to
        // compute an adjacent index.
        if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
            SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
          SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          InputsFixed[1] = InputsFixed[0] ^ 1;
        } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
                   SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
          SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
          InputsFixed[0] = InputsFixed[1] ^ 1;
        } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
                   SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
          // The two inputs are in the same DWord but it is clobbered and the
          // adjacent DWord isn't used at all. Move both inputs to the free
          // slot.
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
          InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
          InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
        } else {
          // The only way we hit this point is if there is no clobbering
          // (because there are no off-half inputs to this half) and there is no
          // free slot adjacent to one of the inputs. In this case, we have to
          // swap an input with a non-input.
          for (int i = 0; i < 4; ++i)
            assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
                   "We can't handle any clobbers here!");
          assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
                 "Cannot have adjacent inputs here!");

          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;

          // We also have to update the final source mask in this case because
          // it may need to undo the above swap.
          for (int &M : FinalSourceHalfMask)
            if (M == (InputsFixed[0] ^ 1) + SourceOffset)
              M = InputsFixed[1] + SourceOffset;
            else if (M == InputsFixed[1] + SourceOffset)
              M = (InputsFixed[0] ^ 1) + SourceOffset;

          InputsFixed[1] = InputsFixed[0] ^ 1;
        }

        // Point everything at the fixed inputs.
        for (int &M : HalfMask)
          if (M == IncomingInputs[0])
            M = InputsFixed[0] + SourceOffset;
          else if (M == IncomingInputs[1])
            M = InputsFixed[1] + SourceOffset;

        IncomingInputs[0] = InputsFixed[0] + SourceOffset;
        IncomingInputs[1] = InputsFixed[1] + SourceOffset;
      }
    } else {
      llvm_unreachable("Unhandled input size!");
    }

    // Now hoist the DWord down to the right half.
    int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
    assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
    PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
    for (int &M : HalfMask)
      for (int Input : IncomingInputs)
        if (M == Input)
          M = FreeDWord * 2 + Input % 2;
  };
  moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
                        /*SourceOffset*/ 4, /*DestOffset*/ 0);
  moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
                        /*SourceOffset*/ 0, /*DestOffset*/ 4);
  // Now enact all the shuffles we've computed to move the inputs into their
  // target half.
  if (!isNoopShuffleMask(PSHUFLMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
  if (!isNoopShuffleMask(PSHUFHMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
  if (!isNoopShuffleMask(PSHUFDMask))
    V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                    DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                                DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
                                getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));

  // At this point, each half should contain all its inputs, and we can then
  // just shuffle them into their final position.
  assert(std::count_if(LoMask.begin(), LoMask.end(),
                       [](int M) { return M >= 4; }) == 0 &&
         "Failed to lift all the high half inputs to the low mask!");
  assert(std::count_if(HiMask.begin(), HiMask.end(),
                       [](int M) { return M >= 0 && M < 4; }) == 0 &&
         "Failed to lift all the low half inputs to the high mask!");

  // Do a half shuffle for the low mask.
  if (!isNoopShuffleMask(LoMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(LoMask, DAG));

  // Do a half shuffle with the high mask after shifting its values down.
  for (int &M : HiMask)
    if (M >= 0)
      M -= 4;
  if (!isNoopShuffleMask(HiMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(HiMask, DAG));

  return V;
}
/// \brief Detect whether the mask pattern should be lowered through
/// interleaving.
///
/// This essentially tests whether viewing the mask as an interleaving of two
/// sub-sequences reduces the cross-input traffic of a blend operation. If so,
/// lowering it through interleaving is a significantly better strategy.
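///
/// For example, in the v8i16 mask <0, 8, 1, 9, 2, 10, 3, 11> every even
/// result element comes from V1 and every odd one from V2, so the interleaved
/// cross-input count is zero while the split strategy would cross four
/// inputs; interleaving clearly wins there.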
static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
  int NumEvenInputs[2] = {0, 0};
  int NumOddInputs[2] = {0, 0};
  int NumLoInputs[2] = {0, 0};
  int NumHiInputs[2] = {0, 0};
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] == -1)
      continue;

    int InputIdx = Mask[i] >= Size;

    if (i < Size / 2)
      ++NumLoInputs[InputIdx];
    else
      ++NumHiInputs[InputIdx];

    if ((i % 2) == 0)
      ++NumEvenInputs[InputIdx];
    else
      ++NumOddInputs[InputIdx];
  }

  // The minimum number of cross-input results for both the interleaved and
  // split cases. If interleaving results in fewer cross-input results, return
  // true.
  int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
                                    NumEvenInputs[0] + NumOddInputs[1]);
  int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
                              NumLoInputs[0] + NumHiInputs[1]);
  return InterleavedCrosses < SplitCrosses;
}
/// \brief Blend two v8i16 vectors using a naive unpack strategy.
///
/// This strategy only works when the inputs from each vector fit into a single
/// half of that vector, and generally there are not so many inputs as to leave
/// the in-place shuffles required highly constrained (and thus expensive). It
/// shifts all the inputs into a single side of both input vectors and then
/// uses an unpack to interleave these inputs in a single vector. At that
/// point, we will fall back on the generic single input shuffle lowering.
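///
/// After the unpack merges the halves, a former mask index M (V1 indices 0-7,
/// V2 indices 8-15, all moved into the merge half modulo 4) lands at lane
/// 2 * (M % 4) + (M / 8) of the interleaved vector, which is exactly the
/// munging applied to the mask at the end of this routine.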
static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
                                                 SDValue V2,
                                                 MutableArrayRef<int> Mask,
                                                 const X86Subtarget *Subtarget,
                                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
  for (int i = 0; i < 8; ++i)
    if (Mask[i] >= 0 && Mask[i] < 4)
      LoV1Inputs.push_back(i);
    else if (Mask[i] >= 4 && Mask[i] < 8)
      HiV1Inputs.push_back(i);
    else if (Mask[i] >= 8 && Mask[i] < 12)
      LoV2Inputs.push_back(i);
    else if (Mask[i] >= 12)
      HiV2Inputs.push_back(i);

  int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
  int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();

  assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
  assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
  assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");

  bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
                     HiV1Inputs.size() + HiV2Inputs.size();

  auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
                              ArrayRef<int> HiInputs, bool MoveToLo,
                              int MaskOffset) {
    ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
    ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
    if (BadInputs.empty())
      return V;

    int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
    int MoveOffset = MoveToLo ? 0 : 4;

    if (GoodInputs.empty()) {
      for (int BadInput : BadInputs) {
        MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
        Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
      }
    } else {
      if (GoodInputs.size() == 2) {
        // If the low inputs are spread across two dwords, pack them into
        // a single dword.
        MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
        MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
        Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
        Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
      } else {
        // Otherwise pin the good inputs.
        for (int GoodInput : GoodInputs)
          MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
      }

      if (BadInputs.size() == 2) {
        // If we have two bad inputs then there may be either one or two good
        // inputs fixed in place. Find a fixed input, and then find the *other*
        // two adjacent indices by using modular arithmetic.
        int GoodMaskIdx =
            std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
                         [](int M) { return M >= 0; }) -
            std::begin(MoveMask);
        int MoveMaskIdx =
            ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
        assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
        assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
        MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
        MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
        Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
        Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
      } else {
        assert(BadInputs.size() == 1 && "All sizes handled");
        int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
                                    std::end(MoveMask), -1) -
                          std::begin(MoveMask);
        MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
        Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
      }
    }

    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                MoveMask);
  };
  V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
                        /*MaskOffset*/ 0);
  V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
                        /*MaskOffset*/ 8);

  // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
  // cross-half traffic in the final shuffle.

  // Munge the mask to be a single-input mask after the unpack merges the
  // results.
  for (int &M : Mask)
    if (M != -1)
      M = 2 * (M % 4) + (M / 8);

  return DAG.getVectorShuffle(
      MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
                                  DL, MVT::v8i16, V1, V2),
      DAG.getUNDEF(MVT::v8i16), Mask);
}
/// \brief Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> OrigMask = SVOp->getMask();
  int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
                        OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
  MutableArrayRef<int> Mask(MaskStorage);

  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
    return ZExt;

  auto isV1 = [](int M) { return M >= 0 && M < 8; };
  auto isV2 = [](int M) { return M >= 8; };

  int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
  int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);

  if (NumV2Inputs == 0)
    return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);

  assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
                            "to be V1-input shuffles.");

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v8i16, V1, V2, Mask, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Inputs == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 2, 10, 3, 11))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
  if (isShuffleEquivalent(Mask, 4, 12, 5, 13, 6, 14, 7, 15))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
    return Rotate;

  if (NumV1Inputs + NumV2Inputs <= 4)
    return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);

  // Check whether an interleaving lowering is likely to be more efficient.
  // This isn't perfect but it is a strong heuristic that tends to work well on
  // the kinds of shuffles that show up in practice.
  //
  // FIXME: Handle 1x, 2x, and 4x interleaving.
  if (shouldLowerAsInterleaving(Mask)) {
    // FIXME: Figure out whether we should pack these into the low or high
    // halves.

    int EMask[8], OMask[8];
    for (int i = 0; i < 4; ++i) {
      EMask[i] = Mask[2 * i];
      OMask[i] = Mask[2 * i + 1];
      EMask[i + 4] = -1;
      OMask[i + 4] = -1;
    }

    SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
    SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);

    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
  }

  int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};

  for (int i = 0; i < 4; ++i) {
    LoBlendMask[i] = Mask[i];
    HiBlendMask[i] = Mask[i + 4];
  }

  SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
  SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
  LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
  HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);

  return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                     DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
}
/// \brief Check whether a compaction lowering can be done by dropping even
/// elements and compute how many times even elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
///
/// \returns N above, or the number of times even elements must be dropped if
/// there is such a number. Otherwise returns zero.
9401 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9402 // Figure out whether we're looping over two inputs or just one.
9403 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9405 // The modulus for the shuffle vector entries is based on whether this is
9406 // a single input or not.
9407 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9408 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9409 "We should only be called with masks with a power-of-2 size!");
9411 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9413 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9414 // and 2^3 simultaneously. This is because we may have ambiguity with
9415 // partially undef inputs.
9416 bool ViableForN[3] = {true, true, true};
9418 for (int i = 0, e = Mask.size(); i < e; ++i) {
9419 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9420 // want.
9421 if (Mask[i] == -1)
9422 continue;
9424 bool IsAnyViable = false;
9425 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9426 if (ViableForN[j]) {
9427 uint64_t N = j + 1;
9429 // The shuffle mask must be equal to (i * 2^N) % M.
9430 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9431 IsAnyViable = true;
9432 else
9433 ViableForN[j] = false;
9434 }
9435 // Early exit if we exhaust the possible powers of two.
9436 if (!IsAnyViable)
9437 break;
9438 }
9440 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9441 if (ViableForN[j])
9442 return j + 1;
9444 // Return 0 as there is no viable power of two.
9445 return 0;
9446 }
9448 /// \brief Generic lowering of v16i8 shuffles.
9450 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9451 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9452 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9453 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9454 /// back together.
9455 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9456 const X86Subtarget *Subtarget,
9457 SelectionDAG &DAG) {
9458 SDLoc DL(Op);
9459 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9460 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9461 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9462 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9463 ArrayRef<int> OrigMask = SVOp->getMask();
9464 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9466 // Try to use byte shift instructions.
9467 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9468 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9469 return Shift;
9471 // Try to use byte rotation instructions.
9472 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9473 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9474 return Rotate;
9476 // Try to use a zext lowering.
9477 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9478 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9479 return ZExt;
9481 int MaskStorage[16] = {
9482 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9483 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9484 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9485 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9486 MutableArrayRef<int> Mask(MaskStorage);
9487 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9488 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
9490 int NumV2Elements =
9491 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9493 // For single-input shuffles, there are some nicer lowering tricks we can use.
9494 if (NumV2Elements == 0) {
9495 // Check for being able to broadcast a single element.
9496 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9497 Mask, Subtarget, DAG))
9498 return Broadcast;
9500 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9501 // Notably, this handles splat and partial-splat shuffles more efficiently.
9502 // However, it only makes sense if the pre-duplication shuffle simplifies
9503 // things significantly. Currently, this means we need to be able to
9504 // express the pre-duplication shuffle as an i16 shuffle.
9506 // FIXME: We should check for other patterns which can be widened into an
9507 // i16 shuffle as well.
9508 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9509 for (int i = 0; i < 16; i += 2)
9510 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9511 return false;
9512 return true;
9513 };
9515 auto tryToWidenViaDuplication = [&]() -> SDValue {
9516 if (!canWidenViaDuplication(Mask))
9517 return SDValue();
9518 SmallVector<int, 4> LoInputs;
9519 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9520 [](int M) { return M >= 0 && M < 8; });
9521 std::sort(LoInputs.begin(), LoInputs.end());
9522 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9523 LoInputs.end());
9524 SmallVector<int, 4> HiInputs;
9525 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9526 [](int M) { return M >= 8; });
9527 std::sort(HiInputs.begin(), HiInputs.end());
9528 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9529 HiInputs.end());
9531 bool TargetLo = LoInputs.size() >= HiInputs.size();
9532 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9533 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9535 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9536 SmallDenseMap<int, int, 8> LaneMap;
9537 for (int I : InPlaceInputs) {
9538 PreDupI16Shuffle[I/2] = I/2;
9539 LaneMap[I] = I;
9540 }
9541 int j = TargetLo ? 0 : 4, je = j + 4;
9542 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9543 // Check if j is already a shuffle of this input. This happens when
9544 // there are two adjacent bytes after we move the low one.
9545 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9546 // If we haven't yet mapped the input, search for a slot into which
9547 // we can map it.
9548 while (j < je && PreDupI16Shuffle[j] != -1)
9549 ++j;
9551 if (j == je)
9552 // We can't place the inputs into a single half with a simple i16
9553 // shuffle, so bail.
9554 return SDValue();
9555 // Map this input with the i16 shuffle.
9556 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9557 }
9559 // Update the lane map based on the mapping we ended up with.
9560 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9561 }
9562 V1 = DAG.getNode(
9563 ISD::BITCAST, DL, MVT::v16i8,
9564 DAG.getVectorShuffle(MVT::v8i16, DL,
9565 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9566 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9568 // Unpack the bytes to form the i16s that will be shuffled into place.
9569 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9570 MVT::v16i8, V1, V1);
9572 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9573 for (int i = 0; i < 16; ++i)
9574 if (Mask[i] != -1) {
9575 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9576 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9577 if (PostDupI16Shuffle[i / 2] == -1)
9578 PostDupI16Shuffle[i / 2] = MappedMask;
9579 else
9580 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9581 "Conflicting entries in the original shuffle!");
9582 }
9583 return DAG.getNode(
9584 ISD::BITCAST, DL, MVT::v16i8,
9585 DAG.getVectorShuffle(MVT::v8i16, DL,
9586 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9587 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9588 };
9589 if (SDValue V = tryToWidenViaDuplication())
9590 return V;
9591 }
9593 // Check whether an interleaving lowering is likely to be more efficient.
9594 // This isn't perfect but it is a strong heuristic that tends to work well on
9595 // the kinds of shuffles that show up in practice.
9597 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9598 if (shouldLowerAsInterleaving(Mask)) {
9599 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9600 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9601 });
9602 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9603 return (M >= 8 && M < 16) || M >= 24;
9604 });
9605 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9606 -1, -1, -1, -1, -1, -1, -1, -1};
9607 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9608 -1, -1, -1, -1, -1, -1, -1, -1};
9609 bool UnpackLo = NumLoHalf >= NumHiHalf;
9610 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9611 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9612 for (int i = 0; i < 8; ++i) {
9613 TargetEMask[i] = Mask[2 * i];
9614 TargetOMask[i] = Mask[2 * i + 1];
9615 }
9617 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9618 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9620 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9621 MVT::v16i8, Evens, Odds);
9622 }
9624 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9625 // with PSHUFB. It is important to do this before we attempt to generate any
9626 // blends but after all of the single-input lowerings. If the single input
9627 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9628 // want to preserve that and we can DAG combine any longer sequences into
9629 // a PSHUFB in the end. But once we start blending from multiple inputs,
9630 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9631 // and there are *very* few patterns that would actually be faster than the
9632 // PSHUFB approach because of its ability to zero lanes.
9634 // FIXME: The only exceptions to the above are blends which are exact
9635 // interleavings with direct instructions supporting them. We currently don't
9636 // handle those well here.
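// Editorial note (not in the original source): a sketch of the PSHUFB blend
// built below, assuming Mask = <0, 16, 1, 17, ...>. V1's control vector
// becomes <0, 0x80, 1, 0x80, ...> and V2's becomes <0x80, 0, 0x80, 1, ...>;
// PSHUFB writes zero for any index with the high bit (0x80) set, so OR-ing
// the two shuffled results yields the blended vector.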
9637 if (Subtarget->hasSSSE3()) {
9638 SDValue V1Mask[16];
9639 SDValue V2Mask[16];
9640 bool V1InUse = false;
9641 bool V2InUse = false;
9642 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9644 for (int i = 0; i < 16; ++i) {
9645 if (Mask[i] == -1) {
9646 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9647 } else {
9648 const int ZeroMask = 0x80;
9649 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9650 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
9651 if (Zeroable[i])
9652 V1Idx = V2Idx = ZeroMask;
9653 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9654 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9655 V1InUse |= (ZeroMask != V1Idx);
9656 V2InUse |= (ZeroMask != V2Idx);
9657 }
9658 }
9660 if (V1InUse)
9661 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
9662 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9663 if (V2InUse)
9664 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
9665 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9667 // If we need shuffled inputs from both, blend the two.
9668 if (V1InUse && V2InUse)
9669 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9670 if (V1InUse)
9671 return V1; // Single inputs are easy.
9672 if (V2InUse)
9673 return V2; // Single inputs are easy.
9674 // Shuffling to a zeroable vector.
9675 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
9676 }
9678 // There are special ways we can lower some single-element blends.
9679 if (NumV2Elements == 1)
9680 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9681 Mask, Subtarget, DAG))
9682 return V;
9684 // Check whether a compaction lowering can be done. This handles shuffles
9685 // which take every Nth element for some even N. See the helper function for
9686 // details.
9688 // We special case these as they can be particularly efficiently handled with
9689 // the PACKUSWB instruction on x86 and they show up in common patterns of
9690 // rearranging bytes to truncate wide elements.
9691 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9692 // NumEvenDrops is the power of two stride of the elements. Another way of
9693 // thinking about it is that we need to drop the even elements this many
9694 // times to get the original input.
9695 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9697 // First we need to zero all the dropped bytes.
9698 assert(NumEvenDrops <= 3 &&
9699 "No support for dropping even elements more than 3 times.");
9700 // We use the mask type to pick which bytes are preserved based on how many
9701 // elements are dropped.
9702 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9703 SDValue ByteClearMask =
9704 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9705 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
9706 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9707 if (!IsSingleInput)
9708 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9710 // Now pack things back together.
9711 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9712 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9713 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
9714 for (int i = 1; i < NumEvenDrops; ++i) {
9715 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
9716 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
9717 }
9719 return Result;
9720 }
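// Editorial note (not in the original source): for the two-input N = 1 mask
// <0, 2, 4, ..., 30>, both inputs are AND-ed with 0x00FF per i16 element so
// every odd byte is zero, and a single PACKUS then keeps exactly the even
// bytes of V1 followed by the even bytes of V2, with no saturation.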
9722 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9723 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9724 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9725 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9727 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
9728 MutableArrayRef<int> V1HalfBlendMask,
9729 MutableArrayRef<int> V2HalfBlendMask) {
9730 for (int i = 0; i < 8; ++i)
9731 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
9732 V1HalfBlendMask[i] = HalfMask[i];
9733 HalfMask[i] = i;
9734 } else if (HalfMask[i] >= 16) {
9735 V2HalfBlendMask[i] = HalfMask[i] - 16;
9736 HalfMask[i] = i + 8;
9737 }
9738 };
9739 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
9740 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
9742 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
9744 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
9745 MutableArrayRef<int> HiBlendMask) {
9746 SDValue V1, V2;
9747 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
9748 // them out and avoid using UNPCK{L,H} to extract the elements of V as
9749 // i16s.
9750 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
9751 [](int M) { return M >= 0 && M % 2 == 1; }) &&
9752 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
9753 [](int M) { return M >= 0 && M % 2 == 1; })) {
9754 // Use a mask to drop the high bytes.
9755 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
9756 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
9757 DAG.getConstant(0x00FF, MVT::v8i16));
9759 // This will be a single vector shuffle instead of a blend so nuke V2.
9760 V2 = DAG.getUNDEF(MVT::v8i16);
9762 // Squash the masks to point directly into V1.
9763 for (int &M : LoBlendMask)
9764 if (M >= 0)
9765 M /= 2;
9766 for (int &M : HiBlendMask)
9767 if (M >= 0)
9768 M /= 2;
9769 } else {
9770 // Otherwise just unpack the low half of V into V1 and the high half into
9771 // V2 so that we can blend them as i16s.
9772 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9773 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
9774 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9775 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
9776 }
9778 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9779 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9780 return std::make_pair(BlendedLo, BlendedHi);
9781 };
9782 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
9783 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
9784 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
9786 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
9787 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
9789 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
9790 }
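// Editorial note (not in the original source): at this point LoV and HiV are
// v8i16 vectors whose lanes each hold one result byte in their low byte (the
// high byte is zero, via either the 0x00FF mask or the unpack-with-zero
// path), so the final PACKUS emits bytes 0-7 from LoV and bytes 8-15 from HiV
// without saturating.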
9792 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
9794 /// This routine breaks down the specific type of 128-bit shuffle and
9795 /// dispatches to the lowering routines accordingly.
9796 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9797 MVT VT, const X86Subtarget *Subtarget,
9798 SelectionDAG &DAG) {
9799 switch (VT.SimpleTy) {
9800 case MVT::v2i64:
9801 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9802 case MVT::v2f64:
9803 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
9804 case MVT::v4i32:
9805 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9806 case MVT::v4f32:
9807 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
9808 case MVT::v8i16:
9809 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
9810 case MVT::v16i8:
9811 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
9813 default:
9814 llvm_unreachable("Unimplemented!");
9815 }
9816 }
9818 /// \brief Helper function to test whether a shuffle mask could be
9819 /// simplified by widening the elements being shuffled.
9821 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
9822 /// leaves it in an unspecified state.
9824 /// NOTE: This must handle normal vector shuffle masks and *target* vector
9825 /// shuffle masks. The latter have the special property of a '-2' representing
9826 /// a zero-ed lane of a vector.
9827 static bool canWidenShuffleElements(ArrayRef<int> Mask,
9828 SmallVectorImpl<int> &WidenedMask) {
9829 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
9830 // If both elements are undef, it's trivial.
9831 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
9832 WidenedMask.push_back(SM_SentinelUndef);
9833 continue;
9834 }
9836 // Check for an undef mask and a mask value properly aligned to fit with
9837 // a pair of values. If we find such a case, use the non-undef mask's value.
9838 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
9839 WidenedMask.push_back(Mask[i + 1] / 2);
9840 continue;
9841 }
9842 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
9843 WidenedMask.push_back(Mask[i] / 2);
9844 continue;
9845 }
9847 // When zeroing, we need to spread the zeroing across both lanes to widen.
9848 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
9849 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
9850 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
9851 WidenedMask.push_back(SM_SentinelZero);
9852 continue;
9853 }
9854 return false;
9855 }
9857 // Finally check if the two mask values are adjacent and aligned with
9858 // their pair.
9859 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
9860 WidenedMask.push_back(Mask[i] / 2);
9861 continue;
9862 }
9864 // Otherwise we can't safely widen the elements used in this shuffle.
9865 return false;
9866 }
9867 assert(WidenedMask.size() == Mask.size() / 2 &&
9868 "Incorrect size of mask after widening the elements!");
9873 /// \brief Generic routine to split a vector shuffle into half-sized shuffles.
9875 /// This routine just extracts two subvectors, shuffles them independently, and
9876 /// then concatenates them back together. This should work effectively with all
9877 /// AVX vector shuffle types.
9878 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
9879 SDValue V2, ArrayRef<int> Mask,
9880 SelectionDAG &DAG) {
9881 assert(VT.getSizeInBits() >= 256 &&
9882 "Only for 256-bit or wider vector shuffles!");
9883 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
9884 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
9886 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
9887 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
9889 int NumElements = VT.getVectorNumElements();
9890 int SplitNumElements = NumElements / 2;
9891 MVT ScalarVT = VT.getScalarType();
9892 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
9894 SDValue LoV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
9895 DAG.getIntPtrConstant(0));
9896 SDValue HiV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
9897 DAG.getIntPtrConstant(SplitNumElements));
9898 SDValue LoV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
9899 DAG.getIntPtrConstant(0));
9900 SDValue HiV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
9901 DAG.getIntPtrConstant(SplitNumElements));
9903 // Now create two 4-way blends of these half-width vectors.
9904 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
9905 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
9906 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
9907 for (int i = 0; i < SplitNumElements; ++i) {
9908 int M = HalfMask[i];
9909 if (M >= NumElements) {
9910 if (M >= NumElements + SplitNumElements)
9911 UseHiV2 = true;
9912 else
9913 UseLoV2 = true;
9914 V2BlendMask.push_back(M - NumElements);
9915 V1BlendMask.push_back(-1);
9916 BlendMask.push_back(SplitNumElements + i);
9917 } else if (M >= 0) {
9918 if (M >= SplitNumElements)
9919 UseHiV1 = true;
9920 else
9921 UseLoV1 = true;
9922 V2BlendMask.push_back(-1);
9923 V1BlendMask.push_back(M);
9924 BlendMask.push_back(i);
9925 } else {
9926 V2BlendMask.push_back(-1);
9927 V1BlendMask.push_back(-1);
9928 BlendMask.push_back(-1);
9929 }
9930 }
9932 // Because the lowering happens after all combining takes place, we need to
9933 // manually combine these blend masks as much as possible so that we create
9934 // a minimal number of high-level vector shuffle nodes.
9936 // First try just blending the halves of V1 or V2.
9937 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
9938 return DAG.getUNDEF(SplitVT);
9939 if (!UseLoV2 && !UseHiV2)
9940 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
9941 if (!UseLoV1 && !UseHiV1)
9942 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
9944 SDValue V1Blend, V2Blend;
9945 if (UseLoV1 && UseHiV1) {
9946 V1Blend =
9947 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
9948 } else {
9949 // We only use half of V1 so map the usage down into the final blend mask.
9950 V1Blend = UseLoV1 ? LoV1 : HiV1;
9951 for (int i = 0; i < SplitNumElements; ++i)
9952 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
9953 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
9954 }
9955 if (UseLoV2 && UseHiV2) {
9956 V2Blend =
9957 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
9958 } else {
9959 // We only use half of V2 so map the usage down into the final blend mask.
9960 V2Blend = UseLoV2 ? LoV2 : HiV2;
9961 for (int i = 0; i < SplitNumElements; ++i)
9962 if (BlendMask[i] >= SplitNumElements)
9963 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
9964 }
9965 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
9966 };
9967 SDValue Lo = HalfBlend(LoMask);
9968 SDValue Hi = HalfBlend(HiMask);
9969 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
9970 }
9972 /// \brief Either split a vector in halves or decompose the shuffles and the
9973 /// blend.
9975 /// This is provided as a good fallback for many lowerings of non-single-input
9976 /// shuffles with more than one 128-bit lane. In those cases, we want to select
9977 /// between splitting the shuffle into 128-bit components and stitching those
9978 /// back together vs. extracting the single-input shuffles and blending those
9979 /// results.
9980 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
9981 SDValue V2, ArrayRef<int> Mask,
9982 SelectionDAG &DAG) {
9983 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
9984 "lower single-input shuffles as it "
9985 "could then recurse on itself.");
9986 int Size = Mask.size();
9988 // If this can be modeled as a broadcast of two elements followed by a blend,
9989 // prefer that lowering. This is especially important because broadcasts can
9990 // often fold with memory operands.
9991 auto DoBothBroadcast = [&] {
9992 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
9993 for (int M : Mask)
9994 if (M >= Size) {
9995 if (V2BroadcastIdx == -1)
9996 V2BroadcastIdx = M - Size;
9997 else if (M - Size != V2BroadcastIdx)
9998 return false;
9999 } else if (M >= 0) {
10000 if (V1BroadcastIdx == -1)
10001 V1BroadcastIdx = M;
10002 else if (M != V1BroadcastIdx)
10003 return false;
10004 }
10005 return true;
10006 };
10007 if (DoBothBroadcast())
10008 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10009 DAG);
10011 // If the inputs all stem from a single 128-bit lane of each input, then we
10012 // split them rather than blending because the split will decompose to
10013 // unusually few instructions.
10014 int LaneCount = VT.getSizeInBits() / 128;
10015 int LaneSize = Size / LaneCount;
10016 SmallBitVector LaneInputs[2];
10017 LaneInputs[0].resize(LaneCount, false);
10018 LaneInputs[1].resize(LaneCount, false);
10019 for (int i = 0; i < Size; ++i)
10020 if (Mask[i] >= 0)
10021 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10022 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10023 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10025 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10026 // that the decomposed single-input shuffles don't end up here.
10027 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10028 }
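// Editorial note (not in the original source): DoBothBroadcast() accepts a
// mask such as the v4f64 mask <0, 4, 0, 4>, where every V1 reference is
// element 0 and every V2 reference is element 4; each input is then lowered
// as a broadcast and the two broadcasts are blended.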
10030 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10031 /// a permutation and blend of those lanes.
10033 /// This essentially blends the out-of-lane inputs to each lane into the lane
10034 /// from a permuted copy of the vector. This lowering strategy results in four
10035 /// instructions in the worst case for a single-input cross lane shuffle which
10036 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10037 /// of. Special cases for each particular shuffle pattern should be handled
10038 /// prior to trying this lowering.
10039 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10040 SDValue V1, SDValue V2,
10041 ArrayRef<int> Mask,
10042 SelectionDAG &DAG) {
10043 // FIXME: This should probably be generalized for 512-bit vectors as well.
10044 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10045 int LaneSize = Mask.size() / 2;
10047 // If there are only inputs from one 128-bit lane, splitting will in fact be
10048 // less expensive. The flags track whether the given lane contains an element
10049 // that crosses to another lane.
10050 bool LaneCrossing[2] = {false, false};
10051 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10052 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10053 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10054 if (!LaneCrossing[0] || !LaneCrossing[1])
10055 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10057 if (isSingleInputShuffleMask(Mask)) {
10058 SmallVector<int, 32> FlippedBlendMask;
10059 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10060 FlippedBlendMask.push_back(
10061 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10062 ? Mask[i]
10063 : Mask[i] % LaneSize +
10064 (i / LaneSize) * LaneSize + Size));
10066 // Flip the vector, and blend the results which should now be in-lane. The
10067 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10068 // 5 for the high source. The value 3 selects the high half of source 2 and
10069 // the value 2 selects the low half of source 2. We only use source 2 to
10070 // allow folding it into a memory operand.
10071 unsigned PERMMask = 3 | 2 << 4;
10072 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10073 V1, DAG.getConstant(PERMMask, MVT::i8));
10074 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10075 }
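// Editorial note (not in the original source): for the single-input v4f64
// mask <2, 1, 3, 0>, elements 0 and 3 cross lanes, so FlippedBlendMask
// becomes <4, 1, 3, 6>; indices >= 4 select from Flipped (V1 with its 128-bit
// halves swapped), which puts every requested element back in-lane.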
10077 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10078 // will be handled by the above logic and a blend of the results, much like
10079 // other patterns in AVX.
10080 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10083 /// \brief Handle lowering 2-lane 128-bit shuffles.
10084 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10085 SDValue V2, ArrayRef<int> Mask,
10086 const X86Subtarget *Subtarget,
10087 SelectionDAG &DAG) {
10088 // Blends are faster and handle all the non-lane-crossing cases.
10089 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10090 Subtarget, DAG))
10091 return Blend;
10093 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10094 VT.getVectorNumElements() / 2);
10095 // Check for patterns which can be matched with a single insert of a 128-bit
10096 // subvector.
10097 if (isShuffleEquivalent(Mask, 0, 1, 0, 1) ||
10098 isShuffleEquivalent(Mask, 0, 1, 4, 5)) {
10099 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10100 DAG.getIntPtrConstant(0));
10101 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10102 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10103 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10105 if (isShuffleEquivalent(Mask, 0, 1, 6, 7)) {
10106 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10107 DAG.getIntPtrConstant(0));
10108 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10109 DAG.getIntPtrConstant(2));
10110 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10113 // Otherwise form a 128-bit permutation.
10114 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
10115 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10116 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10117 DAG.getConstant(PermMask, MVT::i8));
10118 }
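// Editorial note (not in the original source): with the widened v4f64 mask
// <2, 3, 4, 5>, PermMask = (2 / 2) | (4 / 2) << 4 = 0x21, i.e. the low
// 128-bit lane comes from lane 1 (the high half of V1) and the high lane from
// lane 2 (the low half of V2).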
10120 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10121 /// shuffling each lane.
10123 /// This will only succeed when the result of fixing the 128-bit lanes results
10124 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10125 /// each 128-bit lane. This handles many cases where we can quickly blend away
10126 /// the lane crosses early and then use simpler shuffles within each lane.
10128 /// FIXME: It might be worthwhile at some point to support this without
10129 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10130 /// in x86 only floating point has interesting non-repeating shuffles, and even
10131 /// those are still *marginally* more expensive.
10132 static SDValue lowerVectorShuffleByMerging128BitLanes(
10133 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10134 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10135 assert(!isSingleInputShuffleMask(Mask) &&
10136 "This is only useful with multiple inputs.");
10138 int Size = Mask.size();
10139 int LaneSize = 128 / VT.getScalarSizeInBits();
10140 int NumLanes = Size / LaneSize;
10141 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10143 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10144 // check whether the in-128-bit lane shuffles share a repeating pattern.
10145 SmallVector<int, 4> Lanes;
10146 Lanes.resize(NumLanes, -1);
10147 SmallVector<int, 4> InLaneMask;
10148 InLaneMask.resize(LaneSize, -1);
10149 for (int i = 0; i < Size; ++i) {
10150 if (Mask[i] < 0)
10151 continue;
10153 int j = i / LaneSize;
10155 if (Lanes[j] < 0) {
10156 // First entry we've seen for this lane.
10157 Lanes[j] = Mask[i] / LaneSize;
10158 } else if (Lanes[j] != Mask[i] / LaneSize) {
10159 // This doesn't match the lane selected previously!
10160 return SDValue();
10161 }
10163 // Check that within each lane we have a consistent shuffle mask.
10164 int k = i % LaneSize;
10165 if (InLaneMask[k] < 0) {
10166 InLaneMask[k] = Mask[i] % LaneSize;
10167 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10168 // This doesn't fit a repeating in-lane mask.
10169 return SDValue();
10170 }
10171 }
10173 // First shuffle the lanes into place.
10174 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10175 VT.getSizeInBits() / 64);
10176 SmallVector<int, 8> LaneMask;
10177 LaneMask.resize(NumLanes * 2, -1);
10178 for (int i = 0; i < NumLanes; ++i)
10179 if (Lanes[i] >= 0) {
10180 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10181 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10182 }
10184 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10185 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10186 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10188 // Cast it back to the type we actually want.
10189 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10191 // Now do a simple shuffle that isn't lane crossing.
10192 SmallVector<int, 8> NewMask;
10193 NewMask.resize(Size, -1);
10194 for (int i = 0; i < Size; ++i)
10195 if (Mask[i] >= 0)
10196 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10197 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10198 "Must not introduce lane crosses at this point!");
10200 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10201 }
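// Editorial note (not in the original source): for the v8f32 mask
// <0, 1, 2, 3, 12, 13, 14, 15>, Lanes = <0, 3>, so the lane-fixing shuffle
// uses the i64/f64-domain mask <0, 1, 6, 7>, and the remaining in-lane
// shuffle is the identity mask <0, 1, 2, 3, 4, 5, 6, 7>.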
10203 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10204 /// given mask.
10206 /// This returns true if the elements from a particular input are already in the
10207 /// slot required by the given mask and require no permutation.
10208 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10209 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10210 int Size = Mask.size();
10211 for (int i = 0; i < Size; ++i)
10212 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10213 return false;
10215 return true;
10216 }
10218 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10220 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10221 /// isn't available.
10222 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10223 const X86Subtarget *Subtarget,
10224 SelectionDAG &DAG) {
10225 SDLoc DL(Op);
10226 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10227 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10228 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10229 ArrayRef<int> Mask = SVOp->getMask();
10230 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10232 SmallVector<int, 4> WidenedMask;
10233 if (canWidenShuffleElements(Mask, WidenedMask))
10234 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10235 DAG);
10237 if (isSingleInputShuffleMask(Mask)) {
10238 // Check for being able to broadcast a single element.
10239 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10240 Mask, Subtarget, DAG))
10241 return Broadcast;
10243 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10244 // Non-half-crossing single input shuffles can be lowered with an
10245 // interleaved permutation.
10246 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10247 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10248 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10249 DAG.getConstant(VPERMILPMask, MVT::i8));
10250 }
10252 // With AVX2 we have direct support for this permutation.
10253 if (Subtarget->hasAVX2())
10254 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10255 getV4X86ShuffleImm8ForMask(Mask, DAG));
10257 // Otherwise, fall back.
10258 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10259 DAG);
10260 }
10262 // X86 has dedicated unpack instructions that can handle specific blend
10263 // operations: UNPCKH and UNPCKL.
10264 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10265 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10266 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10267 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10269 // If we have a single input to the zero element, insert that into V1 if we
10270 // can do so cheaply.
10271 int NumV2Elements =
10272 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10273 if (NumV2Elements == 1 && Mask[0] >= 4)
10274 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10275 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10276 return Insertion;
10278 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10279 Subtarget, DAG))
10280 return Blend;
10282 // Check if the blend happens to exactly fit that of SHUFPD.
10283 if ((Mask[0] == -1 || Mask[0] < 2) &&
10284 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10285 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10286 (Mask[3] == -1 || Mask[3] >= 6)) {
10287 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10288 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10289 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10290 DAG.getConstant(SHUFPDMask, MVT::i8));
10291 }
10292 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10293 (Mask[1] == -1 || Mask[1] < 2) &&
10294 (Mask[2] == -1 || Mask[2] >= 6) &&
10295 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10296 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10297 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10298 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10299 DAG.getConstant(SHUFPDMask, MVT::i8));
10300 }
10302 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10303 // shuffle. However, if we have AVX2 and either inputs are already in place,
10304 // we will be able to shuffle even across lanes the other input in a single
10305 // instruction so skip this pattern.
10306 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10307 isShuffleMaskInputInPlace(1, Mask))))
10308 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10309 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10310 return Result;
10312 // If we have AVX2 then we always want to lower with a blend because at v4 we
10313 // can fully permute the elements.
10314 if (Subtarget->hasAVX2())
10315 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10316 Mask, DAG);
10318 // Otherwise fall back on generic lowering.
10319 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10320 }
10322 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10324 /// This routine is only called when we have AVX2 and thus a reasonable
10325 /// instruction set for v4i64 shuffling.
10326 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10327 const X86Subtarget *Subtarget,
10328 SelectionDAG &DAG) {
10329 SDLoc DL(Op);
10330 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10331 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10332 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10333 ArrayRef<int> Mask = SVOp->getMask();
10334 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10335 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10337 SmallVector<int, 4> WidenedMask;
10338 if (canWidenShuffleElements(Mask, WidenedMask))
10339 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10340 DAG);
10342 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10343 Subtarget, DAG))
10344 return Blend;
10346 // Check for being able to broadcast a single element.
10347 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10348 Mask, Subtarget, DAG))
10349 return Broadcast;
10351 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10352 // use lower latency instructions that will operate on both 128-bit lanes.
10353 SmallVector<int, 2> RepeatedMask;
10354 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10355 if (isSingleInputShuffleMask(Mask)) {
10356 int PSHUFDMask[] = {-1, -1, -1, -1};
10357 for (int i = 0; i < 2; ++i)
10358 if (RepeatedMask[i] >= 0) {
10359 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10360 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10361 }
10362 return DAG.getNode(
10363 ISD::BITCAST, DL, MVT::v4i64,
10364 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10365 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10366 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10367 }
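// Editorial note (not in the original source): if the repeated v2i64 mask is
// <1, 0>, the widened v8i32 PSHUFD mask is <2, 3, 0, 1>, which swaps the two
// i64 elements inside each 128-bit lane using the cheaper integer-domain
// shuffle.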
10369 // Use dedicated unpack instructions for masks that match their pattern.
10370 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10371 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10372 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10373 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10374 }
10376 // AVX2 provides a direct instruction for permuting a single input across
10377 // lanes.
10378 if (isSingleInputShuffleMask(Mask))
10379 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10380 getV4X86ShuffleImm8ForMask(Mask, DAG));
10382 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10383 // shuffle. However, if we have AVX2 and either inputs are already in place,
10384 // we will be able to shuffle even across lanes the other input in a single
10385 // instruction so skip this pattern.
10386 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10387 isShuffleMaskInputInPlace(1, Mask))))
10388 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10389 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10390 return Result;
10392 // Otherwise fall back on generic blend lowering.
10393 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10394 Mask, DAG);
10395 }
10397 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10399 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10400 /// isn't available.
10401 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10402 const X86Subtarget *Subtarget,
10403 SelectionDAG &DAG) {
10404 SDLoc DL(Op);
10405 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10406 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10407 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10408 ArrayRef<int> Mask = SVOp->getMask();
10409 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10411 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10412 Subtarget, DAG))
10413 return Blend;
10415 // Check for being able to broadcast a single element.
10416 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10417 Mask, Subtarget, DAG))
10418 return Broadcast;
10420 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10421 // options to efficiently lower the shuffle.
10422 SmallVector<int, 4> RepeatedMask;
10423 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10424 assert(RepeatedMask.size() == 4 &&
10425 "Repeated masks must be half the mask width!");
10426 if (isSingleInputShuffleMask(Mask))
10427 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10428 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10430 // Use dedicated unpack instructions for masks that match their pattern.
10431 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10432 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10433 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10434 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10436 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10437 // have already handled any direct blends. We also need to squash the
10438 // repeated mask into a simulated v4f32 mask.
10439 for (int i = 0; i < 4; ++i)
10440 if (RepeatedMask[i] >= 8)
10441 RepeatedMask[i] -= 4;
10442 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10443 }
10445 // If we have a single input shuffle with different shuffle patterns in the
10446 // two 128-bit lanes use the variable mask to VPERMILPS.
10447 if (isSingleInputShuffleMask(Mask)) {
10448 SDValue VPermMask[8];
10449 for (int i = 0; i < 8; ++i)
10450 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10451 : DAG.getConstant(Mask[i], MVT::i32);
10452 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10453 return DAG.getNode(
10454 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10455 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10457 if (Subtarget->hasAVX2())
10458 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10459 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10460 DAG.getNode(ISD::BUILD_VECTOR, DL,
10461 MVT::v8i32, VPermMask)),
10462 V1);
10464 // Otherwise, fall back.
10465 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10466 DAG);
10467 }
10469 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10470 // shuffle.
10471 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10472 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10473 return Result;
10475 // If we have AVX2 then we always want to lower with a blend because at v8 we
10476 // can fully permute the elements.
10477 if (Subtarget->hasAVX2())
10478 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10479 Mask, DAG);
10481 // Otherwise fall back on generic lowering.
10482 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10483 }
10485 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10487 /// This routine is only called when we have AVX2 and thus a reasonable
10488 /// instruction set for v8i32 shuffling..
10489 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10490 const X86Subtarget *Subtarget,
10491 SelectionDAG &DAG) {
10492 SDLoc DL(Op);
10493 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10494 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10495 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10496 ArrayRef<int> Mask = SVOp->getMask();
10497 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10498 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10500 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10501 Subtarget, DAG))
10502 return Blend;
10504 // Check for being able to broadcast a single element.
10505 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10506 Mask, Subtarget, DAG))
10507 return Broadcast;
10509 // If the shuffle mask is repeated in each 128-bit lane we can use more
10510 // efficient instructions that mirror the shuffles across the two 128-bit
10511 // lanes.
10512 SmallVector<int, 4> RepeatedMask;
10513 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10514 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10515 if (isSingleInputShuffleMask(Mask))
10516 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10517 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10519 // Use dedicated unpack instructions for masks that match their pattern.
10520 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10521 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10522 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10523 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10524 }
10526 // If the shuffle patterns aren't repeated but it is a single input, directly
10527 // generate a cross-lane VPERMD instruction.
10528 if (isSingleInputShuffleMask(Mask)) {
10529 SDValue VPermMask[8];
10530 for (int i = 0; i < 8; ++i)
10531 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10532 : DAG.getConstant(Mask[i], MVT::i32);
10533 return DAG.getNode(
10534 X86ISD::VPERMV, DL, MVT::v8i32,
10535 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10536 }
10538 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10539 // shuffle.
10540 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10541 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10542 return Result;
10544 // Otherwise fall back on generic blend lowering.
10545 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10546 Mask, DAG);
10547 }
10549 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10551 /// This routine is only called when we have AVX2 and thus a reasonable
10552 /// instruction set for v16i16 shuffling.
10553 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10554 const X86Subtarget *Subtarget,
10555 SelectionDAG &DAG) {
10556 SDLoc DL(Op);
10557 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10558 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10559 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10560 ArrayRef<int> Mask = SVOp->getMask();
10561 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10562 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10564 // Check for being able to broadcast a single element.
10565 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10566 Mask, Subtarget, DAG))
10567 return Broadcast;
10569 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10570 Subtarget, DAG))
10571 return Blend;
10573 // Use dedicated unpack instructions for masks that match their pattern.
10574 if (isShuffleEquivalent(Mask,
10575 // First 128-bit lane:
10576 0, 16, 1, 17, 2, 18, 3, 19,
10577 // Second 128-bit lane:
10578 8, 24, 9, 25, 10, 26, 11, 27))
10579 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10580 if (isShuffleEquivalent(Mask,
10581 // First 128-bit lane:
10582 4, 20, 5, 21, 6, 22, 7, 23,
10583 // Second 128-bit lane:
10584 12, 28, 13, 29, 14, 30, 15, 31))
10585 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10587 if (isSingleInputShuffleMask(Mask)) {
10588 // There are no generalized cross-lane shuffle operations available on i16
10589 // element types.
10590 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10591 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10592 Mask, DAG);
10594 SDValue PSHUFBMask[32];
10595 for (int i = 0; i < 16; ++i) {
10596 if (Mask[i] == -1) {
10597 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10598 continue;
10599 }
10601 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10602 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10603 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10604 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
10605 }
10606 return DAG.getNode(
10607 ISD::BITCAST, DL, MVT::v16i16,
10608 DAG.getNode(
10609 X86ISD::PSHUFB, DL, MVT::v32i8,
10610 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10611 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
10612 }
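// Editorial note (not in the original source): each i16 lane M expands to the
// byte pair <2M, 2M+1> in the v32i8 control vector; e.g. the in-lane v16i16
// mask entry 3 becomes byte indices 6 and 7 for the PSHUFB.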
10614 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10615 // shuffle.
10616 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10617 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10618 return Result;
10620 // Otherwise fall back on generic lowering.
10621 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10622 }
10624 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10626 /// This routine is only called when we have AVX2 and thus a reasonable
10627 /// instruction set for v32i8 shuffling.
10628 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10629 const X86Subtarget *Subtarget,
10630 SelectionDAG &DAG) {
10631 SDLoc DL(Op);
10632 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10633 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10634 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10635 ArrayRef<int> Mask = SVOp->getMask();
10636 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10637 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10639 // Check for being able to broadcast a single element.
10640 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
10641 Mask, Subtarget, DAG))
10642 return Broadcast;
10644 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
10645 Subtarget, DAG))
10646 return Blend;
10648 // Use dedicated unpack instructions for masks that match their pattern.
10649 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
10650 // 256-bit lanes.
10651 if (isShuffleEquivalent(
10652 Mask,
10653 // First 128-bit lane:
10654 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
10655 // Second 128-bit lane:
10656 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
10657 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
10658 if (isShuffleEquivalent(
10659 Mask,
10660 // First 128-bit lane:
10661 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
10662 // Second 128-bit lane:
10663 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
10664 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
10666 if (isSingleInputShuffleMask(Mask)) {
10667 // There are no generalized cross-lane shuffle operations available on i8
10668 // element types.
10669 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
10670 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
10671 Mask, DAG);
10673 SDValue PSHUFBMask[32];
10674 for (int i = 0; i < 32; ++i)
10675 PSHUFBMask[i] =
10676 Mask[i] < 0
10677 ? DAG.getUNDEF(MVT::i8)
10678 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
10680 return DAG.getNode(
10681 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
10682 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
10683 }
10685 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10686 // shuffle.
10687 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10688 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
10689 return Result;
10691 // Otherwise fall back on generic lowering.
10692 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
10693 }
10695 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
10697 /// This routine either breaks down the specific type of a 256-bit x86 vector
10698 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
10699 /// together based on the available instructions.
10700 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10701 MVT VT, const X86Subtarget *Subtarget,
10702 SelectionDAG &DAG) {
10703 SDLoc DL(Op);
10704 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10705 ArrayRef<int> Mask = SVOp->getMask();
10707 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
10708 // check for those subtargets here and avoid much of the subtarget querying in
10709 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
10710 // ability to manipulate a 256-bit vector with integer types. Since we'll use
10711 // floating point types there eventually, just immediately cast everything to
10712 // a float and operate entirely in that domain.
10713 if (VT.isInteger() && !Subtarget->hasAVX2()) {
10714 int ElementBits = VT.getScalarSizeInBits();
10715 if (ElementBits < 32)
10716 // No floating point type available, decompose into 128-bit vectors.
10717 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10719 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
10720 VT.getVectorNumElements());
10721 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
10722 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
10723 return DAG.getNode(ISD::BITCAST, DL, VT,
10724 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
10727 switch (VT.SimpleTy) {
10728 case MVT::v4f64:
10729 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10730 case MVT::v4i64:
10731 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10732 case MVT::v8f32:
10733 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10734 case MVT::v8i32:
10735 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10736 case MVT::v16i16:
10737 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10738 case MVT::v32i8:
10739 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10741 default:
10742 llvm_unreachable("Not a valid 256-bit x86 vector type!");
10743 }
10744 }
10746 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
10747 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10748 const X86Subtarget *Subtarget,
10749 SelectionDAG &DAG) {
10750 SDLoc DL(Op);
10751 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10752 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10753 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10754 ArrayRef<int> Mask = SVOp->getMask();
10755 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10757 // X86 has dedicated unpack instructions that can handle specific blend
10758 // operations: UNPCKH and UNPCKL.
10759 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
10760 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
10761 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
10762 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
10764 // FIXME: Implement direct support for this type!
10765 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
10766 }
10768 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
10769 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10770 const X86Subtarget *Subtarget,
10771 SelectionDAG &DAG) {
10772 SDLoc DL(Op);
10773 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
10774 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
10775 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10776 ArrayRef<int> Mask = SVOp->getMask();
10777 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10779 // Use dedicated unpack instructions for masks that match their pattern.
10780 if (isShuffleEquivalent(Mask,
10781 0, 16, 1, 17, 4, 20, 5, 21,
10782 8, 24, 9, 25, 12, 28, 13, 29))
10783 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
10784 if (isShuffleEquivalent(Mask,
10785 2, 18, 3, 19, 6, 22, 7, 23,
10786 10, 26, 11, 27, 14, 30, 15, 31))
10787 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
10789 // FIXME: Implement direct support for this type!
10790 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
10791 }
10793 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
10794 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10795 const X86Subtarget *Subtarget,
10796 SelectionDAG &DAG) {
10797 SDLoc DL(Op);
10798 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
10799 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
10800 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10801 ArrayRef<int> Mask = SVOp->getMask();
10802 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10804 // X86 has dedicated unpack instructions that can handle specific blend
10805 // operations: UNPCKH and UNPCKL.
10806 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
10807 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
10808 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
10809 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
10811 // FIXME: Implement direct support for this type!
10812 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
10813 }
10815 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
10816 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10817 const X86Subtarget *Subtarget,
10818 SelectionDAG &DAG) {
10819 SDLoc DL(Op);
10820 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
10821 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
10822 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10823 ArrayRef<int> Mask = SVOp->getMask();
10824 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10826 // Use dedicated unpack instructions for masks that match their pattern.
10827 if (isShuffleEquivalent(Mask,
10828 0, 16, 1, 17, 4, 20, 5, 21,
10829 8, 24, 9, 25, 12, 28, 13, 29))
10830 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
10831 if (isShuffleEquivalent(Mask,
10832 2, 18, 3, 19, 6, 22, 7, 23,
10833 10, 26, 11, 27, 14, 30, 15, 31))
10834 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
}
/// \brief Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
}
/// \brief Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
  assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}
/// \brief High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Subtarget->hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
  // Dispatch to each element type for lowering. If we don't have support for
  // specific element type shuffles at 512 bits, immediately split them and
  // lower them. Each lowering routine of a given type is allowed to assume that
  // the requisite ISA extensions for that element type are available.
  switch (VT.SimpleTy) {
  case MVT::v8f64:
    return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16f32:
    return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i64:
    return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i32:
    return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i16:
    if (Subtarget->hasBWI())
      return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;
  case MVT::v64i8:
    if (Subtarget->hasBWI())
      return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;

  default:
    llvm_unreachable("Not a valid 512-bit x86 vector type!");
  }
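  // Note that without BWI the v32i16 and v64i8 cases above deliberately fall
  // through to the generic split below.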
  // Otherwise fall back on splitting.
  return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}
/// \brief Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
                                  SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  int NumElements = VT.getVectorNumElements();
  SDLoc dl(Op);

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node as the second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);
  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
  if (V2IsUndef)
    for (int M : Mask)
      if (M >= NumElements) {
        SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
        for (int &M : NewMask)
          if (M >= NumElements)
            M = -1;
        return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
      }
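  // For example (illustrative), the v4i32 mask <0, 5, 2, 7> with an undef V2
  // is rewritten as <0, -1, 2, -1>.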
  // Try to collapse shuffles into using a vector type with fewer elements but
  // wider element types. We cap this to not form integers or floating point
  // elements wider than 64 bits, but it might be interesting to form i128
  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
  SmallVector<int, 16> WidenedMask;
  if (VT.getScalarSizeInBits() < 64 &&
      canWidenShuffleElements(Mask, WidenedMask)) {
    MVT NewEltVT = VT.isFloatingPoint()
                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
    MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
      V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
      V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
    }
  }
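  // For example (illustrative), a v4i32 shuffle with mask <0, 1, 4, 5> can be
  // bitcast to v2i64 and lowered as the v2i64 shuffle <0, 2>.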
  int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
  for (int M : SVOp->getMask())
    if (M < 0)
      ++NumUndefElements;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;

  // Commute the shuffle as needed such that more elements come from V1 than
  // V2. This allows us to match the shuffle pattern strictly on how many
  // elements come from V1 without handling the symmetric cases.
  if (NumV2Elements > NumV1Elements)
    return DAG.getCommutedVectorShuffle(*SVOp);
  // When the number of V1 and V2 elements are the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum of
  // indices for V2. When those are equal, try to ensure that the number of odd
  // indices for V1 is lower than the number of odd indices for V2.
  if (NumV1Elements == NumV2Elements) {
    int LowV1Elements = 0, LowV2Elements = 0;
    for (int M : SVOp->getMask().slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
    if (LowV2Elements > LowV1Elements) {
      return DAG.getCommutedVectorShuffle(*SVOp);
    } else if (LowV2Elements == LowV1Elements) {
      int SumV1Indices = 0, SumV2Indices = 0;
      for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
        if (SVOp->getMask()[i] >= NumElements)
          SumV2Indices += i;
        else if (SVOp->getMask()[i] >= 0)
          SumV1Indices += i;
      if (SumV2Indices < SumV1Indices) {
        return DAG.getCommutedVectorShuffle(*SVOp);
      } else if (SumV2Indices == SumV1Indices) {
        int NumV1OddIndices = 0, NumV2OddIndices = 0;
        for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
          if (SVOp->getMask()[i] >= NumElements)
            NumV2OddIndices += i % 2;
          else if (SVOp->getMask()[i] >= 0)
            NumV1OddIndices += i % 2;
        if (NumV2OddIndices < NumV1OddIndices)
          return DAG.getCommutedVectorShuffle(*SVOp);
      }
    }
  }
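  // For example (illustrative), the v4i32 mask <4, 5, 0, 1> draws two elements
  // from each source but both low-half elements from V2, so it is commuted to
  // <0, 1, 4, 5>.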
  // For each vector width, delegate to a specialized lowering routine.
  if (VT.getSizeInBits() == 128)
    return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  if (VT.getSizeInBits() == 256)
    return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  if (VT.getSizeInBits() == 512)
    return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  llvm_unreachable("Unimplemented!");
}
//===----------------------------------------------------------------------===//
// Legacy vector shuffle lowering
//
// This is the legacy code that handles vector shuffles until the code above
// replaces it in both functionality and performance.
//===----------------------------------------------------------------------===//
static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
                        bool hasInt256, unsigned *MaskOut = nullptr) {
  MVT EltVT = VT.getVectorElementType();

  // There is no blend with immediate in AVX-512.
  if (VT.is512BitVector())
    return false;

  if (!hasSSE41 || EltVT == MVT::i8)
    return false;
  if (!hasInt256 && VT == MVT::v16i16)
    return false;

  unsigned MaskValue = 0;
  unsigned NumElems = VT.getVectorNumElements();
  // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
  unsigned NumLanes = (NumElems - 1) / 8 + 1;
  unsigned NumElemsInLane = NumElems / NumLanes;

  // Blend for v16i16 should be symmetric for both lanes.
  for (unsigned i = 0; i < NumElemsInLane; ++i) {
    int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
    int EltIdx = MaskVals[i];

    if ((EltIdx < 0 || EltIdx == (int)i) &&
        (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
      continue;

    if (((unsigned)EltIdx == (i + NumElems)) &&
        (SndLaneEltIdx < 0 ||
         (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
      MaskValue |= (1 << i);
    else
      return false;
  }
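  // For example (illustrative), for v4i32 the mask <0, 5, 2, 7> takes
  // elements 1 and 3 from V2, so MaskValue ends up as 0b1010.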
  if (MaskOut)
    *MaskOut = MaskValue;
  return true;
}
// Try to lower a shuffle node into a simple blend instruction.
// This function assumes isBlendMask returns true for this
// ShuffleVectorSDNode.
static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
                                          unsigned MaskValue,
                                          const X86Subtarget *Subtarget,
                                          SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  MVT EltVT = VT.getVectorElementType();
  assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
                     Subtarget->hasInt256()) &&
         "Trying to lower a VECTOR_SHUFFLE to a Blend but with the wrong mask");
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();

  // Convert i32 vectors to floating point if AVX2 is not available.
  // AVX2 introduced the VPBLENDD instruction for 128 and 256-bit vectors.
  MVT BlendVT = VT;
  if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
    BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
                               NumElems);
    V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
  }

  SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
                            DAG.getConstant(MaskValue, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
}
/// In vector type \p VT, return true if the element at index \p InputIdx
/// falls on a different 128-bit lane than \p OutputIdx.
static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
                                     unsigned OutputIdx) {
  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
}
/// Generate a PSHUFB if possible. Selects elements from \p V1 according to
/// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
/// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
/// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
/// zero.
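///
/// For example (illustrative), moving v4i32 element 2 of V1 into result
/// element 0 emits the mask bytes <8, 9, 10, 11> for the first four slots;
/// undef or out-of-range inputs emit 0x80 bytes, which PSHUFB zeroes.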
static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
                         SelectionDAG &DAG) {
  MVT VT = V1.getSimpleValueType();
  assert(VT.is128BitVector() || VT.is256BitVector());

  MVT EltVT = VT.getVectorElementType();
  unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 32> PshufbMask;
  for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
    int InputIdx = MaskVals[OutputIdx];
    unsigned InputByteIdx;

    if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
      InputByteIdx = 0x80;
    else {
      // Cross lane is not allowed.
      if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
        return SDValue();
      InputByteIdx = InputIdx * EltSizeInBytes;
      // Index is a byte offset within the 128-bit lane.
      InputByteIdx &= 0xf;
    }

    for (unsigned j = 0; j < EltSizeInBytes; ++j) {
      PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
      if (InputByteIdx != 0x80)
        ++InputByteIdx;
    }
  }

  MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
  if (ShufVT != VT)
    V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
  return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
}
// v8i16 shuffles - Prefer shuffles in the following order:
//  1. [all]   pshuflw, pshufhw, optional move
//  2. [ssse3] 1 x pshufb
//  3. [ssse3] 2 x pshufb + 1 x por
//  4. [all]   mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
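//
// For example (illustrative), the mask <2, 1, 0, 3, 4, 5, 6, 7> reorders only
// the low four words and maps to a single pshuflw with immediate 0b11000110.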
static SDValue
LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 8> MaskVals;

  // Determine if more than 1 of the words in each of the low and high quadwords
  // of the result come from the same quadword of one of the two inputs. Undef
  // mask values count as coming from any quadword, for better codegen.
  //
  // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
  // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
  unsigned LoQuad[] = { 0, 0, 0, 0 };
  unsigned HiQuad[] = { 0, 0, 0, 0 };
  // Indices of quads used.
  std::bitset<4> InputQuads;
  for (unsigned i = 0; i < 8; ++i) {
    unsigned *Quad = i < 4 ? LoQuad : HiQuad;
    int EltIdx = SVOp->getMaskElt(i);
    MaskVals.push_back(EltIdx);
    if (EltIdx < 0) {
      ++Quad[0];
      ++Quad[1];
      ++Quad[2];
      ++Quad[3];
      continue;
    }
    ++Quad[EltIdx / 4];
    InputQuads.set(EltIdx / 4);
  }
  int BestLoQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LoQuad[i] > MaxQuad) {
      BestLoQuad = i;
      MaxQuad = LoQuad[i];
    }
  }

  int BestHiQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HiQuad[i] > MaxQuad) {
      BestHiQuad = i;
      MaxQuad = HiQuad[i];
    }
  }
  // For SSSE3, if all 8 words of the result come from only 1 quadword of each
  // of the two input vectors, shuffle them into one input vector so only a
  // single pshufb instruction is necessary. If there are more than 2 input
  // quads, disable the next transformation since it does not help SSSE3.
  bool V1Used = InputQuads[0] || InputQuads[1];
  bool V2Used = InputQuads[2] || InputQuads[3];
  if (Subtarget->hasSSSE3()) {
    if (InputQuads.count() == 2 && V1Used && V2Used) {
      BestLoQuad = InputQuads[0] ? 0 : 1;
      BestHiQuad = InputQuads[2] ? 2 : 3;
    }
    if (InputQuads.count() > 2) {
      BestLoQuad = -1;
      BestHiQuad = -1;
    }
  }
  // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
  // the shuffle mask. If a quad is scored as -1, that means that it contains
  // words from all 4 input quadwords.
  SDValue NewV;
  if (BestLoQuad >= 0 || BestHiQuad >= 0) {
    int MaskV[] = {
      BestLoQuad < 0 ? 0 : BestLoQuad,
      BestHiQuad < 0 ? 1 : BestHiQuad
    };
    NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
    NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);

    // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
    // source words for the shuffle, to aid later transformations.
    bool AllWordsInNewV = true;
    bool InOrder[2] = { true, true };
    for (unsigned i = 0; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx != (int)i)
        InOrder[i/4] = false;
      if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
        continue;
      AllWordsInNewV = false;
      break;
    }

    bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
    if (AllWordsInNewV) {
      for (int i = 0; i != 8; ++i) {
        int idx = MaskVals[i];
        if (idx < 0)
          continue;
        idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
        if ((idx != i) && idx < 4)
          pshufhw = false;
        if ((idx != i) && idx > 3)
          pshuflw = false;
      }
      V1 = NewV;
      V2Used = false;
      BestLoQuad = 0;
      BestHiQuad = 1;
    }

    // If we've eliminated the use of V2, and the new mask is a pshuflw or
    // pshufhw, that's as cheap as it gets. Return the new shuffle.
    if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
      unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
      unsigned TargetMask = 0;
      NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
                                  DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
                             getShufflePSHUFLWImmediate(SVOp);
      V1 = NewV.getOperand(0);
      return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
    }
  }
  // Promote splats to a larger type which usually leads to more efficient code.
  // FIXME: Is this true if pshufb is available?
  if (SVOp->isSplat())
    return PromoteSplat(SVOp, DAG);

  // If we have SSSE3, and all words of the result are from 1 input vector,
  // case 2 is generated, otherwise case 3 is generated. If no SSSE3
  // is present, fall back to case 4.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If we have elements from both input vectors, set the high bit of the
    // shuffle mask element to zero out elements that come from V2 in the V1
    // mask, and elements that come from V1 in the V2 mask, so that the two
    // results can be OR'd together.
    bool TwoInputs = V1Used && V2Used;
    V1 = getPSHUFB(MaskVals, V1, dl, DAG);
    if (!TwoInputs)
      return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    CommuteVectorShuffleMask(MaskVals, 8);
    V2 = getPSHUFB(MaskVals, V2, dl, DAG);
    V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  }
  // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
  // and update MaskVals with new element order.
  std::bitset<8> InOrder;
  if (BestLoQuad >= 0) {
    int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
    for (int i = 0; i != 4; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestLoQuad) {
        MaskV[i] = idx & 3;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFLWImmediate(SVOp), DAG);
    }
  }
  // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
  // and update MaskVals with the new element order.
  if (BestHiQuad >= 0) {
    int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
    for (unsigned i = 4; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestHiQuad) {
        MaskV[i] = (idx & 3) + 4;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFHWImmediate(SVOp), DAG);
    }
  }
  // In case BestHi & BestLo were both -1, which means each quadword has a word
  // from each of the four input quadwords, calculate the InOrder bitvector now
  // before falling through to the insert/extract cleanup.
  if (BestLoQuad == -1 && BestHiQuad == -1) {
    NewV = V1;
    for (int i = 0; i != 8; ++i)
      if (MaskVals[i] < 0 || MaskVals[i] == i)
        InOrder.set(i);
  }

  // The other elements are put in the right place using pextrw and pinsrw.
  for (unsigned i = 0; i != 8; ++i) {
    if (InOrder[i])
      continue;
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    SDValue ExtOp = (EltIdx < 8) ?
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
                  DAG.getIntPtrConstant(EltIdx)) :
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
                  DAG.getIntPtrConstant(EltIdx - 8));
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
                       DAG.getIntPtrConstant(i));
  }
  return NewV;
}
/// \brief v16i16 shuffles
///
/// FIXME: We only support generation of a single pshufb currently. We can
/// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
/// well (e.g. 2 x pshufb + 1 x por).
static SDValue
LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);

  if (V2.getOpcode() != ISD::UNDEF)
    return SDValue();

  SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
  return getPSHUFB(MaskVals, V1, dl, DAG);
}
// v16i8 shuffles - Prefer shuffles in the following order:
//  1. [ssse3] 1 x pshufb
//  2. [ssse3] 2 x pshufb + 1 x por
//  3. [all]   v8i16 shuffle + N x pextrw + rotate + pinsrw
static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
                                        const X86Subtarget* Subtarget,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  ArrayRef<int> MaskVals = SVOp->getMask();

  // Promote splats to a larger type which usually leads to more efficient code.
  // FIXME: Is this true if pshufb is available?
  if (SVOp->isSplat())
    return PromoteSplat(SVOp, DAG);

  // If we have SSSE3, case 1 is generated when all result bytes come from
  // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
  // present, fall back to case 3.

  // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If all result elements are from one input vector, then only translate
    // undef mask values to 0x80 (zero out result) in the pshufb mask.
    //
    // Otherwise, we have elements from both input vectors, and must zero out
    // elements that come from V2 in the first mask, and V1 in the second mask
    // so that we can OR them together.
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 0 || EltIdx >= 16)
        EltIdx = 0x80;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, pshufbMask));

    // As PSHUFB will zero elements with negative indices, it's safe to ignore
    // the 2nd operand if it's undefined or zero.
    if (V2.getOpcode() == ISD::UNDEF ||
        ISD::isBuildVectorAllZeros(V2.getNode()))
      return V1;

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, pshufbMask));
    return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
  }
  // No SSSE3 - Calculate in place words and then fix all out of place words
  // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
  // the 16 different words that comprise the two doublequadword input vectors.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue NewV = V2;
  for (int i = 0; i != 8; ++i) {
    int Elt0 = MaskVals[i*2];
    int Elt1 = MaskVals[i*2+1];

    // This word of the result is all undef, skip it.
    if (Elt0 < 0 && Elt1 < 0)
      continue;

    // This word of the result is already in the correct place, skip it.
    if ((Elt0 == i*2) && (Elt1 == i*2+1))
      continue;

    SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
    SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
    SDValue InsElt;

    // If Elt0 and Elt1 are defined and consecutive, and can be loaded
    // together using a single extract, load and insert them as one word.
    if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
      continue;
    }

    // If Elt1 is defined, extract it from the appropriate source. If the
    // source byte is not also odd, shift the extracted word left 8 bits;
    // otherwise clear the bottom 8 bits if we need to do an or.
    if (Elt1 >= 0) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      if ((Elt1 & 1) == 0)
        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
                             DAG.getConstant(8,
                                 TLI.getShiftAmountTy(InsElt.getValueType())));
      else if (Elt0 >= 0)
        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
                             DAG.getConstant(0xFF00, MVT::i16));
    }
    // If Elt0 is defined, extract it from the appropriate source. If the
    // source byte is not also even, shift the extracted word right 8 bits. If
    // Elt1 was also defined, OR the extracted values together before
    // inserting them in the result.
    if (Elt0 >= 0) {
      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                                    Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
      if ((Elt0 & 1) != 0)
        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
                              DAG.getConstant(8,
                                  TLI.getShiftAmountTy(InsElt0.getValueType())));
      else if (Elt1 >= 0)
        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
                              DAG.getConstant(0x00FF, MVT::i16));
      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
                         : InsElt0;
    }
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                       DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}
// v32i8 shuffles - Translate to VPSHUFB if possible.
static
SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
                                 const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());

  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());

  // VPSHUFB may be generated if
  // (1) one of the input vectors is undefined or zeroinitializer.
  // The mask value 0x80 puts 0 in the corresponding slot of the vector.
  // And (2) the mask indexes don't cross the 128-bit lane.
  if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
      (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
    return SDValue();

  if (V1IsAllZero && !V2IsAllZero) {
    CommuteVectorShuffleMask(MaskVals, 32);
    V1 = V2;
  }
  return getPSHUFB(MaskVals, V1, dl, DAG);
}
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
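///
/// For example (illustrative), the mask above narrows to the v4i32 mask
/// <1, 5, 0, 7> once both operands are bitcast to v4i32.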
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();
  MVT NewVT;
  unsigned Scale;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected!");
  case MVT::v2i64:
  case MVT::v2f64:
           return SDValue(SVOp, 0);
  case MVT::v4f32:  NewVT = MVT::v2f64; Scale = 2; break;
  case MVT::v4i32:  NewVT = MVT::v2i64; Scale = 2; break;
  case MVT::v8i16:  NewVT = MVT::v4i32; Scale = 2; break;
  case MVT::v16i8:  NewVT = MVT::v4i32; Scale = 4; break;
  case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
  case MVT::v32i8:  NewVT = MVT::v8i32; Scale = 4; break;
  }

  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; i += Scale) {
    int StartIdx = -1;
    for (unsigned j = 0; j != Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx < 0)
        StartIdx = (EltIdx / Scale);
      if (EltIdx != (int)(StartIdx*Scale + j))
        return SDValue();
    }
    MaskVec.push_back(StartIdx);
  }

  SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
  SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}
/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(MVT VT, MVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, SDLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = nullptr;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);

    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BITCAST, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                        .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BITCAST, dl,
                                             OpVT, SrcOp)));
}
/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
/// which could not be matched by any known target specific shuffle.
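///
/// For example (illustrative), a v8f32 shuffle <0, 1, 8, 9, 4, 5, 12, 13> is
/// handled by building each 128-bit half of the result as its own v4f32
/// shuffle of extracted 128-bit subvectors and concatenating the two halves.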
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  // If this can be compacted into a single 8x32 shuffle, do that.
  SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
  if (NewOp.getNode())
    return NewOp;

  MVT VT = SVOp->getSimpleValueType(0);

  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLaneElems = NumElems / 2;

  SDLoc dl(SVOp);
  MVT EltVT = VT.getVectorElementType();
  MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
  SDValue Output[2];

  SmallVector<int, 16> Mask;
  for (unsigned l = 0; l < 2; ++l) {
    // Build a shuffle mask for the output, discovering on the fly which
    // input vectors to use as shuffle operands (recorded in InputUsed).
    // If building a suitable shuffle vector proves too hard, then bail
    // out with UseBuildVector set.
    bool UseBuildVector = false;
    int InputUsed[2] = { -1, -1 }; // Not yet discovered.
    unsigned LaneStart = l * NumLaneElems;
    for (unsigned i = 0; i != NumLaneElems; ++i) {
      // The mask element. This indexes into the input.
      int Idx = SVOp->getMaskElt(i+LaneStart);
      if (Idx < 0) {
        // The mask element does not index into any input vector.
        Mask.push_back(-1);
        continue;
      }

      // The input vector this mask element indexes into.
      int Input = Idx / NumLaneElems;

      // Turn the index into an offset from the start of the input vector.
      Idx -= Input * NumLaneElems;

      // Find or create a shuffle vector operand to hold this input.
      unsigned OpNo;
      for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
        if (InputUsed[OpNo] == Input)
          // This input vector is already an operand.
          break;
        if (InputUsed[OpNo] < 0) {
          // Create a new operand for this input vector.
          InputUsed[OpNo] = Input;
          break;
        }
      }

      if (OpNo >= array_lengthof(InputUsed)) {
        // More than two input vectors used! Give up on trying to create a
        // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
        UseBuildVector = true;
        break;
      }

      // Add the mask index for the new shuffle vector.
      Mask.push_back(Idx + OpNo * NumLaneElems);
    }

    if (UseBuildVector) {
      SmallVector<SDValue, 16> SVOps;
      for (unsigned i = 0; i != NumLaneElems; ++i) {
        // The mask element. This indexes into the input.
        int Idx = SVOp->getMaskElt(i+LaneStart);
        if (Idx < 0) {
          SVOps.push_back(DAG.getUNDEF(EltVT));
          continue;
        }

        // The input vector this mask element indexes into.
        int Input = Idx / NumElems;

        // Turn the index into an offset from the start of the input vector.
        Idx -= Input * NumElems;

        // Extract the vector element by hand.
        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                    SVOp->getOperand(Input),
                                    DAG.getIntPtrConstant(Idx)));
      }

      // Construct the output using a BUILD_VECTOR.
      Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
    } else if (InputUsed[0] < 0) {
      // No input vectors were used! The result is undefined.
      Output[l] = DAG.getUNDEF(NVT);
    } else {
      SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
                                        (InputUsed[0] % 2) * NumLaneElems,
                                        DAG, dl);
      // If only one input was used, use an undefined vector for the other.
      SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
        Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
                            (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
      // At least one input vector was used. Create a new shuffle vector.
      Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
    }

    Mask.clear();
  }

  // Concatenate the result back.
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
}
/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
/// 4 elements, and match them with several different shuffle types.
static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  MVT VT = SVOp->getSimpleValueType(0);

  assert(VT.is128BitVector() && "Unsupported vector size");

  std::pair<int, int> Locs[4];
  int Mask1[] = { -1, -1, -1, -1 };
  SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }

  if (NumLo <= 2 && NumHi <= 2) {
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements.
    // The second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
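    //
    // For example (illustrative), mask <1, 5, 2, 7> takes two elements from
    // each source: the first shufps gathers them as <1, 2, 5, 7>, and the
    // second shufps of that result with itself puts them into final order.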
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    int Mask2[] = { -1, -1, -1, -1 };

    for (unsigned i = 0; i != 4; ++i)
      if (Locs[i].first != -1) {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  }

  if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y. First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes don't
    // matter). Then, use a shufps to build the final vector, taking the half
    // containing the element from Y from the intermediate, and the other half
    // from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, 4);
      std::swap(V1, V2);
    }

    // Find the element from V2.
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    }

    Mask1[0] = HiIndex & 1 ? 2 : 0;
    Mask1[1] = HiIndex & 1 ? 0 : 2;
    Mask1[2] = PermMask[2];
    Mask1[3] = PermMask[3];
    if (Mask1[2] >= 0)
      Mask1[2] += 4;
    if (Mask1[3] >= 0)
      Mask1[3] += 4;
    return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
  }

  // Break it into (shuffle shuffle_hi, shuffle_lo).
  int LoMask[] = { -1, -1, -1, -1 };
  int HiMask[] = { -1, -1, -1, -1 };

  int *MaskPtr = LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      MaskPtr[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      MaskPtr[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  int MaskOps[] = { -1, -1, -1, -1 };
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}
static bool MayFoldVectorLoad(SDValue V) {
  while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
      V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
    // BUILD_VECTOR (load), undef
    V = V.getOperand(0);

  return MayFoldLoad(V);
}
static
SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // Canonicalize to v2f64.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}
static
SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
}
static
SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}
static
SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions is only memory, so check if there's a
  // potential load folding here, otherwise use SHUFPS or MOVSD to match the
  // same behavior.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  //  (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  //    turns into:
  //  (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD
  else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  if (CanFoldLoad) {
    if (HasSSE2 && NumElems == 2)
      return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);

    if (NumElems == 4)
      // If we don't care about the second element, proceed to use movss.
      if (SVOp->getMaskElt(1) != -1)
        return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
  }

  // movl and movlp will both match v2i64, but v2i64 is never matched by
  // movl earlier because we make it strict to avoid messing with the movlp load
  // folding logic (see the code above getMOVLP call). Match it here then,
  // this is horrible, but will stay like this until we move all shuffle
  // matching to x86 specific nodes. Note that for the 1st condition all
  // types are matched with movsd.
  if (HasSSE2) {
    // FIXME: isMOVLMask should be checked and matched before getMOVLP,
    // as to remove this logic from here, as much as possible
    if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
      return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
  }

  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
  return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
                              getShuffleSHUFImmediate(SVOp), DAG);
}
static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
                                         SelectionDAG &DAG) {
  SDLoc dl(Load);
  MVT VT = Load->getSimpleValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue Addr = Load->getOperand(1);
  SDValue NewAddr = DAG.getNode(
      ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
      DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));

  SDValue NewLoad =
      DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
                  DAG.getMachineFunction().getMachineMemOperand(
                      Load->getMemOperand(), 0, EVT.getStoreSize()));
  return NewLoad;
}
// It is only safe to call this function if isINSERTPSMask is true for
// this shufflevector mask.
static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
                           SelectionDAG &DAG) {
  // Generate an insertps instruction when inserting an f32 from memory onto a
  // v4f32 or when copying a member from one v4f32 to another.
  // We also use it for transferring i32 from one register to another,
  // since it simply copies the same bits.
  // If we're transferring an i32 from memory to a specific element in a
  // register, we output a generic DAG that will match the PINSRD
  // instruction.
  MVT VT = SVOp->getSimpleValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  auto Mask = SVOp->getMask();
  assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
         "unsupported vector type for insertps/pinsrd");

  auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
  auto FromV2Predicate = [](const int &i) { return i >= 4; };
  int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);

  SDValue From;
  SDValue To;
  unsigned DestIndex;
  if (FromV1 == 1) {
    From = V1;
    To = V2;
    DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
                Mask.begin();

    // If we have 1 element from each vector, we have to check if we're
    // changing V1's element's place. If so, we're done. Otherwise, we
    // should assume we're changing V2's element's place and behave
    // accordingly.
    int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
    assert(DestIndex <= INT32_MAX && "truncated destination index");
    if (FromV1 == FromV2 &&
        static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
      From = V2;
      To = V1;
      DestIndex =
          std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
    }
  } else {
    assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
           "More than one element from V1 and from V2, or no elements from one "
           "of the vectors. This case should not have returned true from "
           "isINSERTPSMask");
    From = V2;
    To = V1;
    DestIndex =
        std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
  }

  // Get an index into the source vector in the range [0,4) (the mask is
  // in the range [0,8) because it can address V1 and V2)
  unsigned SrcIndex = Mask[DestIndex] % 4;
  if (MayFoldLoad(From)) {
    // Trivial case, when From comes from a load and is only used by the
    // shuffle. Make it use insertps from the vector that we need from that
    // load.
    SDValue NewLoad =
        NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
    if (!NewLoad.getNode())
      return SDValue();

    if (EVT == MVT::f32) {
      // Create this as a scalar to vector to match the instruction pattern.
      SDValue LoadScalarToVector =
          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
      SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
      return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
                         InsertpsMask);
    } else { // EVT == MVT::i32
      // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
      // instruction, to match the PINSRD instruction, which loads an i32 to a
      // certain vector element.
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
                         DAG.getConstant(DestIndex, MVT::i32));
    }
  }

  // Vector-element-to-vector
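  //
  // The INSERTPS immediate encodes the source element in bits [7:6], the
  // destination element in bits [5:4], and a zero mask in bits [3:0]; e.g.
  // SrcIndex 1 with DestIndex 2 yields 0x60 (illustrative).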
  SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
  return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
}
// Reduce a vector shuffle to zext.
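//
// For example (illustrative), a v16i8 shuffle with mask
// <0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3, -1, -1, -1> leaves every
// non-anchor byte undef, so it can be lowered as a VZEXT (pmovzxbd) that
// zero-fills those bytes.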
static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
                                    SelectionDAG &DAG) {
  // PMOVZX is only available from SSE41.
  if (!Subtarget->hasSSE41())
    return SDValue();

  MVT VT = Op.getSimpleValueType();

  // Only AVX2 supports 256-bit vector integer extending.
  if (!Subtarget->hasInt256() && VT.is256BitVector())
    return SDValue();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDLoc DL(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = VT.getVectorNumElements();

  // Extending is a unary operation and the element type of the source vector
  // won't be equal to or larger than i64.
  if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
      VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
  unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
  while ((1U << Shift) < NumElems) {
    if (SVOp->getMaskElt(1U << Shift) == 1)
      break;
    Shift += 1;
    // The maximal ratio is 8, i.e. from i8 to i64.
    if (Shift > 3)
      return SDValue();
  }

  // Check the shuffle mask.
  unsigned Mask = (1U << Shift) - 1;
  for (unsigned i = 0; i != NumElems; ++i) {
    int EltIdx = SVOp->getMaskElt(i);
    if ((i & Mask) != 0 && EltIdx != -1)
      return SDValue();
    if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
      return SDValue();
  }

  unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
  MVT NeVT = MVT::getIntegerVT(NBits);
  MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);

  if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
    return SDValue();

  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
}
static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);

  if (isZeroShuffle(SVOp))
    return getZeroVector(VT, Subtarget, DAG, dl);

  // Handle splat operations.
  if (SVOp->isSplat()) {
    // Use vbroadcast whenever the splat comes from a foldable load.
    SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
    if (Broadcast.getNode())
      return Broadcast;
  }

  // Check integer expanding shuffles.
  SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
  if (NewOp.getNode())
    return NewOp;

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
      VT == MVT::v32i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
  } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
    // FIXME: Figure out a cleaner way to do this.
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
      if (NewOp.getNode()) {
        MVT NewVT = NewOp.getSimpleValueType();
        if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
                               NewVT, true, false))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
                              dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
      if (NewOp.getNode()) {
        MVT NewVT = NewOp.getSimpleValueType();
        if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
                              dl);
      }
    }
  }

  return SDValue();
}
SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  unsigned NumElems = VT.getVectorNumElements();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasFp256 = Subtarget->hasFp256();
  bool HasInt256 = Subtarget->hasInt256();
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  // Check if we should use the experimental vector shuffle lowering. If so,
  // delegate completely to that code path.
  if (ExperimentalVectorShuffleLowering)
    return lowerVectorShuffle(Op, Subtarget, DAG);

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node as the second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);
  // Vector shuffle lowering takes 3 steps:
  //
  // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
  //    narrowing and commutation of operands should be handled.
  // 2) Matching of shuffles with known shuffle masks to x86 target specific
  //    nodes.
  // 3) Rewriting of unmatched masks into new generic shuffle operations,
  //    so the shuffle can be broken into other shuffles and the legalizer can
  //    try the lowering again.
  //
  // The general idea is that no vector_shuffle operation should be left to
  // be matched during isel, all of them must be converted to a target specific
  // node.

  // Normalize the input vectors. Here splats, zeroed vectors, profitable
  // narrowing and commutation of operands should be handled. The actual code
  // doesn't include all of those, work in progress...
  SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
  if (NewOp.getNode())
    return NewOp;

  SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
  // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
  // unpckh_undef). Only use pshufd if speed is more important than size.
  if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
      V2IsUndef && MayFoldVectorLoad(V1))
    return getMOVDDup(Op, dl, V1, DAG);

  if (isMOVHLPS_v_undef_Mask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  // Use to match splats.
  if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
      (VT == MVT::v2f64 || VT == MVT::v2i64))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isPSHUFDMask(M, VT)) {
    // The actual implementation will match the mask in the if above and then
    // during isel it can match several different instructions, not only pshufd
    // as its name says, sad but true, emulate the behavior for now...
    if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
      return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);

    unsigned TargetMask = getShuffleSHUFImmediate(SVOp);

    if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);

    if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
      return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
                                  DAG);

    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
                                TargetMask, DAG);
  }

  if (isPALIGNRMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
                                getShufflePALIGNRImmediate(SVOp),
                                DAG);

  if (isVALIGNMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
                                getShuffleVALIGNImmediate(SVOp),
                                DAG);

  // Check if this can be converted into a logical shift.
  bool isLeft = false;
  unsigned ShAmt = 0;
  SDValue ShVal;
  bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
  if (isShift && ShVal.hasOneUse()) {
    // If the shifted value has multiple uses, it may be cheaper to use
    // v_set0 + movlhps or movhlps, etc.
    MVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  if (isMOVLMask(M, VT)) {
    if (ISD::isBuildVectorAllZeros(V1.getNode()))
      return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
    if (!isMOVLPMask(M, VT)) {
      if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
        return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);

      if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
    }
  }

  // FIXME: fold these into legal mask.
  if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
    return getMOVLowToHigh(Op, dl, DAG, HasSSE2);

  if (isMOVHLPSMask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);

  if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);

  if (isMOVLPMask(M, VT))
    return getMOVLP(Op, dl, DAG, HasSSE2);

  if (ShouldXformToMOVHLPS(M, VT) ||
      ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
    return DAG.getCommutedVectorShuffle(*SVOp);

  if (isShift) {
    // No better options. Use a vshldq / vsrldq.
    MVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }
12436 bool Commuted = false;
12437 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12438 // 1,1,1,1 -> v8i16 though.
12439 BitVector UndefElements;
12440 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12441 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12442 V1IsSplat = true;
12443 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12444 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12445 V2IsSplat = true;
12447 // Canonicalize the splat or undef, if present, to be on the RHS.
12448 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12449 CommuteVectorShuffleMask(M, NumElems);
12450 std::swap(V1, V2);
12451 std::swap(V1IsSplat, V2IsSplat);
12452 Commuted = true;
12453 }
12455 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12456 // Shuffling low element of v1 into undef, just return v1.
12457 if (V2IsUndef)
12458 return V1;
12459 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12460 // the instruction selector will not match, so get a canonical MOVL with
12461 // swapped operands to undo the commute.
12462 return getMOVL(DAG, dl, VT, V2, V1);
12463 }
12465 if (isUNPCKLMask(M, VT, HasInt256))
12466 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12468 if (isUNPCKHMask(M, VT, HasInt256))
12469 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12471 if (V2IsSplat) {
12472 // Normalize the mask so all entries that point to V2 point to its first
12473 // element, then try to match unpck{h|l} again. If it matches, return a
12474 // new vector_shuffle with the corrected mask.
12475 SmallVector<int, 8> NewMask(M.begin(), M.end());
12476 NormalizeMask(NewMask, NumElems);
12477 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12478 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12479 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12480 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12481 }
12483 if (Commuted) {
12484 // Commute it back and try unpck* again.
12485 // FIXME: this seems wrong.
12486 CommuteVectorShuffleMask(M, NumElems);
12487 std::swap(V1, V2);
12488 std::swap(V1IsSplat, V2IsSplat);
12490 if (isUNPCKLMask(M, VT, HasInt256))
12491 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12493 if (isUNPCKHMask(M, VT, HasInt256))
12494 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12495 }
12497 // Normalize the node to match x86 shuffle ops if needed
12498 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12499 return DAG.getCommutedVectorShuffle(*SVOp);
12501 // The checks below are all present in isShuffleMaskLegal, but they are
12502 // inlined here right now to enable us to directly emit target specific
12503 // nodes, and remove one by one until they don't return Op anymore.
12505 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12506 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12507 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12508 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12509 }
12511 if (isPSHUFHWMask(M, VT, HasInt256))
12512 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12513 getShufflePSHUFHWImmediate(SVOp),
12514 DAG);
12516 if (isPSHUFLWMask(M, VT, HasInt256))
12517 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12518 getShufflePSHUFLWImmediate(SVOp),
12519 DAG);
12521 unsigned MaskValue;
12522 if (isBlendMask(M, VT, Subtarget->hasSSE41(), Subtarget->hasInt256(),
12523 &MaskValue))
12524 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12526 if (isSHUFPMask(M, VT))
12527 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12528 getShuffleSHUFImmediate(SVOp), DAG);
12530 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12531 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12532 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12533 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12535 //===--------------------------------------------------------------------===//
12536 // Generate target-specific nodes for 128- or 256-bit shuffles that are
12537 // only supported in the AVX instruction set.
12538 //===--------------------------------------------------------------------===//
12540 // Handle VMOVDDUPY permutations
12541 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12542 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12544 // Handle VPERMILPS/D* permutations
12545 if (isVPERMILPMask(M, VT)) {
12546 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12547 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12548 getShuffleSHUFImmediate(SVOp), DAG);
12549 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12550 getShuffleSHUFImmediate(SVOp), DAG);
12551 }
12553 unsigned Idx;
12554 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12555 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12556 Idx*(NumElems/2), DAG, dl);
12558 // Handle VPERM2F128/VPERM2I128 permutations
12559 if (isVPERM2X128Mask(M, VT, HasFp256))
12560 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12561 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12563 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12564 return getINSERTPS(SVOp, dl, DAG);
12566 unsigned Imm8;
12567 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12568 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12570 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12571 VT.is512BitVector()) {
12572 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12573 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12574 SmallVector<SDValue, 16> permclMask;
12575 for (unsigned i = 0; i != NumElems; ++i) {
12576 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12577 }
12579 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12580 if (V2IsUndef)
12581 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
12582 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12583 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12584 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12585 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12586 }
12588 //===--------------------------------------------------------------------===//
12589 // Since no target specific shuffle was selected for this generic one,
12590 // lower it into other known shuffles. FIXME: this isn't true yet, but
12591 // this is the plan.
12592 //===--------------------------------------------------------------------===//
12594 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12595 if (VT == MVT::v8i16) {
12596 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12597 if (NewOp.getNode())
12598 return NewOp;
12599 }
12601 if (VT == MVT::v16i16 && Subtarget->hasInt256()) {
12602 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12603 if (NewOp.getNode())
12604 return NewOp;
12605 }
12607 if (VT == MVT::v16i8) {
12608 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12609 if (NewOp.getNode())
12610 return NewOp;
12611 }
12613 if (VT == MVT::v32i8) {
12614 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12615 if (NewOp.getNode())
12616 return NewOp;
12617 }
12619 // Handle all 128-bit wide vectors with 4 elements, and match them with
12620 // several different shuffle types.
12621 if (NumElems == 4 && VT.is128BitVector())
12622 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12624 // Handle general 256-bit shuffles
12625 if (VT.is256BitVector())
12626 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
12628 return SDValue();
12629 }
12631 // This function assumes its argument is a BUILD_VECTOR of constants or
12632 // undef SDNodes. i.e: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
12633 // true.
12634 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12635 unsigned &MaskValue) {
12636 MaskValue = 0;
12637 unsigned NumElems = BuildVector->getNumOperands();
12638 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12639 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12640 unsigned NumElemsInLane = NumElems / NumLanes;
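// For example (from the formulas above), a v16i16 blend has NumElems == 16,
// giving NumLanes == 2 and NumElemsInLane == 8, while a v8i16 blend has a
// single lane of 8 elements.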
12642 // A blend for v16i16 must be symmetric across both lanes.
12643 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12644 SDValue EltCond = BuildVector->getOperand(i);
12645 SDValue SndLaneEltCond =
12646 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
12648 int Lane1Cond = -1, Lane2Cond = -1;
12649 if (isa<ConstantSDNode>(EltCond))
12650 Lane1Cond = !isZero(EltCond);
12651 if (isa<ConstantSDNode>(SndLaneEltCond))
12652 Lane2Cond = !isZero(SndLaneEltCond);
12654 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
12655 // Lane1Cond != 0 means we want the first argument.
12656 // Lane1Cond == 0 means we want the second argument.
12657 // The encoding of this argument is 0 for the first argument, 1
12658 // for the second. Therefore, invert the condition.
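// As a worked example (illustrative only): for a v4i32 condition vector
// <-1, 0, -1, 0>, elements 0 and 2 take the first argument and elements
// 1 and 3 take the second; with the inverted encoding this produces
// MaskValue == 0b1010.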
12659 MaskValue |= !Lane1Cond << i;
12660 else if (Lane1Cond < 0)
12661 MaskValue |= !Lane2Cond << i;
12662 else
12663 return false;
12664 }
12666 return true;
12667 }
12668 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
12669 /// instruction.
12670 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
12671 SelectionDAG &DAG) {
12672 SDValue Cond = Op.getOperand(0);
12673 SDValue LHS = Op.getOperand(1);
12674 SDValue RHS = Op.getOperand(2);
12675 SDLoc dl(Op);
12676 MVT VT = Op.getSimpleValueType();
12677 MVT EltVT = VT.getVectorElementType();
12678 unsigned NumElems = VT.getVectorNumElements();
12680 // There is no blend with immediate in AVX-512.
12681 if (VT.is512BitVector())
12682 return SDValue();
12684 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
12685 return SDValue();
12686 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
12687 return SDValue();
12689 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
12690 return SDValue();
12692 // Check the mask for BLEND and build the value.
12693 unsigned MaskValue = 0;
12694 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
12695 return SDValue();
12697 // Convert i32 vectors to floating point if it is not AVX2.
12698 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
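// E.g., without AVX2 a v4i32 blend is bitcast to v4f32 here so an
// immediate-controlled BLENDPS can be used; with AVX2 the integer type is
// kept and VPBLENDD can be selected instead.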
12699 MVT BlendVT = VT;
12700 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
12701 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
12702 NumElems);
12703 LHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, LHS);
12704 RHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, RHS);
12705 }
12707 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
12708 DAG.getConstant(MaskValue, MVT::i32));
12709 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
12710 }
12712 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
12713 // A vselect where all conditions and data are constants can be optimized into
12714 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
12715 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
12716 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
12717 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
12718 return SDValue();
12720 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
12721 if (BlendOp.getNode())
12722 return BlendOp;
12724 // Some types for vselect were previously set to Expand, not Legal or
12725 // Custom. Return an empty SDValue so we fall-through to Expand, after
12726 // the Custom lowering phase.
12727 MVT VT = Op.getSimpleValueType();
12728 switch (VT.SimpleTy) {
12729 default:
12730 break;
12731 case MVT::v8i16:
12732 case MVT::v16i16:
12733 if (Subtarget->hasBWI() && Subtarget->hasVLX())
12734 break;
12735 return SDValue();
12736 }
12738 // We couldn't create a "Blend with immediate" node.
12739 // This node should still be legal, but we'll have to emit a blendv*
12740 // instruction.
12741 return Op;
12742 }
12744 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
12745 MVT VT = Op.getSimpleValueType();
12746 SDLoc dl(Op);
12748 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
12749 return SDValue();
12751 if (VT.getSizeInBits() == 8) {
12752 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
12753 Op.getOperand(0), Op.getOperand(1));
12754 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12755 DAG.getValueType(VT));
12756 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12757 }
12759 if (VT.getSizeInBits() == 16) {
12760 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12761 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
12762 if (Idx == 0)
12763 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12764 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12765 DAG.getNode(ISD::BITCAST, dl,
12766 MVT::v4i32,
12767 Op.getOperand(0)),
12768 Op.getOperand(1)));
12769 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
12770 Op.getOperand(0), Op.getOperand(1));
12771 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12772 DAG.getValueType(VT));
12773 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12774 }
12776 if (VT == MVT::f32) {
12777 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
12778 // the result back to FR32 register. It's only worth matching if the
12779 // result has a single use which is a store or a bitcast to i32. And in
12780 // the case of a store, it's not worth it if the index is a constant 0,
12781 // because a MOVSSmr can be used instead, which is smaller and faster.
12782 if (!Op.hasOneUse())
12783 return SDValue();
12784 SDNode *User = *Op.getNode()->use_begin();
12785 if ((User->getOpcode() != ISD::STORE ||
12786 (isa<ConstantSDNode>(Op.getOperand(1)) &&
12787 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
12788 (User->getOpcode() != ISD::BITCAST ||
12789 User->getValueType(0) != MVT::i32))
12790 return SDValue();
12791 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12792 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
12793 Op.getOperand(0)),
12794 Op.getOperand(1));
12795 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
12796 }
12798 if (VT == MVT::i32 || VT == MVT::i64) {
12799 // ExtractPS/pextrq works with constant index.
12800 if (isa<ConstantSDNode>(Op.getOperand(1)))
12801 return Op;
12802 }
12804 return SDValue();
12805 }
12806 /// Extract one bit from a mask vector, like v16i1 or v8i1.
12807 /// AVX-512 feature.
12808 SDValue
12809 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
12810 SDValue Vec = Op.getOperand(0);
12811 SDLoc dl(Vec);
12812 MVT VecVT = Vec.getSimpleValueType();
12813 SDValue Idx = Op.getOperand(1);
12814 MVT EltVT = Op.getSimpleValueType();
12816 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
12818 // A variable index can't be handled in mask registers;
12819 // extend the vector to VR512.
12820 if (!isa<ConstantSDNode>(Idx)) {
12821 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
12822 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
12823 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
12824 ExtVT.getVectorElementType(), Ext, Idx);
12825 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
12826 }
12828 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12829 const TargetRegisterClass* rc = getRegClassFor(VecVT);
12830 unsigned MaxShift = rc->getSize()*8 - 1;
12831 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
12832 DAG.getConstant(MaxShift - IdxVal, MVT::i8));
12833 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
12834 DAG.getConstant(MaxShift, MVT::i8));
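// For example, extracting bit 5 of a v16i1 mask (a 16-bit mask register,
// so MaxShift == 15): shifting left by 10 moves bit 5 to bit 15, and the
// logical shift right by 15 leaves it isolated in bit 0.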
12835 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
12836 DAG.getIntPtrConstant(0));
12837 }
12839 SDValue
12840 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
12841 SelectionDAG &DAG) const {
12842 SDLoc dl(Op);
12843 SDValue Vec = Op.getOperand(0);
12844 MVT VecVT = Vec.getSimpleValueType();
12845 SDValue Idx = Op.getOperand(1);
12847 if (Op.getSimpleValueType() == MVT::i1)
12848 return ExtractBitFromMaskVector(Op, DAG);
12850 if (!isa<ConstantSDNode>(Idx)) {
12851 if (VecVT.is512BitVector() ||
12852 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
12853 VecVT.getVectorElementType().getSizeInBits() == 32)) {
12855 MVT MaskEltVT =
12856 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
12857 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
12858 MaskEltVT.getSizeInBits());
12860 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
12861 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
12862 getZeroVector(MaskVT, Subtarget, DAG, dl),
12863 Idx, DAG.getConstant(0, getPointerTy()));
12864 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
12865 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
12866 Perm, DAG.getConstant(0, getPointerTy()));
12867 }
12868 return SDValue();
12869 }
12871 // If this is a 256-bit vector result, first extract the 128-bit vector and
12872 // then extract the element from the 128-bit vector.
12873 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
12875 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12876 // Get the 128-bit vector.
12877 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
12878 MVT EltVT = VecVT.getVectorElementType();
12880 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
12883 // Reduce the index to be relative to the extracted 128-bit chunk.
12884 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
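// E.g., extracting element 5 of a v8i32: the second 128-bit chunk is
// extracted above and the index becomes 5 - 4 = 1 within that chunk.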
12885 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
12886 DAG.getConstant(IdxVal, MVT::i32));
12887 }
12889 assert(VecVT.is128BitVector() && "Unexpected vector length");
12891 if (Subtarget->hasSSE41()) {
12892 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
12893 if (Res.getNode())
12894 return Res;
12895 }
12897 MVT VT = Op.getSimpleValueType();
12898 // TODO: handle v16i8.
12899 if (VT.getSizeInBits() == 16) {
12900 SDValue Vec = Op.getOperand(0);
12901 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12902 if (Idx == 0)
12903 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12904 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12905 DAG.getNode(ISD::BITCAST, dl,
12906 MVT::v4i32, Vec),
12907 Op.getOperand(1)));
12908 // Transform it so it matches pextrw, which produces a 32-bit result.
12909 MVT EltVT = MVT::i32;
12910 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
12911 Op.getOperand(0), Op.getOperand(1));
12912 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
12913 DAG.getValueType(VT));
12914 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12915 }
12917 if (VT.getSizeInBits() == 32) {
12918 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12919 if (Idx == 0)
12920 return Op;
12922 // SHUFPS the element to the lowest double word, then movss.
12923 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
12924 MVT VVT = Op.getOperand(0).getSimpleValueType();
12925 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
12926 DAG.getUNDEF(VVT), Mask);
12927 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
12928 DAG.getIntPtrConstant(0));
12929 }
12931 if (VT.getSizeInBits() == 64) {
12932 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
12933 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
12934 // to match extract_elt for f64.
12935 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12936 if (Idx == 0)
12937 return Op;
12939 // UNPCKHPD the element to the lowest double word, then movsd.
12940 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
12941 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
12942 int Mask[2] = { 1, -1 };
12943 MVT VVT = Op.getOperand(0).getSimpleValueType();
12944 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
12945 DAG.getUNDEF(VVT), Mask);
12946 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
12947 DAG.getIntPtrConstant(0));
12948 }
12950 return SDValue();
12951 }
12953 /// Insert one bit into a mask vector, like v16i1 or v8i1.
12954 /// AVX-512 feature.
12955 SDValue
12956 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
12957 SDLoc dl(Op);
12958 SDValue Vec = Op.getOperand(0);
12959 SDValue Elt = Op.getOperand(1);
12960 SDValue Idx = Op.getOperand(2);
12961 MVT VecVT = Vec.getSimpleValueType();
12963 if (!isa<ConstantSDNode>(Idx)) {
12964 // Non-constant index: extend source and destination,
12965 // insert element and then truncate the result.
12966 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
12967 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
12968 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
12969 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
12970 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
12971 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
12972 }
12974 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12975 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
12976 if (Vec.getOpcode() == ISD::UNDEF)
12977 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
12978 DAG.getConstant(IdxVal, MVT::i8));
12979 const TargetRegisterClass* rc = getRegClassFor(VecVT);
12980 unsigned MaxShift = rc->getSize()*8 - 1;
12981 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
12982 DAG.getConstant(MaxShift, MVT::i8));
12983 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
12984 DAG.getConstant(MaxShift - IdxVal, MVT::i8));
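// For example, inserting a bit at IdxVal == 3 into v8i1 (assuming an 8-bit
// mask register, MaxShift == 7): the new element is shifted left by 7 to
// isolate bit 0, shifted right by 4 to land at bit 3, and then ORed into
// the existing mask below.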
12985 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
12986 }
12988 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
12989 SelectionDAG &DAG) const {
12990 MVT VT = Op.getSimpleValueType();
12991 MVT EltVT = VT.getVectorElementType();
12993 if (EltVT == MVT::i1)
12994 return InsertBitToMaskVector(Op, DAG);
12996 SDLoc dl(Op);
12997 SDValue N0 = Op.getOperand(0);
12998 SDValue N1 = Op.getOperand(1);
12999 SDValue N2 = Op.getOperand(2);
13000 if (!isa<ConstantSDNode>(N2))
13001 return SDValue();
13002 auto *N2C = cast<ConstantSDNode>(N2);
13003 unsigned IdxVal = N2C->getZExtValue();
13005 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13006 // into that, and then insert the subvector back into the result.
13007 if (VT.is256BitVector() || VT.is512BitVector()) {
13008 // Get the desired 128-bit vector half.
13009 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13011 // Insert the element into the desired half.
13012 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13013 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
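// E.g., inserting element 9 of a v16i16: the element lives in 128-bit
// chunk 1 (NumEltsIn128 == 8), so IdxIn128 == 9 - 8 == 1.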
13015 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13016 DAG.getConstant(IdxIn128, MVT::i32));
13018 // Insert the changed part back to the 256-bit vector
13019 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13020 }
13021 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13023 if (Subtarget->hasSSE41()) {
13024 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13025 unsigned Opc;
13026 if (VT == MVT::v8i16) {
13027 Opc = X86ISD::PINSRW;
13028 } else {
13029 assert(VT == MVT::v16i8);
13030 Opc = X86ISD::PINSRB;
13031 }
13033 // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
13034 // argument.
13035 if (N1.getValueType() != MVT::i32)
13036 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13037 if (N2.getValueType() != MVT::i32)
13038 N2 = DAG.getIntPtrConstant(IdxVal);
13039 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13040 }
13042 if (EltVT == MVT::f32) {
13043 // Bits [7:6] of the constant are the source select. This will always be
13044 // zero here. The DAG Combiner may combine an extract_elt index into these
13045 // bits. For example (insert (extract, 3), 2) could be matched by putting
13046 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13049 // Bits [5:4] of the constant are the destination select. This is the
13050 // value of the incoming immediate.
13051 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13052 // combine either bitwise AND or insert of float 0.0 to set these bits.
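// For example, inserting into element 2 yields an immediate of 2 << 4 ==
// 0x20; if the DAG Combiner folds in an extract from element 3, the combined
// immediate would be (3 << 6) | (2 << 4) == 0xE0.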
13053 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13054 // Create this as a scalar to vector.
13055 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13056 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13057 }
13059 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13060 // PINSR* works with constant index.
13061 return Op;
13062 }
13063 }
13065 if (EltVT == MVT::i8)
13066 return SDValue();
13068 if (EltVT.getSizeInBits() == 16) {
13069 // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
13070 // as its second argument.
13071 if (N1.getValueType() != MVT::i32)
13072 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13073 if (N2.getValueType() != MVT::i32)
13074 N2 = DAG.getIntPtrConstant(IdxVal);
13075 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13076 }
13078 return SDValue();
13079 }
13080 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13081 SDLoc dl(Op);
13082 MVT OpVT = Op.getSimpleValueType();
13084 // If this is a 256-bit vector result, first insert into a 128-bit
13085 // vector and then insert into the 256-bit vector.
13086 if (!OpVT.is128BitVector()) {
13087 // Insert into a 128-bit vector.
13088 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13089 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13090 OpVT.getVectorNumElements() / SizeFactor);
13092 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13094 // Insert the 128-bit vector.
13095 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13096 }
13098 if (OpVT == MVT::v1i64 &&
13099 Op.getOperand(0).getValueType() == MVT::i64)
13100 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13102 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13103 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13104 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13105 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
13106 }
13108 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13109 // a simple subregister reference or explicit instructions to grab
13110 // upper bits of a vector.
13111 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13112 SelectionDAG &DAG) {
13113 SDLoc dl(Op);
13114 SDValue In = Op.getOperand(0);
13115 SDValue Idx = Op.getOperand(1);
13116 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13117 MVT ResVT = Op.getSimpleValueType();
13118 MVT InVT = In.getSimpleValueType();
13120 if (Subtarget->hasFp256()) {
13121 if (ResVT.is128BitVector() &&
13122 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13123 isa<ConstantSDNode>(Idx)) {
13124 return Extract128BitVector(In, IdxVal, DAG, dl);
13125 }
13126 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13127 isa<ConstantSDNode>(Idx)) {
13128 return Extract256BitVector(In, IdxVal, DAG, dl);
13129 }
13130 }
13132 return SDValue();
13133 }
13134 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13135 // simple superregister reference or explicit instructions to insert
13136 // the upper bits of a vector.
13137 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13138 SelectionDAG &DAG) {
13139 if (Subtarget->hasFp256()) {
13140 SDLoc dl(Op.getNode());
13141 SDValue Vec = Op.getNode()->getOperand(0);
13142 SDValue SubVec = Op.getNode()->getOperand(1);
13143 SDValue Idx = Op.getNode()->getOperand(2);
13145 if ((Op.getNode()->getSimpleValueType(0).is256BitVector() ||
13146 Op.getNode()->getSimpleValueType(0).is512BitVector()) &&
13147 SubVec.getNode()->getSimpleValueType(0).is128BitVector() &&
13148 isa<ConstantSDNode>(Idx)) {
13149 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13150 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13151 }
13153 if (Op.getNode()->getSimpleValueType(0).is512BitVector() &&
13154 SubVec.getNode()->getSimpleValueType(0).is256BitVector() &&
13155 isa<ConstantSDNode>(Idx)) {
13156 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13157 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13158 }
13159 }
13161 return SDValue();
13162 }
13163 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13164 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13165 // one of the above mentioned nodes. It has to be wrapped because otherwise
13166 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13167 // be used to form the addressing mode. These wrapped nodes will be selected
13168 // into MOV32ri.
13169 SDValue
13170 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13171 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13173 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13174 // global base reg.
13175 unsigned char OpFlag = 0;
13176 unsigned WrapperKind = X86ISD::Wrapper;
13177 CodeModel::Model M = DAG.getTarget().getCodeModel();
13179 if (Subtarget->isPICStyleRIPRel() &&
13180 (M == CodeModel::Small || M == CodeModel::Kernel))
13181 WrapperKind = X86ISD::WrapperRIP;
13182 else if (Subtarget->isPICStyleGOT())
13183 OpFlag = X86II::MO_GOTOFF;
13184 else if (Subtarget->isPICStyleStubPIC())
13185 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13187 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13188 CP->getAlignment(),
13189 CP->getOffset(), OpFlag);
13190 SDLoc DL(CP);
13191 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13192 // With PIC, the address is actually $g + Offset.
13193 if (OpFlag) {
13194 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13195 DAG.getNode(X86ISD::GlobalBaseReg,
13196 SDLoc(), getPointerTy()),
13197 Result);
13198 }
13200 return Result;
13201 }
13203 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13204 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13206 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13207 // global base reg.
13208 unsigned char OpFlag = 0;
13209 unsigned WrapperKind = X86ISD::Wrapper;
13210 CodeModel::Model M = DAG.getTarget().getCodeModel();
13212 if (Subtarget->isPICStyleRIPRel() &&
13213 (M == CodeModel::Small || M == CodeModel::Kernel))
13214 WrapperKind = X86ISD::WrapperRIP;
13215 else if (Subtarget->isPICStyleGOT())
13216 OpFlag = X86II::MO_GOTOFF;
13217 else if (Subtarget->isPICStyleStubPIC())
13218 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13220 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13221 OpFlag);
13222 SDLoc DL(JT);
13223 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13225 // With PIC, the address is actually $g + Offset.
13226 if (OpFlag)
13227 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13228 DAG.getNode(X86ISD::GlobalBaseReg,
13229 SDLoc(), getPointerTy()),
13230 Result);
13232 return Result;
13233 }
13235 SDValue
13236 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13237 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13239 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13240 // global base reg.
13241 unsigned char OpFlag = 0;
13242 unsigned WrapperKind = X86ISD::Wrapper;
13243 CodeModel::Model M = DAG.getTarget().getCodeModel();
13245 if (Subtarget->isPICStyleRIPRel() &&
13246 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13247 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13248 OpFlag = X86II::MO_GOTPCREL;
13249 WrapperKind = X86ISD::WrapperRIP;
13250 } else if (Subtarget->isPICStyleGOT()) {
13251 OpFlag = X86II::MO_GOT;
13252 } else if (Subtarget->isPICStyleStubPIC()) {
13253 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13254 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13255 OpFlag = X86II::MO_DARWIN_NONLAZY;
13256 }
13258 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13260 SDLoc DL(Op);
13261 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13263 // With PIC, the address is actually $g + Offset.
13264 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13265 !Subtarget->is64Bit()) {
13266 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13267 DAG.getNode(X86ISD::GlobalBaseReg,
13268 SDLoc(), getPointerTy()),
13269 Result);
13270 }
13272 // For symbols that require a load from a stub to get the address, emit the
13273 // load.
13274 if (isGlobalStubReference(OpFlag))
13275 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13276 MachinePointerInfo::getGOT(), false, false, false, 0);
13278 return Result;
13279 }
13281 SDValue
13282 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13283 // Create the TargetBlockAddressAddress node.
13284 unsigned char OpFlags =
13285 Subtarget->ClassifyBlockAddressReference();
13286 CodeModel::Model M = DAG.getTarget().getCodeModel();
13287 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13288 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13289 SDLoc dl(Op);
13290 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13291 OpFlags);
13293 if (Subtarget->isPICStyleRIPRel() &&
13294 (M == CodeModel::Small || M == CodeModel::Kernel))
13295 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13296 else
13297 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13299 // With PIC, the address is actually $g + Offset.
13300 if (isGlobalRelativeToPICBase(OpFlags)) {
13301 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13302 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13303 Result);
13304 }
13306 return Result;
13307 }
13309 SDValue
13310 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13311 int64_t Offset, SelectionDAG &DAG) const {
13312 // Create the TargetGlobalAddress node, folding in the constant
13313 // offset if it is legal.
13314 unsigned char OpFlags =
13315 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13316 CodeModel::Model M = DAG.getTarget().getCodeModel();
13317 SDValue Result;
13318 if (OpFlags == X86II::MO_NO_FLAG &&
13319 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13320 // A direct static reference to a global.
13321 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13322 Offset = 0;
13323 } else {
13324 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13325 }
13327 if (Subtarget->isPICStyleRIPRel() &&
13328 (M == CodeModel::Small || M == CodeModel::Kernel))
13329 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13330 else
13331 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13333 // With PIC, the address is actually $g + Offset.
13334 if (isGlobalRelativeToPICBase(OpFlags)) {
13335 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13336 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13337 Result);
13338 }
13340 // For globals that require a load from a stub to get the address, emit the
13341 // load.
13342 if (isGlobalStubReference(OpFlags))
13343 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13344 MachinePointerInfo::getGOT(), false, false, false, 0);
13346 // If there was a non-zero offset that we didn't fold, create an explicit
13347 // addition for it.
13348 if (Offset != 0)
13349 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13350 DAG.getConstant(Offset, getPointerTy()));
13352 return Result;
13353 }
13355 SDValue
13356 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13357 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13358 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13359 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13360 }
13362 static SDValue
13363 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13364 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13365 unsigned char OperandFlags, bool LocalDynamic = false) {
13366 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13367 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13368 SDLoc dl(GA);
13369 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13370 GA->getValueType(0),
13371 GA->getOffset(),
13372 OperandFlags);
13374 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13375 : X86ISD::TLSADDR;
13377 if (InFlag) {
13378 SDValue Ops[] = { Chain, TGA, *InFlag };
13379 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13380 } else {
13381 SDValue Ops[] = { Chain, TGA };
13382 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13383 }
13385 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13386 MFI->setAdjustsStack(true);
13387 MFI->setHasCalls(true);
13389 SDValue Flag = Chain.getValue(1);
13390 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13391 }
13393 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13394 static SDValue
13395 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13396 const EVT PtrVT) {
13397 SDValue InFlag;
13398 SDLoc dl(GA); // ? function entry point might be better
13399 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13400 DAG.getNode(X86ISD::GlobalBaseReg,
13401 SDLoc(), PtrVT), InFlag);
13402 InFlag = Chain.getValue(1);
13404 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13405 }
13407 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13408 static SDValue
13409 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13410 const EVT PtrVT) {
13411 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13412 X86::RAX, X86II::MO_TLSGD);
13413 }
13415 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13416 SelectionDAG &DAG,
13417 const EVT PtrVT,
13418 bool is64Bit) {
13419 SDLoc dl(GA);
13421 // Get the start address of the TLS block for this module.
13422 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13423 .getInfo<X86MachineFunctionInfo>();
13424 MFI->incNumLocalDynamicTLSAccesses();
13426 SDValue Base;
13427 if (is64Bit) {
13428 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13429 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13430 } else {
13431 SDValue InFlag;
13432 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13433 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13434 InFlag = Chain.getValue(1);
13435 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13436 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13437 }
13439 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13440 // of Base.
13442 // Build x@dtpoff.
13443 unsigned char OperandFlags = X86II::MO_DTPOFF;
13444 unsigned WrapperKind = X86ISD::Wrapper;
13445 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13446 GA->getValueType(0),
13447 GA->getOffset(), OperandFlags);
13448 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13450 // Add x@dtpoff with the base.
13451 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13452 }
13454 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13455 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13456 const EVT PtrVT, TLSModel::Model model,
13457 bool is64Bit, bool isPIC) {
13458 SDLoc dl(GA);
13460 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13461 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13462 is64Bit ? 257 : 256));
13464 SDValue ThreadPointer =
13465 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13466 MachinePointerInfo(Ptr), false, false, false, 0);
13468 unsigned char OperandFlags = 0;
13469 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13470 // initialexec.
13471 unsigned WrapperKind = X86ISD::Wrapper;
13472 if (model == TLSModel::LocalExec) {
13473 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13474 } else if (model == TLSModel::InitialExec) {
13475 if (is64Bit) {
13476 OperandFlags = X86II::MO_GOTTPOFF;
13477 WrapperKind = X86ISD::WrapperRIP;
13478 } else {
13479 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13480 }
13481 } else {
13482 llvm_unreachable("Unexpected model");
13483 }
13485 // emit "addl x@ntpoff,%eax" (local exec)
13486 // or "addl x@indntpoff,%eax" (initial exec)
13487 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13488 SDValue TGA =
13489 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13490 GA->getOffset(), OperandFlags);
13491 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13493 if (model == TLSModel::InitialExec) {
13494 if (isPIC && !is64Bit) {
13495 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13496 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13497 Offset);
13498 }
13500 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13501 MachinePointerInfo::getGOT(), false, false, false, 0);
13502 }
13504 // The address of the thread local variable is the add of the thread
13505 // pointer with the offset of the variable.
13506 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13507 }
13509 SDValue
13510 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13512 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13513 const GlobalValue *GV = GA->getGlobal();
13515 if (Subtarget->isTargetELF()) {
13516 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13518 switch (model) {
13519 case TLSModel::GeneralDynamic:
13520 if (Subtarget->is64Bit())
13521 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13522 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13523 case TLSModel::LocalDynamic:
13524 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13525 Subtarget->is64Bit());
13526 case TLSModel::InitialExec:
13527 case TLSModel::LocalExec:
13528 return LowerToTLSExecModel(
13529 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13530 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13531 }
13532 llvm_unreachable("Unknown TLS model.");
13533 }
13535 if (Subtarget->isTargetDarwin()) {
13536 // Darwin only has one model of TLS. Lower to that.
13537 unsigned char OpFlag = 0;
13538 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13539 X86ISD::WrapperRIP : X86ISD::Wrapper;
13541 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13542 // global base reg.
13543 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13544 !Subtarget->is64Bit();
13545 if (PIC32)
13546 OpFlag = X86II::MO_TLVP_PIC_BASE;
13547 else
13548 OpFlag = X86II::MO_TLVP;
13549 SDLoc DL(Op);
13550 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13551 GA->getValueType(0),
13552 GA->getOffset(), OpFlag);
13553 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13555 // With PIC32, the address is actually $g + Offset.
13556 if (PIC32)
13557 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13558 DAG.getNode(X86ISD::GlobalBaseReg,
13559 SDLoc(), getPointerTy()),
13560 Offset);
13562 // Lowering the machine ISD will make sure everything is in the right
13563 // place.
13564 SDValue Chain = DAG.getEntryNode();
13565 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13566 SDValue Args[] = { Chain, Offset };
13567 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13569 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
13570 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13571 MFI->setAdjustsStack(true);
13573 // And our return value (tls address) is in the standard call return value
13574 // location.
13575 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13576 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13577 Chain.getValue(1));
13578 }
13580 if (Subtarget->isTargetKnownWindowsMSVC() ||
13581 Subtarget->isTargetWindowsGNU()) {
13582 // Just use the implicit TLS architecture
13583 // Need to generate something similar to:
13584 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13585 // ; from TEB
13586 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13587 // mov rcx, qword [rdx+rcx*8]
13588 // mov eax, .tls$:tlsvar
13589 // [rax+rcx] contains the address
13590 // Windows 64bit: gs:0x58
13591 // Windows 32bit: fs:__tls_array
13593 SDLoc dl(GA);
13594 SDValue Chain = DAG.getEntryNode();
13596 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13597 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13598 // use its literal value of 0x2C.
13599 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13600 ? Type::getInt8PtrTy(*DAG.getContext(),
13601 256)
13602 : Type::getInt32PtrTy(*DAG.getContext(),
13603 257));
13605 SDValue TlsArray =
13606 Subtarget->is64Bit()
13607 ? DAG.getIntPtrConstant(0x58)
13608 : (Subtarget->isTargetWindowsGNU()
13609 ? DAG.getIntPtrConstant(0x2C)
13610 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13612 SDValue ThreadPointer =
13613 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13614 MachinePointerInfo(Ptr), false, false, false, 0);
13616 // Load the _tls_index variable
13617 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13618 if (Subtarget->is64Bit())
13619 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13620 IDX, MachinePointerInfo(), MVT::i32,
13621 false, false, false, 0);
13622 else
13623 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13624 false, false, false, 0);
13626 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
13627 getPointerTy());
13628 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
13630 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
13631 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
13632 false, false, false, 0);
13634 // Get the offset of start of .tls section
13635 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13636 GA->getValueType(0),
13637 GA->getOffset(), X86II::MO_SECREL);
13638 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
13640 // The address of the thread local variable is the add of the thread
13641 // pointer with the offset of the variable.
13642 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
13643 }
13645 llvm_unreachable("TLS not implemented for this target.");
13646 }
13648 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
13649 /// and take a 2 x i32 value to shift plus a shift amount.
13650 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
13651 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
13652 MVT VT = Op.getSimpleValueType();
13653 unsigned VTBits = VT.getSizeInBits();
13654 SDLoc dl(Op);
13655 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
13656 SDValue ShOpLo = Op.getOperand(0);
13657 SDValue ShOpHi = Op.getOperand(1);
13658 SDValue ShAmt = Op.getOperand(2);
13659 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
13660 // generic ISD nodes don't. Insert an AND to be safe; it's optimized away
13661 // during isel.
13662 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13663 DAG.getConstant(VTBits - 1, MVT::i8));
13664 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
13665 DAG.getConstant(VTBits - 1, MVT::i8))
13666 : DAG.getConstant(0, VT);
13668 SDValue Tmp2, Tmp3;
13669 if (Op.getOpcode() == ISD::SHL_PARTS) {
13670 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
13671 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
13672 } else {
13673 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
13674 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
13675 }
13677 // If the shift amount is larger or equal than the width of a part we can't
13678 // rely on the results of shld/shrd. Insert a test and select the appropriate
13679 // values for large shift amounts.
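// For example, an i64 "(Hi,Lo) << 40" on a 32-bit target: the masked amount
// is 8, so Tmp2 = SHLD(Hi, Lo, 40) and Tmp3 = Lo << 8; because 40 & 32 is
// nonzero, the CMOVs below select Hi = Tmp3 and Lo = 0 instead.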
13680 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13681 DAG.getConstant(VTBits, MVT::i8));
13682 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13683 AndNode, DAG.getConstant(0, MVT::i8));
13685 SDValue Hi, Lo;
13686 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13687 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
13688 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
13690 if (Op.getOpcode() == ISD::SHL_PARTS) {
13691 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13692 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13694 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13695 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13698 SDValue Ops[2] = { Lo, Hi };
13699 return DAG.getMergeValues(Ops, dl);
13700 }
13702 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
13703 SelectionDAG &DAG) const {
13704 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
13706 SDLoc dl(Op);
13707 if (SrcVT.isVector()) {
13708 if (SrcVT.getVectorElementType() == MVT::i1) {
13709 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
13710 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13711 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
13712 Op.getOperand(0)));
13713 }
13714 return SDValue();
13715 }
13717 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
13718 "Unknown SINT_TO_FP to lower!");
13720 // These are really Legal; return the operand so the caller accepts it as
13721 // Legal.
13722 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
13723 return Op;
13724 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
13725 Subtarget->is64Bit()) {
13726 return Op;
13727 }
13729 unsigned Size = SrcVT.getSizeInBits()/8;
13730 MachineFunction &MF = DAG.getMachineFunction();
13731 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
13732 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13733 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
13734 StackSlot,
13735 MachinePointerInfo::getFixedStack(SSFI),
13736 false, false, 0);
13737 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
13738 }
13740 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
13741 SDValue StackSlot,
13742 SelectionDAG &DAG) const {
13743 // Build the FILD
13744 SDLoc DL(Op);
13745 SDVTList Tys;
13746 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
13747 if (useSSE)
13748 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
13749 else
13750 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
13752 unsigned ByteSize = SrcVT.getSizeInBits()/8;
13754 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
13755 MachineMemOperand *MMO;
13756 if (FI) {
13757 int SSFI = FI->getIndex();
13758 MMO =
13759 DAG.getMachineFunction()
13760 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13761 MachineMemOperand::MOLoad, ByteSize, ByteSize);
13762 } else {
13763 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
13764 StackSlot = StackSlot.getOperand(1);
13765 }
13766 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
13767 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
13768 X86ISD::FILD, DL,
13769 Tys, Ops, SrcVT, MMO);
13771 if (useSSE) {
13772 Chain = Result.getValue(1);
13773 SDValue InFlag = Result.getValue(2);
13775 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
13776 // shouldn't be necessary except that RFP cannot be live across
13777 // multiple blocks. When stackifier is fixed, they can be uncoupled.
13778 MachineFunction &MF = DAG.getMachineFunction();
13779 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
13780 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
13781 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13782 Tys = DAG.getVTList(MVT::Other);
13783 SDValue Ops[] = {
13784 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
13785 };
13786 MachineMemOperand *MMO =
13787 DAG.getMachineFunction()
13788 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13789 MachineMemOperand::MOStore, SSFISize, SSFISize);
13791 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
13792 Ops, Op.getValueType(), MMO);
13793 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
13794 MachinePointerInfo::getFixedStack(SSFI),
13795 false, false, false, 0);
13796 }
13798 return Result;
13799 }
13801 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
13802 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
13803 SelectionDAG &DAG) const {
13804 // This algorithm is not obvious. Here is what we're trying to output:
13805 /*
13806 movq %rax, %xmm0
13807 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
13808 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
13809 #ifdef __SSE3__
13810 haddpd %xmm0, %xmm0
13811 #else
13812 pshufd $0x4e, %xmm0, %xmm1
13813 addpd %xmm1, %xmm0
13814 #endif
13815 */
13817 SDLoc dl(Op);
13818 LLVMContext *Context = DAG.getContext();
13820 // Build some magic constants.
13821 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
13822 Constant *C0 = ConstantDataVector::get(*Context, CV0);
13823 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
13825 SmallVector<Constant*,2> CV1;
13826 CV1.push_back(
13827 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13828 APInt(64, 0x4330000000000000ULL))));
13829 CV1.push_back(
13830 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13831 APInt(64, 0x4530000000000000ULL))));
13832 Constant *C1 = ConstantVector::get(CV1);
13833 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
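// These constants implement the classic double-double trick: punpckldq
// pairs the input's halves with the high words above, building the doubles
// 2^52 + lo32 (0x43300000) and 2^84 + hi32 * 2^32 (0x45300000); subtracting
// c1 = { 2^52, 2^84 } leaves exactly lo32 and hi32 * 2^32, which the
// horizontal add below recombines into the final double.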
13835 // Load the 64-bit value into an XMM register.
13836 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
13837 Op.getOperand(0));
13838 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
13839 MachinePointerInfo::getConstantPool(),
13840 false, false, false, 16);
13841 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
13842 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
13843 CLod0);
13845 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
13846 MachinePointerInfo::getConstantPool(),
13847 false, false, false, 16);
13848 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
13849 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
13851 SDValue Result;
13852 if (Subtarget->hasSSE3()) {
13853 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
13854 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
13855 } else {
13856 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
13857 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
13858 S2F, 0x4E, DAG);
13859 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
13860 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
13861 Sub);
13862 }
13864 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
13865 DAG.getIntPtrConstant(0));
13866 }
13868 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
13869 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
13870 SelectionDAG &DAG) const {
13871 SDLoc dl(Op);
13872 // FP constant to bias correct the final result.
13873 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
13874 MVT::f64);
13876 // Load the 32-bit value into an XMM register.
13877 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
13878 Op.getOperand(0));
13880 // Zero out the upper parts of the register.
13881 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
13883 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13884 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
13885 DAG.getIntPtrConstant(0));
13887 // Or the load with the bias.
13888 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
13889 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
13890 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
13891 MVT::v2f64, Load)),
13892 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
13893 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
13894 MVT::v2f64, Bias)));
13895 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13896 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
13897 DAG.getIntPtrConstant(0));
13899 // Subtract the bias.
13900 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
13902 // Handle final rounding.
13903 EVT DestVT = Op.getValueType();
13905 if (DestVT.bitsLT(MVT::f64))
13906 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
13907 DAG.getIntPtrConstant(0));
13908 if (DestVT.bitsGT(MVT::f64))
13909 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
13911 // No rounding was needed; the result is already an f64.
13912 return Sub;
13913 }
13915 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
13916 const X86Subtarget &Subtarget) {
13917 // The algorithm is the following:
13918 // #ifdef __SSE4_1__
13919 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
13920 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
13921 // (uint4) 0x53000000, 0xaa);
13922 // #else
13923 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
13924 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
13925 // #endif
13926 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
13927 // return (float4) lo + fhi;
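// The bit patterns make this exact: lo == 0x4b000000 | (v & 0xffff) is the
// float 2^23 + low16, and hi == 0x53000000 | (v >> 16) is 2^39 + high16 *
// 2^16; subtracting (0x1.0p39f + 0x1.0p23f) from hi and adding lo therefore
// reconstructs v before the final rounding of the sum.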
13929 SDLoc DL(Op);
13930 SDValue V = Op->getOperand(0);
13931 EVT VecIntVT = V.getValueType();
13932 bool Is128 = VecIntVT == MVT::v4i32;
13933 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
13934 // If we convert to something other than the supported type, e.g., to v4f64,
13935 // abort early.
13936 if (VecFloatVT != Op->getValueType(0))
13937 return SDValue();
13939 unsigned NumElts = VecIntVT.getVectorNumElements();
13940 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
13941 "Unsupported custom type");
13942 assert(NumElts <= 8 && "The size of the constant array must be fixed");
13944 // In the #ifdef/#else code, we have in common:
13945 // - The vector of constants:
13946 // -- 0x4b000000
13947 // -- 0x53000000
13948 // - A shift:
13949 // -- v >> 16
13951 // Create the splat vector for 0x4b000000.
13952 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
13953 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
13954 CstLow, CstLow, CstLow, CstLow};
13955 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
13956 makeArrayRef(&CstLowArray[0], NumElts));
13957 // Create the splat vector for 0x53000000.
13958 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
13959 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
13960 CstHigh, CstHigh, CstHigh, CstHigh};
13961 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
13962 makeArrayRef(&CstHighArray[0], NumElts));
13964 // Create the right shift.
13965 SDValue CstShift = DAG.getConstant(16, MVT::i32);
13966 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
13967 CstShift, CstShift, CstShift, CstShift};
13968 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
13969 makeArrayRef(&CstShiftArray[0], NumElts));
13970 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
13972 SDValue Low, High;
13973 if (Subtarget.hasSSE41()) {
13974 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
13975 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
13976 SDValue VecCstLowBitcast =
13977 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
13978 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
13979 // Low will be bitcasted right away, so do not bother bitcasting back to its
13980 // original type.
13981 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
13982 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
13983 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
13984 // (uint4) 0x53000000, 0xaa);
13985 SDValue VecCstHighBitcast =
13986 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
13987 SDValue VecShiftBitcast =
13988 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
13989 // High will be bitcasted right away, so do not bother bitcasting back to
13990 // its original type.
13991 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
13992 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
13993 } else {
13994 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
13995 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
13996 CstMask, CstMask, CstMask);
13997 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
13998 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
13999 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14001 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14002 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14003 }
14005 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14006 SDValue CstFAdd = DAG.getConstantFP(
14007 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14008 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14009 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14010 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14011 makeArrayRef(&CstFAddArray[0], NumElts));
14013 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14014 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14015 SDValue FHigh =
14015 SDValue FHigh =
14016 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14017 // return (float4) lo + fhi;
14018 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14019 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14022 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14023 SelectionDAG &DAG) const {
14024 SDValue N0 = Op.getOperand(0);
14025 MVT SVT = N0.getSimpleValueType();
14028 switch (SVT.SimpleTy) {
14029 default:
14030 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14035 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14036 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14037 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
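// Zero-extending the narrower-than-i32 source elements first is safe: the
// extended values always fit in 31 bits, so the signed conversion is exact.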
14041 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14043 llvm_unreachable("Unexpected vector type for custom UINT_TO_FP");
14046 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14047 SelectionDAG &DAG) const {
14048 SDValue N0 = Op.getOperand(0);
14051 if (Op.getValueType().isVector())
14052 return lowerUINT_TO_FP_vec(Op, DAG);
14054 // Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't
14055 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14056 // the optimization here.
14057 if (DAG.SignBitIsZero(N0))
14058 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14060 MVT SrcVT = N0.getSimpleValueType();
14061 MVT DstVT = Op.getSimpleValueType();
14062 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14063 return LowerUINT_TO_FP_i64(Op, DAG);
14064 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14065 return LowerUINT_TO_FP_i32(Op, DAG);
14066 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14069 // Make a 64-bit buffer, and use it to build an FILD.
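// FILD loads a *signed* 64-bit integer. Storing the i32 input in the low
// word and zero in the high word yields a value in [0, 2^32), which FILD
// converts exactly, so no sign fixup is needed on the i32 path.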
14070 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14071 if (SrcVT == MVT::i32) {
14072 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14073 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14074 getPointerTy(), StackSlot, WordOff);
14075 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14076 StackSlot, MachinePointerInfo(),
14078 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14079 OffsetSlot, MachinePointerInfo(),
14081 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14085 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14086 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14087 StackSlot, MachinePointerInfo(),
14089 // For i64 source, we need to add the appropriate power of 2 if the input
14090 // was negative. This is the same as the optimization in
14091 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14092 // we must be careful to do the computation in x87 extended precision, not
14093 // in SSE. (The generic code can't know it's OK to do this, or how to.)
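// Concretely: FILD reads the buffer as signed, so an input with the sign
// bit set is loaded as v - 2^64. The fudge factor below (0x5F800000 is the
// f32 bit pattern of 2^64) is added back, in f80, only when the sign bit
// was set.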
14094 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14095 MachineMemOperand *MMO =
14096 DAG.getMachineFunction()
14097 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14098 MachineMemOperand::MOLoad, 8, 8);
14100 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14101 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14102 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14105 APInt FF(32, 0x5F800000ULL);
14107 // Check whether the sign bit is set.
14108 SDValue SignSet = DAG.getSetCC(dl,
14109 getSetCCResultType(*DAG.getContext(), MVT::i64),
14110 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14113 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14114 SDValue FudgePtr = DAG.getConstantPool(
14115 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14118 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14119 SDValue Zero = DAG.getIntPtrConstant(0);
14120 SDValue Four = DAG.getIntPtrConstant(4);
14121 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14123 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14125 // Load the value out, extending it from f32 to f80.
14126 // FIXME: Avoid the extend by constructing the right constant pool?
14127 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14128 FudgePtr, MachinePointerInfo::getConstantPool(),
14129 MVT::f32, false, false, false, 4);
14130 // Extend everything to 80 bits to force it to be done on x87.
14131 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14132 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14135 std::pair<SDValue,SDValue>
14136 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14137 bool IsSigned, bool IsReplace) const {
14140 EVT DstTy = Op.getValueType();
14142 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14143 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14147 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14148 DstTy.getSimpleVT() >= MVT::i16 &&
14149 "Unknown FP_TO_INT to lower!");
14151 // These are really Legal.
14152 if (DstTy == MVT::i32 &&
14153 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14154 return std::make_pair(SDValue(), SDValue());
14155 if (Subtarget->is64Bit() &&
14156 DstTy == MVT::i64 &&
14157 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14158 return std::make_pair(SDValue(), SDValue());
14160 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14161 // stack slot, or into the FTOL runtime function.
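// (The FTOL path targets the Win32 runtime conversion routine, _ftol2,
// which returns the 64-bit result in EDX:EAX; see the CopyFromReg pair
// at the end of this function.)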
14162 MachineFunction &MF = DAG.getMachineFunction();
14163 unsigned MemSize = DstTy.getSizeInBits()/8;
14164 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14165 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14167 unsigned Opc;
14168 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14169 Opc = X86ISD::WIN_FTOL;
14170 else
14171 switch (DstTy.getSimpleVT().SimpleTy) {
14172 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14173 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14174 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14175 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14178 SDValue Chain = DAG.getEntryNode();
14179 SDValue Value = Op.getOperand(0);
14180 EVT TheVT = Op.getOperand(0).getValueType();
14181 // FIXME This causes a redundant load/store if the SSE-class value is already
14182 // in memory, such as if it is on the callstack.
14183 if (isScalarFPTypeInSSEReg(TheVT)) {
14184 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14185 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14186 MachinePointerInfo::getFixedStack(SSFI),
14188 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14189 SDValue Ops[] = {
14190 Chain, StackSlot, DAG.getValueType(TheVT)
14191 };
14193 MachineMemOperand *MMO =
14194 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14195 MachineMemOperand::MOLoad, MemSize, MemSize);
14196 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14197 Chain = Value.getValue(1);
14198 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14199 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14202 MachineMemOperand *MMO =
14203 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14204 MachineMemOperand::MOStore, MemSize, MemSize);
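// FP_TO_INT*_IN_MEM is a pseudo that is expanded to swap the x87 control
// word to round-toward-zero, FIST the value into StackSlot, and restore
// the control word, giving C-style truncating conversion semantics.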
14206 if (Opc != X86ISD::WIN_FTOL) {
14207 // Build the FP_TO_INT*_IN_MEM
14208 SDValue Ops[] = { Chain, Value, StackSlot };
14209 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14211 return std::make_pair(FIST, StackSlot);
14213 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14214 DAG.getVTList(MVT::Other, MVT::Glue),
14216 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14217 MVT::i32, ftol.getValue(1));
14218 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14219 MVT::i32, eax.getValue(2));
14220 SDValue Ops[] = { eax, edx };
14221 SDValue pair = IsReplace
14222 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14223 : DAG.getMergeValues(Ops, DL);
14224 return std::make_pair(pair, SDValue());
14228 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14229 const X86Subtarget *Subtarget) {
14230 MVT VT = Op->getSimpleValueType(0);
14231 SDValue In = Op->getOperand(0);
14232 MVT InVT = In.getSimpleValueType();
14235 // Optimize vectors in AVX mode:
14238 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14239 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14240 // Concat upper and lower parts.
14243 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14244 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14245 // Concat upper and lower parts.
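// e.g. for v8i16 -> v8i32 without AVX2, with Z denoting the zero vector:
//   unpcklwd(v, Z) = <a,0,b,0,c,0,d,0>, which read as v4i32 is the
//   zero-extended <a,b,c,d>; unpckhwd produces the upper half the same way.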
14248 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14249 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14250 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14253 if (Subtarget->hasInt256())
14254 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14256 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14257 SDValue Undef = DAG.getUNDEF(InVT);
14258 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14259 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14260 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14262 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14263 VT.getVectorNumElements()/2);
14265 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14266 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14268 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14271 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14272 SelectionDAG &DAG) {
14273 MVT VT = Op->getSimpleValueType(0);
14274 SDValue In = Op->getOperand(0);
14275 MVT InVT = In.getSimpleValueType();
14277 unsigned NumElts = VT.getVectorNumElements();
14278 if (NumElts != 8 && NumElts != 16)
14281 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14282 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14284 EVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
14285 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14286 // Now we have only mask extension
14287 assert(InVT.getVectorElementType() == MVT::i1);
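// An i1 mask is extended by broadcasting the constant 1 under the mask:
// VBROADCASTM yields lane i == In[i] ? 1 : 0, i.e. the zero-extension of
// each mask bit into a wide integer lane.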
14288 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14289 const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
14290 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14291 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14292 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14293 MachinePointerInfo::getConstantPool(),
14294 false, false, false, Alignment);
14296 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14297 if (VT.is512BitVector())
14299 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14302 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14303 SelectionDAG &DAG) {
14304 if (Subtarget->hasFp256()) {
14305 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14313 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14314 SelectionDAG &DAG) {
14316 MVT VT = Op.getSimpleValueType();
14317 SDValue In = Op.getOperand(0);
14318 MVT SVT = In.getSimpleValueType();
14320 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14321 return LowerZERO_EXTEND_AVX512(Op, DAG);
14323 if (Subtarget->hasFp256()) {
14324 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14329 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14330 VT.getVectorNumElements() != SVT.getVectorNumElements());
14334 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14336 MVT VT = Op.getSimpleValueType();
14337 SDValue In = Op.getOperand(0);
14338 MVT InVT = In.getSimpleValueType();
14340 if (VT == MVT::i1) {
14341 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14342 "Invalid scalar TRUNCATE operation");
14343 if (InVT.getSizeInBits() >= 32)
14345 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14346 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14348 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14349 "Invalid TRUNCATE operation");
14351 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14352 if (VT.getVectorElementType().getSizeInBits() >= 8)
14353 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14355 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14356 unsigned NumElts = InVT.getVectorNumElements();
14357 assert((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14358 if (InVT.getSizeInBits() < 512) {
14359 MVT ExtVT = (NumElts == 16) ? MVT::v16i32 : MVT::v8i64;
14360 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14364 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14365 const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
14366 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14367 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14368 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14369 MachinePointerInfo::getConstantPool(),
14370 false, false, false, Alignment);
14371 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14372 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14373 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14376 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14377 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14378 if (Subtarget->hasInt256()) {
14379 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14380 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14381 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14383 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14384 DAG.getIntPtrConstant(0));
14387 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14388 DAG.getIntPtrConstant(0));
14389 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14390 DAG.getIntPtrConstant(2));
14391 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14392 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14393 static const int ShufMask[] = {0, 2, 4, 6};
14394 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14397 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14398 // On AVX2, v8i32 -> v8i16 becomes a PSHUFB.
14399 if (Subtarget->hasInt256()) {
14400 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14402 SmallVector<SDValue,32> pshufbMask;
14403 for (unsigned i = 0; i < 2; ++i) {
14404 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14405 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14406 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14407 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14408 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14409 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14410 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14411 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14412 for (unsigned j = 0; j < 8; ++j)
14413 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
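// A 0x80 byte in a PSHUFB mask zeroes the destination byte, so each
// 128-bit half gathers the low 16 bits of its four i32 lanes into its low
// 8 bytes and clears the remaining 8 bytes.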
14415 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14416 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14417 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14419 static const int ShufMask[] = {0, 2, -1, -1};
14420 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14422 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14423 DAG.getIntPtrConstant(0));
14424 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14427 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14428 DAG.getIntPtrConstant(0));
14430 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14431 DAG.getIntPtrConstant(4));
14433 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14434 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14436 // The PSHUFB mask:
14437 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14438 -1, -1, -1, -1, -1, -1, -1, -1};
14440 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14441 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14442 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14444 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14445 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14447 // The MOVLHPS Mask:
14448 static const int ShufMask2[] = {0, 1, 4, 5};
14449 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14450 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14453 // Handle truncation of V256 to V128 using shuffles.
14454 if (!VT.is128BitVector() || !InVT.is256BitVector())
14457 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14459 unsigned NumElems = VT.getVectorNumElements();
14460 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14462 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14463 // Prepare truncation shuffle mask
14464 for (unsigned i = 0; i != NumElems; ++i)
14465 MaskVec[i] = i * 2;
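// Taking every even element of the bitcast double-length vector keeps the
// low half of each source element (little-endian), which is exactly the
// truncated value.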
14466 SDValue V = DAG.getVectorShuffle(NVT, DL,
14467 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14468 DAG.getUNDEF(NVT), &MaskVec[0]);
14469 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14470 DAG.getIntPtrConstant(0));
14473 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14474 SelectionDAG &DAG) const {
14475 assert(!Op.getSimpleValueType().isVector());
14477 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14478 /*IsSigned=*/ true, /*IsReplace=*/ false);
14479 SDValue FIST = Vals.first, StackSlot = Vals.second;
14480 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14481 if (!FIST.getNode()) return Op;
14483 if (StackSlot.getNode())
14484 // Load the result.
14485 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14486 FIST, StackSlot, MachinePointerInfo(),
14487 false, false, false, 0);
14489 // The node is the result.
14493 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14494 SelectionDAG &DAG) const {
14495 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14496 /*IsSigned=*/ false, /*IsReplace=*/ false);
14497 SDValue FIST = Vals.first, StackSlot = Vals.second;
14498 assert(FIST.getNode() && "Unexpected failure");
14500 if (StackSlot.getNode())
14501 // Load the result.
14502 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14503 FIST, StackSlot, MachinePointerInfo(),
14504 false, false, false, 0);
14506 // The node is the result.
14510 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14512 MVT VT = Op.getSimpleValueType();
14513 SDValue In = Op.getOperand(0);
14514 MVT SVT = In.getSimpleValueType();
14516 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14518 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14519 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14520 In, DAG.getUNDEF(SVT)));
14523 /// The only differences between FABS and FNEG are the mask and the logic op.
14524 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14525 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14526 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14527 "Wrong opcode for lowering FABS or FNEG.");
14529 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14531 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14532 // into an FNABS. We'll lower the FABS after that if it is still in use.
14534 for (SDNode *User : Op->uses())
14535 if (User->getOpcode() == ISD::FNEG)
14538 SDValue Op0 = Op.getOperand(0);
14539 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14542 MVT VT = Op.getSimpleValueType();
14543 // Assume scalar op for initialization; update for vector if needed.
14544 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14545 // generate a 16-byte vector constant and logic op even for the scalar case.
14546 // Using a 16-byte mask allows folding the load of the mask with
14547 // the logic op, so it can save (~4 bytes) on code size.
14548 MVT EltVT = VT;
14549 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14550 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14551 // decide if we should generate a 16-byte constant mask when we only need 4 or
14552 // 8 bytes for the scalar case.
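// In scalar terms, for f32: FABS is (x & 0x7fffffff), FNEG is
// (x ^ 0x80000000), and the folded FNABS is (x | 0x80000000); f64 uses the
// analogous 64-bit masks.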
14553 if (VT.isVector()) {
14554 EltVT = VT.getVectorElementType();
14555 NumElts = VT.getVectorNumElements();
14558 unsigned EltBits = EltVT.getSizeInBits();
14559 LLVMContext *Context = DAG.getContext();
14560 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
14561 APInt MaskElt =
14562 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14563 Constant *C = ConstantInt::get(*Context, MaskElt);
14564 C = ConstantVector::getSplat(NumElts, C);
14565 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14566 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14567 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14568 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14569 MachinePointerInfo::getConstantPool(),
14570 false, false, false, Alignment);
14572 if (VT.isVector()) {
14573 // For a vector, cast operands to a vector type, perform the logic op,
14574 // and cast the result back to the original value type.
14575 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14576 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14577 SDValue Operand = IsFNABS ?
14578 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14579 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14580 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14581 return DAG.getNode(ISD::BITCAST, dl, VT,
14582 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14585 // If not vector, then scalar.
14586 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14587 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14588 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14591 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14592 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14593 LLVMContext *Context = DAG.getContext();
14594 SDValue Op0 = Op.getOperand(0);
14595 SDValue Op1 = Op.getOperand(1);
14597 MVT VT = Op.getSimpleValueType();
14598 MVT SrcVT = Op1.getSimpleValueType();
14600 // If second operand is smaller, extend it first.
14601 if (SrcVT.bitsLT(VT)) {
14602 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14605 // And if it is bigger, shrink it first.
14606 if (SrcVT.bitsGT(VT)) {
14607 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
14611 // At this point the operands and the result should have the same
14612 // type, and that won't be f80 since that is not custom lowered.
14614 const fltSemantics &Sem =
14615 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14616 const unsigned SizeInBits = VT.getSizeInBits();
14618 SmallVector<Constant *, 4> CV(
14619 VT == MVT::f64 ? 2 : 4,
14620 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
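// copysign(mag, sgn) is computed as (mag & ~SIGN) | (sgn & SIGN). Both
// masks are materialized as 16-byte constants so that their loads can fold
// straight into the FAND/FOR logic ops below.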
14622 // First, clear all bits but the sign bit from the second operand (sign).
14623 CV[0] = ConstantFP::get(*Context,
14624 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14625 Constant *C = ConstantVector::get(CV);
14626 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14627 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
14628 MachinePointerInfo::getConstantPool(),
14629 false, false, false, 16);
14630 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
14632 // Next, clear the sign bit from the first operand (magnitude).
14633 // If it's a constant, we can clear it here.
14634 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
14635 APFloat APF = Op0CN->getValueAPF();
14636 // If the magnitude is a positive zero, the sign bit alone is enough.
14637 if (APF.isPosZero())
14640 CV[0] = ConstantFP::get(*Context, APF);
14642 CV[0] = ConstantFP::get(
14644 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
14646 C = ConstantVector::get(CV);
14647 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14648 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14649 MachinePointerInfo::getConstantPool(),
14650 false, false, false, 16);
14651 // If the magnitude operand wasn't a constant, we need to AND out the sign.
14652 if (!isa<ConstantFPSDNode>(Op0))
14653 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
14655 // OR the magnitude value with the sign bit.
14656 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
14659 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
14660 SDValue N0 = Op.getOperand(0);
14662 MVT VT = Op.getSimpleValueType();
14664 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
14665 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
14666 DAG.getConstant(1, VT));
14667 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
14670 // Check whether an OR'd tree is PTEST-able.
14671 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
14672 SelectionDAG &DAG) {
14673 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
14675 if (!Subtarget->hasSSE41())
14678 if (!Op->hasOneUse())
14681 SDNode *N = Op.getNode();
14684 SmallVector<SDValue, 8> Opnds;
14685 DenseMap<SDValue, unsigned> VecInMap;
14686 SmallVector<SDValue, 8> VecIns;
14687 EVT VT = MVT::Other;
14689 // Recognize a special case where a vector is cast into a wide integer to test all 0s.
14691 Opnds.push_back(N->getOperand(0));
14692 Opnds.push_back(N->getOperand(1));
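// For example, a v4i32 value tested for all-zero via a scalarized
// (elt0 | elt1 | elt2 | elt3) == 0 arrives here as an OR tree of four
// EXTRACT_VECTOR_ELTs from the same vector.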
14694 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
14695 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
14696 // BFS traverse all OR'd operands.
14697 if (I->getOpcode() == ISD::OR) {
14698 Opnds.push_back(I->getOperand(0));
14699 Opnds.push_back(I->getOperand(1));
14700 // Re-evaluate the number of nodes to be traversed.
14701 e += 2; // 2 more nodes (LHS and RHS) are pushed.
14705 // Quit if this is not an EXTRACT_VECTOR_ELT.
14706 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14709 // Quit if the extract does not have a constant index.
14710 SDValue Idx = I->getOperand(1);
14711 if (!isa<ConstantSDNode>(Idx))
14714 SDValue ExtractedFromVec = I->getOperand(0);
14715 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
14716 if (M == VecInMap.end()) {
14717 VT = ExtractedFromVec.getValueType();
14718 // Quit if not 128/256-bit vector.
14719 if (!VT.is128BitVector() && !VT.is256BitVector())
14721 // Quit if not the same type.
14722 if (VecInMap.begin() != VecInMap.end() &&
14723 VT != VecInMap.begin()->first.getValueType())
14725 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
14726 VecIns.push_back(ExtractedFromVec);
14728 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
14731 assert((VT.is128BitVector() || VT.is256BitVector()) &&
14732 "Not extracted from 128-/256-bit vector.");
14734 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
14736 for (DenseMap<SDValue, unsigned>::const_iterator
14737 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
14738 // Quit if not all elements are used.
14739 if (I->second != FullMask)
14743 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
14745 // Cast all vectors into TestVT for PTEST.
14746 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
14747 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
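// PTEST sets ZF iff (A & B) == 0, so PTEST(V, V) sets ZF iff V is all
// zeros; that is what lets the whole OR'd tree be tested in one instruction.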
14749 // If more than one full vector is produced, OR them together before the PTEST.
14750 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
14751 // Each iteration will OR 2 nodes and append the result until there is only
14752 // 1 node left, i.e. the final OR'd value of all vectors.
14753 SDValue LHS = VecIns[Slot];
14754 SDValue RHS = VecIns[Slot + 1];
14755 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
14758 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
14759 VecIns.back(), VecIns.back());
14762 /// \brief Return true if \c Op has a use that doesn't just read flags.
14763 static bool hasNonFlagsUse(SDValue Op) {
14764 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
14766 SDNode *User = *UI;
14767 unsigned UOpNo = UI.getOperandNo();
14768 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
14769 // Look past the truncate.
14770 UOpNo = User->use_begin().getOperandNo();
14771 User = *User->use_begin();
14774 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
14775 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
14781 /// Emit nodes that will be selected as "test Op0,Op0", or something equivalent.
14783 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
14784 SelectionDAG &DAG) const {
14785 if (Op.getValueType() == MVT::i1)
14786 // KORTEST instruction should be selected
14787 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14788 DAG.getConstant(0, Op.getValueType()));
14790 // CF and OF aren't always set the way we want. Determine which
14791 // of these we need.
14792 bool NeedCF = false;
14793 bool NeedOF = false;
14796 case X86::COND_A: case X86::COND_AE:
14797 case X86::COND_B: case X86::COND_BE:
14800 case X86::COND_G: case X86::COND_GE:
14801 case X86::COND_L: case X86::COND_LE:
14802 case X86::COND_O: case X86::COND_NO: {
14803 // Check if we really need to set the
14804 // Overflow flag. If NoSignedWrap is present
14805 // that is not actually needed.
14806 switch (Op->getOpcode()) {
14811 const BinaryWithFlagsSDNode *BinNode =
14812 cast<BinaryWithFlagsSDNode>(Op.getNode());
14813 if (BinNode->hasNoSignedWrap())
14823 // See if we can use the EFLAGS value from the operand instead of
14824 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
14825 // we prove that the arithmetic won't overflow, we can't use OF or CF.
14826 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
14827 // Emit a CMP with 0, which is the TEST pattern.
14828 //if (Op.getValueType() == MVT::i1)
14829 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
14830 // DAG.getConstant(0, MVT::i1));
14831 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14832 DAG.getConstant(0, Op.getValueType()));
14834 unsigned Opcode = 0;
14835 unsigned NumOperands = 0;
14837 // Truncate operations may prevent the merge of the SETCC instruction
14838 // and the arithmetic instruction before it. Attempt to truncate the operands
14839 // of the arithmetic instruction and use a reduced bit-width instruction.
14840 bool NeedTruncation = false;
14841 SDValue ArithOp = Op;
14842 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
14843 SDValue Arith = Op->getOperand(0);
14844 // Both the trunc and the arithmetic op need to have one user each.
14845 if (Arith->hasOneUse())
14846 switch (Arith.getOpcode()) {
14853 NeedTruncation = true;
14859 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation,
14860 // which may be hidden behind a truncate. We use the variable 'Op', the
14861 // non-truncated value, when we check for possible users.
14862 switch (ArithOp.getOpcode()) {
14864 // Due to an isel shortcoming, be conservative if this add is likely to be
14865 // selected as part of a load-modify-store instruction. When the root node
14866 // in a match is a store, isel doesn't know how to remap non-chain non-flag
14867 // uses of other nodes in the match, such as the ADD in this case. This
14868 // leads to the ADD being left around and reselected, with the result being
14869 // two adds in the output. Alas, even if none of our users are stores, that
14870 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
14871 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
14872 // climbing the DAG back to the root, and it doesn't seem to be worth the effort.
14874 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14875 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14876 if (UI->getOpcode() != ISD::CopyToReg &&
14877 UI->getOpcode() != ISD::SETCC &&
14878 UI->getOpcode() != ISD::STORE)
14881 if (ConstantSDNode *C =
14882 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
14883 // An add of one will be selected as an INC.
14884 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
14885 Opcode = X86ISD::INC;
14890 // An add of negative one (subtract of one) will be selected as a DEC.
14891 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
14892 Opcode = X86ISD::DEC;
14898 // Otherwise use a regular EFLAGS-setting add.
14899 Opcode = X86ISD::ADD;
14904 // If we have a constant logical shift that's only used in a comparison
14905 // against zero turn it into an equivalent AND. This allows turning it into
14906 // a TEST instruction later.
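// e.g. for i32: (x >> 5) == 0 becomes (x & 0xffffffe0) == 0, and
// (x << 5) == 0 becomes (x & 0x07ffffff) == 0, either of which selects to
// a single TEST.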
14907 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
14908 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
14909 EVT VT = Op.getValueType();
14910 unsigned BitWidth = VT.getSizeInBits();
14911 unsigned ShAmt = Op->getConstantOperandVal(1);
14912 if (ShAmt >= BitWidth) // Avoid undefined shifts.
14914 APInt Mask = ArithOp.getOpcode() == ISD::SRL
14915 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
14916 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
14917 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
14919 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
14920 DAG.getConstant(Mask, VT));
14921 DAG.ReplaceAllUsesWith(Op, New);
14927 // If the primary result of the 'and' isn't used, don't bother using X86ISD::AND,
14928 // because a TEST instruction will be better.
14929 if (!hasNonFlagsUse(Op))
14935 // Due to the ISEL shortcoming noted above, be conservative if this op is
14936 // likely to be selected as part of a load-modify-store instruction.
14937 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14938 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14939 if (UI->getOpcode() == ISD::STORE)
14942 // Otherwise use a regular EFLAGS-setting instruction.
14943 switch (ArithOp.getOpcode()) {
14944 default: llvm_unreachable("unexpected operator!");
14945 case ISD::SUB: Opcode = X86ISD::SUB; break;
14946 case ISD::XOR: Opcode = X86ISD::XOR; break;
14947 case ISD::AND: Opcode = X86ISD::AND; break;
14949 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
14950 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
14951 if (EFLAGS.getNode())
14954 Opcode = X86ISD::OR;
14968 return SDValue(Op.getNode(), 1);
14974 // If we found that truncation is beneficial, perform the truncation and update the comparison.
14976 if (NeedTruncation) {
14977 EVT VT = Op.getValueType();
14978 SDValue WideVal = Op->getOperand(0);
14979 EVT WideVT = WideVal.getValueType();
14980 unsigned ConvertedOp = 0;
14981 // Use a target machine opcode to prevent further DAGCombine
14982 // optimizations that may separate the arithmetic operations
14983 // from the setcc node.
14984 switch (WideVal.getOpcode()) {
14986 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
14987 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
14988 case ISD::AND: ConvertedOp = X86ISD::AND; break;
14989 case ISD::OR: ConvertedOp = X86ISD::OR; break;
14990 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
14994 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14995 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
14996 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
14997 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
14998 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15004 // Emit a CMP with 0, which is the TEST pattern.
15005 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15006 DAG.getConstant(0, Op.getValueType()));
15008 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15009 SmallVector<SDValue, 4> Ops;
15010 for (unsigned i = 0; i != NumOperands; ++i)
15011 Ops.push_back(Op.getOperand(i));
15013 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15014 DAG.ReplaceAllUsesWith(Op, New);
15015 return SDValue(New.getNode(), 1);
15018 /// Emit nodes that will be selected as "cmp Op0,Op1", or something equivalent.
15020 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15021 SDLoc dl, SelectionDAG &DAG) const {
15022 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15023 if (C->getAPIntValue() == 0)
15024 return EmitTest(Op0, X86CC, dl, DAG);
15026 if (Op0.getValueType() == MVT::i1)
15027 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15030 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15031 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15032 // Do the comparison at i32 if it's smaller, except on Atom.
15033 // This avoids subregister aliasing issues. Keep the smaller reference
15034 // if we're optimizing for size, however, as that'll allow better folding
15035 // of memory operations.
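// Widening also sidesteps the partial-register stalls and false
// dependencies that i8/i16 operations can incur on the containing 32-bit
// register.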
15036 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15037 !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
15038 AttributeSet::FunctionIndex, Attribute::MinSize) &&
15039 !Subtarget->isAtom()) {
15040 unsigned ExtendOp =
15041 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15042 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15043 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15045 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15046 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15047 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15049 return SDValue(Sub.getNode(), 1);
15051 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15054 /// Convert a comparison if required by the subtarget.
15055 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15056 SelectionDAG &DAG) const {
15057 // If the subtarget does not support the FUCOMI instruction, floating-point
15058 // comparisons have to be converted.
15059 if (Subtarget->hasCMov() ||
15060 Cmp.getOpcode() != X86ISD::CMP ||
15061 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15062 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15065 // The instruction selector will select an FUCOM instruction instead of
15066 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15067 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15068 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
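// FNSTSW places C0..C3 in bits 8..14 of the status word; after the shift
// right by 8 they sit in the low byte, and SAHF copies that byte into
// EFLAGS so that C0->CF, C2->PF and C3->ZF, matching what FUCOMI would
// have produced directly.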
15070 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15071 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15072 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15073 DAG.getConstant(8, MVT::i8));
15074 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15075 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15078 /// The minimum architected relative accuracy is 2^-12. We need one
15079 /// Newton-Raphson step to have a good float result (24 bits of precision).
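/// The refinement is the standard Newton-Raphson iteration for 1/sqrt(a),
///   x1 = x0 * (1.5 - 0.5 * a * x0 * x0),
/// which roughly doubles the number of correct bits per step (12 -> 24).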
15080 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15081 DAGCombinerInfo &DCI,
15082 unsigned &RefinementSteps,
15083 bool &UseOneConstNR) const {
15084 // FIXME: We should use instruction latency models to calculate the cost of
15085 // each potential sequence, but this is very hard to do reliably because
15086 // at least Intel's Core* chips have variable timing based on the number of
15087 // significant digits in the divisor and/or sqrt operand.
15088 if (!Subtarget->useSqrtEst())
15091 EVT VT = Op.getValueType();
15093 // SSE1 has rsqrtss and rsqrtps.
15094 // TODO: Add support for AVX512 (v16f32).
15095 // It is likely not profitable to do this for f64 because a double-precision
15096 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15097 // instructions: convert to single, rsqrtss, convert back to double, refine
15098 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15099 // along with FMA, this could be a throughput win.
15100 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15101 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15102 RefinementSteps = 1;
15103 UseOneConstNR = false;
15104 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15109 /// The minimum architected relative accuracy is 2^-12. We need one
15110 /// Newton-Raphson step to have a good float result (24 bits of precision).
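/// Here the refinement is the Newton-Raphson iteration for 1/a,
///   x1 = x0 * (2 - a * x0),
/// which likewise roughly doubles the number of correct bits per step.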
15111 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15112 DAGCombinerInfo &DCI,
15113 unsigned &RefinementSteps) const {
15114 // FIXME: We should use instruction latency models to calculate the cost of
15115 // each potential sequence, but this is very hard to do reliably because
15116 // at least Intel's Core* chips have variable timing based on the number of
15117 // significant digits in the divisor.
15118 if (!Subtarget->useReciprocalEst())
15121 EVT VT = Op.getValueType();
15123 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15124 // TODO: Add support for AVX512 (v16f32).
15125 // It is likely not profitable to do this for f64 because a double-precision
15126 // reciprocal estimate with refinement on x86 prior to FMA requires
15127 // 15 instructions: convert to single, rcpss, convert back to double, refine
15128 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15129 // along with FMA, this could be a throughput win.
15130 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15131 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15132 RefinementSteps = ReciprocalEstimateRefinementSteps;
15133 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15138 static bool isAllOnes(SDValue V) {
15139 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15140 return C && C->isAllOnesValue();
15143 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15144 /// if possible.
15145 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15146 SDLoc dl, SelectionDAG &DAG) const {
15147 SDValue Op0 = And.getOperand(0);
15148 SDValue Op1 = And.getOperand(1);
15149 if (Op0.getOpcode() == ISD::TRUNCATE)
15150 Op0 = Op0.getOperand(0);
15151 if (Op1.getOpcode() == ISD::TRUNCATE)
15152 Op1 = Op1.getOperand(0);
15154 SDValue LHS, RHS;
15155 if (Op1.getOpcode() == ISD::SHL)
15156 std::swap(Op0, Op1);
15157 if (Op0.getOpcode() == ISD::SHL) {
15158 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15159 if (And00C->getZExtValue() == 1) {
15160 // If we looked past a truncate, check that it's only truncating away known zeros.
15162 unsigned BitWidth = Op0.getValueSizeInBits();
15163 unsigned AndBitWidth = And.getValueSizeInBits();
15164 if (BitWidth > AndBitWidth) {
15165 APInt Zeros, Ones;
15166 DAG.computeKnownBits(Op0, Zeros, Ones);
15167 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15168 return SDValue();
15169 }
15170 LHS = Op1;
15171 RHS = Op0.getOperand(1);
15173 } else if (Op1.getOpcode() == ISD::Constant) {
15174 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15175 uint64_t AndRHSVal = AndRHS->getZExtValue();
15176 SDValue AndLHS = Op0;
15178 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15179 LHS = AndLHS.getOperand(0);
15180 RHS = AndLHS.getOperand(1);
15183 // Use BT if the immediate can't be encoded in a TEST instruction.
15184 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15186 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15190 if (LHS.getNode()) {
15191 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15192 // instruction. Since the shift amount is in-range-or-undefined, we know
15193 // that doing a bittest on the i32 value is ok. We extend to i32 because
15194 // the encoding for the i16 version is larger than the i32 version.
15195 // Also promote i16 to i32 for performance / code size reason.
15196 if (LHS.getValueType() == MVT::i8 ||
15197 LHS.getValueType() == MVT::i16)
15198 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15200 // If the operand types disagree, extend the shift amount to match. Since
15201 // BT ignores high bits (like shifts) we can use anyextend.
15202 if (LHS.getValueType() != RHS.getValueType())
15203 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15205 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15206 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15207 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15208 DAG.getConstant(Cond, MVT::i8), BT);
15214 /// \brief Turns an ISD::CondCode into a value suitable for SSE floating-point compares.
15216 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15221 // SSE Condition code mapping:
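//   0 - EQ,  1 - LT,  2 - LE,  3 - UNORD,
//   4 - NEQ, 5 - NLT, 6 - NLE, 7 - ORD
// (the immediate operand encodings of CMPPS/CMPPD; AVX extends this set)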
15230 switch (SetCCOpcode) {
15231 default: llvm_unreachable("Unexpected SETCC condition");
15233 case ISD::SETEQ: SSECC = 0; break;
15235 case ISD::SETGT: Swap = true; // Fallthrough
15237 case ISD::SETOLT: SSECC = 1; break;
15239 case ISD::SETGE: Swap = true; // Fallthrough
15241 case ISD::SETOLE: SSECC = 2; break;
15242 case ISD::SETUO: SSECC = 3; break;
15244 case ISD::SETNE: SSECC = 4; break;
15245 case ISD::SETULE: Swap = true; // Fallthrough
15246 case ISD::SETUGE: SSECC = 5; break;
15247 case ISD::SETULT: Swap = true; // Fallthrough
15248 case ISD::SETUGT: SSECC = 6; break;
15249 case ISD::SETO: SSECC = 7; break;
15251 case ISD::SETONE: SSECC = 8; break;
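// 8 is not a real SSE predicate: it is a sentinel telling the caller
// (LowerVSETCC) to emit two CMPPs and combine them, since SETUEQ and
// SETONE have no single-instruction encoding.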
15254 std::swap(Op0, Op1);
15259 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15260 // ones, and then concatenate the result back.
15261 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15262 MVT VT = Op.getSimpleValueType();
15264 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15265 "Unsupported value type for operation");
15267 unsigned NumElems = VT.getVectorNumElements();
15269 SDValue CC = Op.getOperand(2);
15271 // Extract the LHS vectors
15272 SDValue LHS = Op.getOperand(0);
15273 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15274 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15276 // Extract the RHS vectors
15277 SDValue RHS = Op.getOperand(1);
15278 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15279 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15281 // Issue the operation on the smaller types and concatenate the result back
15282 MVT EltVT = VT.getVectorElementType();
15283 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15284 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15285 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15286 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15289 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15290 const X86Subtarget *Subtarget) {
15291 SDValue Op0 = Op.getOperand(0);
15292 SDValue Op1 = Op.getOperand(1);
15293 SDValue CC = Op.getOperand(2);
15294 MVT VT = Op.getSimpleValueType();
15297 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15298 Op.getValueType().getScalarType() == MVT::i1 &&
15299 "Cannot set masked compare for this operation");
15301 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15302 unsigned Opc = 0;
15303 bool Unsigned = false;
15304 bool Swap = false;
15305 unsigned SSECC = 0;
15306 switch (SetCCOpcode) {
15307 default: llvm_unreachable("Unexpected SETCC condition");
15308 case ISD::SETNE: SSECC = 4; break;
15309 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15310 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15311 case ISD::SETLT: Swap = true; //fall-through
15312 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15313 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15314 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15315 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15316 case ISD::SETULE: Unsigned = true; //fall-through
15317 case ISD::SETLE: SSECC = 2; break;
15321 std::swap(Op0, Op1);
15323 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15324 Opc = Unsigned ? X86ISD::CMPMU : X86ISD::CMPM;
15325 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15326 DAG.getConstant(SSECC, MVT::i8));
15329 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15330 /// operand \p Op1. If non-trivial (for example because it's not constant)
15331 /// return an empty value.
15332 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15334 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15338 MVT VT = Op1.getSimpleValueType();
15339 MVT EltVT = VT.getVectorElementType();
15340 unsigned n = VT.getVectorNumElements();
15341 SmallVector<SDValue, 8> ULTOp1;
15343 for (unsigned i = 0; i < n; ++i) {
15344 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15345 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EltVT)
15348 // Avoid underflow.
15349 APInt Val = Elt->getAPIntValue();
15353 ULTOp1.push_back(DAG.getConstant(Val - 1, EltVT));
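// x <u C is equivalent to x <=u C-1 whenever C != 0; the underflow check
// above rejects lanes with C == 0, so the rewrite is always sound here.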
15356 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15359 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15360 SelectionDAG &DAG) {
15361 SDValue Op0 = Op.getOperand(0);
15362 SDValue Op1 = Op.getOperand(1);
15363 SDValue CC = Op.getOperand(2);
15364 MVT VT = Op.getSimpleValueType();
15365 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15366 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15371 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15372 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15375 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15376 unsigned Opc = X86ISD::CMPP;
15377 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15378 assert(VT.getVectorNumElements() <= 16);
15379 Opc = X86ISD::CMPM;
15381 // In the two special cases we can't handle, emit two comparisons.
15384 unsigned CombineOpc;
15385 if (SetCCOpcode == ISD::SETUEQ) {
15386 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15388 assert(SetCCOpcode == ISD::SETONE);
15389 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15392 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15393 DAG.getConstant(CC0, MVT::i8));
15394 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15395 DAG.getConstant(CC1, MVT::i8));
15396 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15398 // Handle all other FP comparisons here.
15399 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15400 DAG.getConstant(SSECC, MVT::i8));
15403 // Break 256-bit integer vector compare into smaller ones.
15404 if (VT.is256BitVector() && !Subtarget->hasInt256())
15405 return Lower256IntVSETCC(Op, DAG);
15407 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15408 EVT OpVT = Op1.getValueType();
15409 if (Subtarget->hasAVX512()) {
15410 if (Op1.getValueType().is512BitVector() ||
15411 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15412 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15413 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15415 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15416 // but there is no compare instruction for i8 and i16 elements in KNL.
15417 // We are not talking about 512-bit operands in this case; these
15418 // types are illegal.
15420 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15421 OpVT.getVectorElementType().getSizeInBits() >= 8))
15422 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15423 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15426 // We are handling one of the integer comparisons here. Since SSE only has
15427 // GT and EQ comparisons for integer, swapping operands and multiple
15428 // operations may be required for some comparisons.
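// e.g. x <s y is emitted as PCMPGT(y, x) via the swap flag, and x <=s y
// as NOT(PCMPGT(x, y)) via the invert flag handled further down.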
15430 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15431 bool Subus = false;
15433 switch (SetCCOpcode) {
15434 default: llvm_unreachable("Unexpected SETCC condition");
15435 case ISD::SETNE: Invert = true;
15436 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15437 case ISD::SETLT: Swap = true;
15438 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15439 case ISD::SETGE: Swap = true;
15440 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15441 Invert = true; break;
15442 case ISD::SETULT: Swap = true;
15443 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15444 FlipSigns = true; break;
15445 case ISD::SETUGE: Swap = true;
15446 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15447 FlipSigns = true; Invert = true; break;
15450 // Special case: Use min/max operations for SETULE/SETUGE
15451 MVT VET = VT.getVectorElementType();
15453 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15454 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15457 switch (SetCCOpcode) {
15459 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15460 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15463 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15466 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15467 if (!MinMax && hasSubus) {
15468 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15470 // t = psubus Op0, Op1
15471 // pcmpeq t, <0..0>
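// PSUBUS saturates at zero, so (x -us y) == 0 holds iff x <=u y; the
// PCMPEQ against zero therefore computes the unsigned comparison directly.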
15472 switch (SetCCOpcode) {
15474 case ISD::SETULT: {
15475 // If the comparison is against a constant we can turn this into a
15476 // setule. With psubus, setule does not require a swap. This is
15477 // beneficial because the constant in the register is no longer
15478 // clobbered as the destination, so it can be hoisted out of a loop.
15479 // Only do this pre-AVX since vpcmp* is no longer destructive.
15480 if (Subtarget->hasAVX())
15482 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15483 if (ULEOp1.getNode()) {
15485 Subus = true; Invert = false; Swap = false;
15489 // Psubus is better than flip-sign because it requires no inversion.
15490 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15491 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15495 Opc = X86ISD::SUBUS;
15501 std::swap(Op0, Op1);
15503 // Check that the operation in question is available (most are plain SSE2,
15504 // but PCMPGTQ and PCMPEQQ have different requirements).
15505 if (VT == MVT::v2i64) {
15506 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15507 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15509 // First cast everything to the right type.
15510 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15511 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15513 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15514 // bits of the inputs before performing those operations. The lower
15515 // compare is always unsigned.
15518 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15520 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15521 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15522 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15523 Sign, Zero, Sign, Zero);
15525 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15526 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15528 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15529 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15530 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15532 // Create masks for only the low parts/high parts of the 64 bit integers.
15533 static const int MaskHi[] = { 1, 1, 3, 3 };
15534 static const int MaskLo[] = { 0, 0, 2, 2 };
15535 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15536 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15537 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15539 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15540 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15543 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15545 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15548 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15549 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15550 // pcmpeqd + pshufd + pand.
15551 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15553 // First cast everything to the right type.
15554 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15555 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15558 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15560 // Make sure the lower and upper halves are both all-ones.
15561 static const int Mask[] = { 1, 0, 3, 2 };
15562 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15563 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15566 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15568 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15572 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15573 // bits of the inputs before performing those operations.
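// XORing in the sign bit maps unsigned order onto signed order:
// x <u y  <=>  (x ^ SIGNBIT) <s (y ^ SIGNBIT), so the signed PCMPGT
// below implements the unsigned predicate.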
15575 EVT EltVT = VT.getVectorElementType();
15576 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15577 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15578 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15581 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  if (MinMax)
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);

  if (Subus)
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                         getZeroVector(VT, Subtarget, DAG, dl));

  return Result;
}
15597 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15599 MVT VT = Op.getSimpleValueType();
15601 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15603 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15604 && "SetCC type must be 8-bit or 1-bit integer");
15605 SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc dl(Op);
15608 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15610 // Optimize to BT if possible.
15611 // Lower (X & (1 << N)) == 0 to BT(X, N).
15612 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15613 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
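  // For example, (setcc (and X, 8), 0, eq) becomes BT X, 3 followed by a
  // SETAE of the carry flag; the inverted compare uses SETB instead.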
15614 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15615 Op1.getOpcode() == ISD::Constant &&
15616 cast<ConstantSDNode>(Op1)->isNullValue() &&
15617 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15618 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
    if (NewSetCC.getNode()) {
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
      return NewSetCC;
    }
  }
  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
15628 if (Op1.getOpcode() == ISD::Constant &&
15629 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
15630 cast<ConstantSDNode>(Op1)->isNullValue()) &&
15631 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15633 // If the input is a setcc, then reuse the input setcc or use a new one with
15634 // the inverted condition.
15635 if (Op0.getOpcode() == X86ISD::SETCC) {
15636 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
15637 bool Invert = (CC == ISD::SETNE) ^
        cast<ConstantSDNode>(Op1)->isNullValue();
      if (!Invert)
        return Op0;

      CCode = X86::GetOppositeBranchCondition(CCode);
      SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                  DAG.getConstant(CCode, MVT::i8),
                                  Op0.getOperand(1));
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
      return SetCC;
    }
  }
15651 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
15652 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
15653 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15655 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
    return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
  }
15659 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
15660 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
  if (X86CC == X86::COND_INVALID)
    return SDValue();

  SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
15665 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
15666 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15667 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
  if (VT == MVT::i1)
    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
  return SetCC;
}
15673 // isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
15674 static bool isX86LogicalCmp(SDValue Op) {
15675 unsigned Opc = Op.getNode()->getOpcode();
15676 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::SAHF)
    return true;
  if (Op.getResNo() == 1 &&
15680 (Opc == X86ISD::ADD ||
15681 Opc == X86ISD::SUB ||
15682 Opc == X86ISD::ADC ||
15683 Opc == X86ISD::SBB ||
15684 Opc == X86ISD::SMUL ||
15685 Opc == X86ISD::UMUL ||
15686 Opc == X86ISD::INC ||
15687 Opc == X86ISD::DEC ||
15688 Opc == X86ISD::OR ||
15689 Opc == X86ISD::XOR ||
       Opc == X86ISD::AND))
    return true;

  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
    return true;

  return false;
}
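// Return true if V is a truncate whose dropped high bits are known zero, so
// looking through it cannot change the result of a comparison against zero.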
15699 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
15704 unsigned InBits = VOp0.getValueSizeInBits();
15705 unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits, InBits - Bits));
}
15709 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
15710 bool addTest = true;
15711 SDValue Cond = Op.getOperand(0);
15712 SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  EVT VT = Op1.getValueType();
  SDValue CC;
15718 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
15719 // are available. Otherwise fp cmovs get lowered into a less efficient branch
15720 // sequence later on.
15721 if (Cond.getOpcode() == ISD::SETCC &&
15722 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
15723 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
15724 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
15725 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
15726 int SSECC = translateX86FSETCC(
        cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);

    if (SSECC != 8) {
      if (Subtarget->hasAVX512()) {
15731 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
15732 DAG.getConstant(SSECC, MVT::i8));
        return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
      }
      SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
15736 DAG.getConstant(SSECC, MVT::i8));
15737 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
15738 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
    }
  }
15743 if (Cond.getOpcode() == ISD::SETCC) {
15744 SDValue NewCond = LowerSETCC(Cond, DAG);
    if (NewCond.getNode())
      Cond = NewCond;
  }
15749 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
15750 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
15751 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
15752 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
15753 if (Cond.getOpcode() == X86ISD::SETCC &&
15754 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
15755 isZero(Cond.getOperand(1).getOperand(1))) {
15756 SDValue Cmp = Cond.getOperand(1);
15758 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
15760 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
15761 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
15762 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
15764 SDValue CmpOp0 = Cmp.getOperand(0);
15765 // Apply further optimizations for special cases
15766 // (select (x != 0), -1, 0) -> neg & sbb
15767 // (select (x == 0), 0, -1) -> neg & sbb
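      // NEG sets the carry flag exactly when x != 0, and SBB of a register
      // with itself turns the carry flag into 0 or -1, so the pair produces
      // the select result without a branch.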
15768 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
15769 if (YC->isNullValue() &&
15770 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
15771 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
15772 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
                                    DAG.getConstant(0, CmpOp0.getValueType()),
                                    CmpOp0);
          SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                    DAG.getConstant(X86::COND_B, MVT::i8),
                                    SDValue(Neg.getNode(), 1));
          return Res;
        }
15781 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
15782 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
15783 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15785 SDValue Res = // Res = 0 or -1.
15786 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15787 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
15789 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
15790 Res = DAG.getNOT(DL, Res, Res.getValueType());
15792 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
15793 if (!N2C || !N2C->isNullValue())
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);

      return Res;
    }
  }
15799 // Look past (and (setcc_carry (cmp ...)), 1).
15800 if (Cond.getOpcode() == ISD::AND &&
15801 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
15802 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
15803 if (C && C->getAPIntValue() == 1)
15804 Cond = Cond.getOperand(0);
15807 // If condition flag is set by a X86ISD::CMP, then use it as the condition
15808 // setting operand in place of the X86ISD::SETCC.
15809 unsigned CondOpcode = Cond.getOpcode();
15810 if (CondOpcode == X86ISD::SETCC ||
15811 CondOpcode == X86ISD::SETCC_CARRY) {
15812 CC = Cond.getOperand(0);
15814 SDValue Cmp = Cond.getOperand(1);
15815 unsigned Opc = Cmp.getOpcode();
15816 MVT VT = Op.getSimpleValueType();
15818 bool IllegalFPCMov = false;
15819 if (VT.isFloatingPoint() && !VT.isVector() &&
15820 !isScalarFPTypeInSSEReg(VT)) // FPStack?
15821 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
15823 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Opc == X86ISD::BT) { // FIXME
      Cond = Cmp;
      addTest = false;
    }
15828 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
15829 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
15830 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
15831 Cond.getOperand(0).getValueType() != MVT::i8)) {
15832 SDValue LHS = Cond.getOperand(0);
15833 SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
15837 switch (CondOpcode) {
15838 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
15839 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
15840 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
15841 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
15842 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
15843 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
15844 default: llvm_unreachable("unexpected overflowing operator");
15846 if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
15852 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);

    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  }
  if (addTest) {
    // Look past the truncate if the high bits are known zero.
15865 if (isTruncWithZeroHighBitsInput(Cond, DAG))
15866 Cond = Cond.getOperand(0);
    // We know the result of AND is compared against zero. Try to match
    // it to BT.
15870 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
15871 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
15872 if (NewSetCC.getNode()) {
15873 CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
  }
15885 // a < b ? -1 : 0 -> RES = ~setcc_carry
15886 // a < b ? 0 : -1 -> RES = setcc_carry
15887 // a >= b ? -1 : 0 -> RES = setcc_carry
15888 // a >= b ? 0 : -1 -> RES = ~setcc_carry
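  // SETCC_CARRY lowers to SBB reg, reg, which broadcasts the carry flag into
  // every bit of the result, yielding 0 or -1 directly.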
15889 if (Cond.getOpcode() == X86ISD::SUB) {
15890 Cond = ConvertCmpIfNecessary(Cond, DAG);
15891 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
15893 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
15894 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
15895 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15896 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
15897 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }
15903 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
15904 // widen the cmov and push the truncate through. This avoids introducing a new
15905 // branch during isel and doesn't add any extensions.
15906 if (Op.getValueType() == MVT::i8 &&
15907 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
15908 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
15909 if (T1.getValueType() == T2.getValueType() &&
15910 // Blacklist CopyFromReg to avoid partial register stalls.
15911 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
15912 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
15913 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }
15918 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
15919 // condition is true.
15920 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
15921 SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
}
15925 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
15926 SelectionDAG &DAG) {
15927 MVT VT = Op->getSimpleValueType(0);
15928 SDValue In = Op->getOperand(0);
15929 MVT InVT = In.getSimpleValueType();
15930 MVT VTElt = VT.getVectorElementType();
  MVT InVTElt = InVT.getVectorElementType();
  SDLoc dl(Op);
15935 if ((InVTElt == MVT::i1) &&
15936 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
15937 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
15939 ((Subtarget->hasBWI() && VT.is512BitVector() &&
15940 VTElt.getSizeInBits() <= 16)) ||
15942 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
15943 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
15945 ((Subtarget->hasDQI() && VT.is512BitVector() &&
15946 VTElt.getSizeInBits() >= 32))))
15947 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
15949 unsigned int NumElts = VT.getVectorNumElements();
  if (NumElts != 8 && NumElts != 16)
    return SDValue();
15954 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
15955 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
15956 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
    return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
  }
15960 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15961 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
15963 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
15964 Constant *C = ConstantInt::get(*DAG.getContext(),
15965 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
15967 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
15968 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
15969 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
15970 MachinePointerInfo::getConstantPool(),
15971 false, false, false, Alignment);
15972 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
  if (VT.is512BitVector())
    return Brcst;
  return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
}
15978 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
15979 SelectionDAG &DAG) {
15980 MVT VT = Op->getSimpleValueType(0);
15981 SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);
15985 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
15986 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
15988 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
15989 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
      (VT != MVT::v16i16 || InVT != MVT::v16i8))
    return SDValue();
15993 if (Subtarget->hasInt256())
15994 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
15996 // Optimize vectors in AVX mode
  // Sign extend  v8i16 to v8i32 and
  //              v4i32 to v4i64
  //
  // Divide input vector into two parts
16001 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16002 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16003 // concat the vectors to original VT
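  // e.g. v8i16 -> v8i32: split In into {0,1,2,3,-1,...} and {4,5,6,7,-1,...},
  // VSEXT each half to v4i32, then concatenate the halves back to v8i32.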
16005 unsigned NumElems = InVT.getVectorNumElements();
16006 SDValue Undef = DAG.getUNDEF(InVT);
16008 SmallVector<int,8> ShufMask1(NumElems, -1);
  for (unsigned i = 0; i != NumElems/2; ++i)
    ShufMask1[i] = i;
16012 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16014 SmallVector<int,8> ShufMask2(NumElems, -1);
16015 for (unsigned i = 0; i != NumElems/2; ++i)
16016 ShufMask2[i] = i + NumElems/2;
16018 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16020 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16021 VT.getVectorNumElements()/2);
16023 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16024 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
16029 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16030 // may emit an illegal shuffle but the expansion is still better than scalar
16031 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
// we'll emit a shuffle and an arithmetic shift.
16033 // TODO: It is possible to support ZExt by zeroing the undef values during
16034 // the shuffle phase or after the shuffle.
16035 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16036 SelectionDAG &DAG) {
16037 MVT RegVT = Op.getSimpleValueType();
16038 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16039 assert(RegVT.isInteger() &&
16040 "We only custom lower integer vector sext loads.");
16042 // Nothing useful we can do without SSE2 shuffles.
16043 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
  LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
  SDLoc dl(Ld);
16047 EVT MemVT = Ld->getMemoryVT();
16048 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16049 unsigned RegSz = RegVT.getSizeInBits();
16051 ISD::LoadExtType Ext = Ld->getExtensionType();
16053 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16054 && "Only anyext and sext are currently implemented.");
16055 assert(MemVT != RegVT && "Cannot extend to the same type");
16056 assert(MemVT.isVector() && "Must load a vector from memory");
16058 unsigned NumElems = RegVT.getVectorNumElements();
16059 unsigned MemSz = MemVT.getSizeInBits();
16060 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16062 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16063 // The only way in which we have a legal 256-bit vector result but not the
16064 // integer 256-bit operations needed to directly lower a sextload is if we
16065 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16066 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16067 // correctly legalized. We do this late to allow the canonical form of
16068 // sextload to persist throughout the rest of the DAG combiner -- it wants
16069 // to fold together any extensions it can, and so will fuse a sign_extend
16070 // of an sextload into a sextload targeting a wider value.
    SDValue Load;
    if (MemSz == 128) {
      // Just switch this to a normal load.
      assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
                                       "it must be a legal 128-bit vector "
                                       "type!");
      Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
                         Ld->getPointerInfo(), Ld->isVolatile(),
                         Ld->isNonTemporal(), Ld->isInvariant(),
                         Ld->getAlignment());
    } else {
      assert(MemSz < 128 &&
             "Can't extend a type wider than 128 bits to a 256 bit vector!");
16083 // Do an sext load to a 128-bit vector type. We want to use the same
16084 // number of elements, but elements half as wide. This will end up being
16085 // recursively lowered by this routine, but will succeed as we definitely
16086 // have all the necessary features if we're using AVX1.
      EVT HalfEltVT =
          EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
      EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
      Load =
          DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
                         Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
                         Ld->isNonTemporal(), Ld->isInvariant(),
                         Ld->getAlignment());
    }
16097 // Replace chain users with the new chain.
16098 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16099 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16101 // Finally, do a normal sign-extend to the desired register.
    return DAG.getSExtOrTrunc(Load, dl, RegVT);
  }
16105 // All sizes must be a power of two.
16106 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16107 "Non-power-of-two elements are not custom lowered!");
16109 // Attempt to load the original value using scalar loads.
16110 // Find the largest scalar type that divides the total loaded size.
16111 MVT SclrLoadTy = MVT::i8;
16112 for (MVT Tp : MVT::integer_valuetypes()) {
    if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
      SclrLoadTy = Tp;
    }
  }

  // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
  if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
      (64 <= MemSz))
    SclrLoadTy = MVT::f64;
16123 // Calculate the number of scalar loads that we need to perform
16124 // in order to load our vector from memory.
16125 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16127 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16128 "Can only lower sext loads with a single scalar load!");
  unsigned loadRegSize = RegSz;
  if (Ext == ISD::SEXTLOAD && RegSz == 256)
    loadRegSize = 128;
16134 // Represent our vector as a sequence of elements which are the
16135 // largest scalar that we can load.
16136 EVT LoadUnitVecVT = EVT::getVectorVT(
      *DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
  // Represent the data using the same element type that is stored in
  // memory. In practice, we "widen" MemVT.
  EVT WideVecVT =
      EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                       loadRegSize / MemVT.getScalarType().getSizeInBits());
16145 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16146 "Invalid vector type");
16148 // We can't shuffle using an illegal type.
16149 assert(TLI.isTypeLegal(WideVecVT) &&
16150 "We only lower types that form legal widened vector types");
16152 SmallVector<SDValue, 8> Chains;
16153 SDValue Ptr = Ld->getBasePtr();
16154 SDValue Increment =
16155 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16156 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16158 for (unsigned i = 0; i < NumLoads; ++i) {
16159 // Perform a single load.
16160 SDValue ScalarLoad =
16161 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16162 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16163 Ld->getAlignment());
16164 Chains.push_back(ScalarLoad.getValue(1));
16165 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16166 // another round of DAGCombining.
    if (i == 0)
      Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
    else
      Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
                        ScalarLoad, DAG.getIntPtrConstant(i));

    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
  }
16176 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16178 // Bitcast the loaded value to a vector of the original element type, in
16179 // the size of the target vector type.
16180 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16181 unsigned SizeRatio = RegSz / MemSz;
16183 if (Ext == ISD::SEXTLOAD) {
16184 // If we have SSE4.1, we can directly emit a VSEXT node.
16185 if (Subtarget->hasSSE41()) {
      SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
      DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
      return Sext;
    }
16191 // Otherwise we'll shuffle the small elements in the high bits of the
16192 // larger type and perform an arithmetic shift. If the shift is not legal
16193 // it's better to scalarize.
16194 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16195 "We can't implement a sext load without an arithmetic right shift!");
16197 // Redistribute the loaded elements into the different locations.
16198 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16199 for (unsigned i = 0; i != NumElems; ++i)
16200 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
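    // For example, a v4i8 sextload into v4i32 has SizeRatio == 4: each byte
    // is placed in the top byte of its dword (positions 3, 7, 11, 15), and
    // the arithmetic shift below then smears the sign bit across the lower
    // bits of each lane.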
16202 SDValue Shuff = DAG.getVectorShuffle(
16203 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16205 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16207 // Build the arithmetic shift.
16208 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16209 MemVT.getVectorElementType().getSizeInBits();
    Shuff =
        DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));

    DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
    return Shuff;
  }
16217 // Redistribute the loaded elements into the different locations.
16218 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16219 for (unsigned i = 0; i != NumElems; ++i)
16220 ShuffleVec[i * SizeRatio] = i;
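  // Each loaded element lands in the lowest slot of its widened lane; the
  // remaining slots stay undef, which is fine for an anyext load since the
  // high bits are unspecified.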
16222 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16223 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16225 // Bitcast to the requested type.
  Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
  return Shuff;
}
// isAndOrOfSetCCs - Return true if node is an ISD::AND or
16232 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16233 // from the AND / OR.
16234 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16235 Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
16238 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16239 Op.getOperand(0).hasOneUse() &&
16240 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16241 Op.getOperand(1).hasOneUse());
16244 // isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
16245 // 1 and that the SETCC node has a single use.
16246 static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
16249 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16250 if (N1C && N1C->getAPIntValue() == 1) {
    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  }
  return false;
}
16257 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16258 bool addTest = true;
16259 SDValue Chain = Op.getOperand(0);
16260 SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc dl(Op);
  SDValue CC;
  bool Inverted = false;
16266 if (Cond.getOpcode() == ISD::SETCC) {
16267 // Check for setcc([su]{add,sub,mul}o == 0).
16268 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16269 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16270 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16271 Cond.getOperand(0).getResNo() == 1 &&
16272 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16273 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16274 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16275 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16276 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16277 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      SDValue NewCond = LowerSETCC(Cond, DAG);
      if (NewCond.getNode())
        Cond = NewCond;
    }
  }
#if 0
  // FIXME: LowerXALUO doesn't handle these!!
  else if (Cond.getOpcode() == X86ISD::ADD  ||
           Cond.getOpcode() == X86ISD::SUB  ||
           Cond.getOpcode() == X86ISD::SMUL ||
           Cond.getOpcode() == X86ISD::UMUL)
    Cond = LowerXALUO(Cond, DAG);
#endif
  // Look past (and (setcc_carry (cmp ...)), 1).
16296 if (Cond.getOpcode() == ISD::AND &&
16297 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16298 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16299 if (C && C->getAPIntValue() == 1)
16300 Cond = Cond.getOperand(0);
16303 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16304 // setting operand in place of the X86ISD::SETCC.
16305 unsigned CondOpcode = Cond.getOpcode();
16306 if (CondOpcode == X86ISD::SETCC ||
16307 CondOpcode == X86ISD::SETCC_CARRY) {
16308 CC = Cond.getOperand(0);
16310 SDValue Cmp = Cond.getOperand(1);
16311 unsigned Opc = Cmp.getOpcode();
16312 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
      Cond = Cmp;
      addTest = false;
    } else {
      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
      default: break;
      case X86::COND_O:
      case X86::COND_B:
        // These can only come from an arithmetic instruction with overflow,
        // e.g. SADDO, UADDO.
        Cond = Cond.getNode()->getOperand(1);
        addTest = false;
        break;
      }
    }
  }
16329 CondOpcode = Cond.getOpcode();
16330 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16331 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16332 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16333 Cond.getOperand(0).getValueType() != MVT::i8)) {
16334 SDValue LHS = Cond.getOperand(0);
16335 SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
16339 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16340 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16342 switch (CondOpcode) {
    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
    case ISD::SADDO:
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
        if (C->isOne()) {
          X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
          break;
        }
      X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
    case ISD::SSUBO:
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
        if (C->isOne()) {
          X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
          break;
        }
      X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16359 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16360 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16361 default: llvm_unreachable("unexpected overflowing operator");
    if (Inverted)
      X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);

    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  } else {
    unsigned CondOpc;
16382 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16383 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16384 if (CondOpc == ISD::OR) {
16385 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
        // two branches instead of an explicit OR instruction with a
        // separate test.
        if (Cmp == Cond.getOperand(1).getOperand(1) &&
16389 isX86LogicalCmp(Cmp)) {
16390 CC = Cond.getOperand(0).getOperand(0);
16391 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16392 Chain, Dest, CC, Cmp);
          CC = Cond.getOperand(1).getOperand(0);
          Cond = Cmp;
          addTest = false;
        }
16397 } else { // ISD::AND
16398 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16399 // two branches instead of an explicit AND instruction with a
16400 // separate test. However, we only do this if this block doesn't
16401 // have a fall-through edge, because this requires an explicit
16402 // jmp when the condition is false.
16403 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16404 isX86LogicalCmp(Cmp) &&
16405 Op.getNode()->hasOneUse()) {
16406 X86::CondCode CCode =
16407 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16408 CCode = X86::GetOppositeBranchCondition(CCode);
16409 CC = DAG.getConstant(CCode, MVT::i8);
16410 SDNode *User = *Op.getNode()->use_begin();
16411 // Look for an unconditional branch following this conditional branch.
16412 // We need this because we need to reverse the successors in order
16413 // to implement FCMP_OEQ.
16414 if (User->getOpcode() == ISD::BR) {
16415 SDValue FalseBB = User->getOperand(1);
          SDNode *NewBR =
            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
          assert(NewBR == User);
          (void)NewBR;
          Dest = FalseBB;
        }
16422 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16423 Chain, Dest, CC, Cmp);
16424 X86::CondCode CCode =
16425 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16426 CCode = X86::GetOppositeBranchCondition(CCode);
          CC = DAG.getConstant(CCode, MVT::i8);
          Cond = Cmp;
          addTest = false;
        }
      }
16433 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16434 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
16435 // It should be transformed during dag combiner except when the condition
16436 // is set by a arithmetics with overflow node.
16437 X86::CondCode CCode =
16438 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16439 CCode = X86::GetOppositeBranchCondition(CCode);
16440 CC = DAG.getConstant(CCode, MVT::i8);
16441 Cond = Cond.getOperand(0).getOperand(1);
16443 } else if (Cond.getOpcode() == ISD::SETCC &&
16444 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16445 // For FCMP_OEQ, we can emit
16446 // two branches instead of an explicit AND instruction with a
16447 // separate test. However, we only do this if this block doesn't
16448 // have a fall-through edge, because this requires an explicit
16449 // jmp when the condition is false.
16450 if (Op.getNode()->hasOneUse()) {
16451 SDNode *User = *Op.getNode()->use_begin();
16452 // Look for an unconditional branch following this conditional branch.
16453 // We need this because we need to reverse the successors in order
16454 // to implement FCMP_OEQ.
16455 if (User->getOpcode() == ISD::BR) {
16456 SDValue FalseBB = User->getOperand(1);
        SDNode *NewBR =
          DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
        assert(NewBR == User);
        (void)NewBR;
        Dest = FalseBB;
16463 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16464 Cond.getOperand(0), Cond.getOperand(1));
16465 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16466 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16467 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16468 Chain, Dest, CC, Cmp);
        CC = DAG.getConstant(X86::COND_P, MVT::i8);
        Cond = Cmp;
        addTest = false;
      }
    }
16474 } else if (Cond.getOpcode() == ISD::SETCC &&
16475 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16476 // For FCMP_UNE, we can emit
16477 // two branches instead of an explicit AND instruction with a
16478 // separate test. However, we only do this if this block doesn't
16479 // have a fall-through edge, because this requires an explicit
16480 // jmp when the condition is false.
16481 if (Op.getNode()->hasOneUse()) {
16482 SDNode *User = *Op.getNode()->use_begin();
16483 // Look for an unconditional branch following this conditional branch.
16484 // We need this because we need to reverse the successors in order
16485 // to implement FCMP_UNE.
16486 if (User->getOpcode() == ISD::BR) {
16487 SDValue FalseBB = User->getOperand(1);
        SDNode *NewBR =
          DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
        assert(NewBR == User);
        (void)NewBR;
16493 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16494 Cond.getOperand(0), Cond.getOperand(1));
16495 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16496 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16497 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16498 Chain, Dest, CC, Cmp);
        CC = DAG.getConstant(X86::COND_NP, MVT::i8);
        Cond = Cmp;
        addTest = false;
        Dest = FalseBB;
      }
    }
  }
  if (addTest) {
    // Look past the truncate if the high bits are known zero.
16510 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16511 Cond = Cond.getOperand(0);
    // We know the result of AND is compared against zero. Try to match
    // it to BT.
16515 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16516 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16517 if (NewSetCC.getNode()) {
16518 CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
    CC = DAG.getConstant(X86Cond, MVT::i8);
    Cond = EmitTest(Cond, X86Cond, dl, DAG);
  }
16530 Cond = ConvertCmpIfNecessary(Cond, DAG);
  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}
16535 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16536 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16537 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16538 // that the guard pages used by the OS virtual memory manager are allocated in
16539 // correct sequence.
16541 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16542 SelectionDAG &DAG) const {
16543 MachineFunction &MF = DAG.getMachineFunction();
16544 bool SplitStack = MF.shouldSplitStack();
  bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
               SplitStack;
  SDLoc dl(Op);

  if (!Lower) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16551 SDNode* Node = Op.getNode();
16553 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16554 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16555 " not tell us which reg is the stack pointer!");
16556 EVT VT = Node->getValueType(0);
16557 SDValue Tmp1 = SDValue(Node, 0);
16558 SDValue Tmp2 = SDValue(Node, 1);
16559 SDValue Tmp3 = Node->getOperand(2);
16560 SDValue Chain = Tmp1.getOperand(0);
16562 // Chain the dynamic stack allocation so that it doesn't modify the stack
16563 // pointer when other instructions are using the stack.
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
                                 SDLoc(Node));
16567 SDValue Size = Tmp2.getOperand(1);
16568 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16569 Chain = SP.getValue(1);
16570 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16571 const TargetFrameLowering &TFI = *DAG.getSubtarget().getFrameLowering();
16572 unsigned StackAlign = TFI.getStackAlignment();
16573 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16574 if (Align > StackAlign)
16575 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16576 DAG.getConstant(-(uint64_t)Align, VT));
16577 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
    Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
                              DAG.getIntPtrConstant(0, true), SDValue(),
                              SDLoc(Node));

    SDValue Ops[2] = { Tmp1, Tmp2 };
    return DAG.getMergeValues(Ops, dl);
  }
16588 SDValue Chain = Op.getOperand(0);
16589 SDValue Size = Op.getOperand(1);
16590 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16591 EVT VT = Op.getNode()->getValueType(0);
16593 bool Is64Bit = Subtarget->is64Bit();
16594 EVT SPTy = getPointerTy();
  if (SplitStack) {
    MachineRegisterInfo &MRI = MF.getRegInfo();

    if (Is64Bit) {
16600 // The 64 bit implementation of segmented stacks needs to clobber both r10
16601 // r11. This makes it impossible to use it along with nested parameters.
16602 const Function *F = MF.getFunction();
      for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
           I != E; ++I)
        if (I->hasNestAttr())
          report_fatal_error("Cannot use segmented stacks with functions that "
                             "have nested arguments.");
    }
16611 const TargetRegisterClass *AddrRegClass =
16612 getRegClassFor(getPointerTy());
16613 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16614 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16615 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16616 DAG.getRegister(Vreg, SPTy));
16617 SDValue Ops1[2] = { Value, Chain };
16618 return DAG.getMergeValues(Ops1, dl);
  } else {
    SDValue Flag;
    const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16623 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16624 Flag = Chain.getValue(1);
16625 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16627 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
16629 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
16630 DAG.getSubtarget().getRegisterInfo());
16631 unsigned SPReg = RegInfo->getStackRegister();
16632 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16633 Chain = SP.getValue(1);
    if (Align) {
      SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
                       DAG.getConstant(-(uint64_t)Align, VT));
      Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
    }

    SDValue Ops1[2] = { SP, Chain };
    return DAG.getMergeValues(Ops1, dl);
  }
}
16646 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
16647 MachineFunction &MF = DAG.getMachineFunction();
16648 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);
16653 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
16654 // vastart just stores the address of the VarArgsFrameIndex slot into the
16655 // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                   getPointerTy());
    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                        MachinePointerInfo(SV), false, false, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory).
  //   reg_save_area
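  // The stores below fill these fields at byte offsets 0, 4, 8 and 16
  // respectively, matching the SysV AMD64 va_list layout.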
16667 SmallVector<SDValue, 8> MemOps;
16668 SDValue FIN = Op.getOperand(1);
16670 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
                               DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
                                               MVT::i32),
                               FIN, MachinePointerInfo(SV), false, false, 0);
16674 MemOps.push_back(Store);
  // Store fp_offset
  FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16678 FIN, DAG.getIntPtrConstant(4));
16679 Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
                                       MVT::i32),
                       FIN, MachinePointerInfo(SV, 4), false, false, 0);
16683 MemOps.push_back(Store);
16685 // Store ptr to overflow_arg_area
16686 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16687 FIN, DAG.getIntPtrConstant(4));
  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                    getPointerTy());
  Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
                       MachinePointerInfo(SV, 8),
                       false, false, 0);
16693 MemOps.push_back(Store);
16695 // Store ptr to reg_save_area.
16696 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16697 FIN, DAG.getIntPtrConstant(8));
  SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                                    getPointerTy());
16700 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
16701 MachinePointerInfo(SV, 16), false, false, 0);
16702 MemOps.push_back(Store);
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
16706 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
16707 assert(Subtarget->is64Bit() &&
16708 "LowerVAARG only handles 64-bit va_arg!");
16709 assert((Subtarget->isTargetLinux() ||
16710 Subtarget->isTargetDarwin()) &&
16711 "Unhandled target in LowerVAARG");
16712 assert(Op.getNode()->getNumOperands() == 4);
16713 SDValue Chain = Op.getOperand(0);
16714 SDValue SrcPtr = Op.getOperand(1);
16715 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  unsigned Align = Op.getConstantOperandVal(3);
  SDLoc dl(Op);
16719 EVT ArgVT = Op.getNode()->getValueType(0);
16720 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
  uint8_t ArgMode;
16724 // Decide which area this value should be read from.
16725 // TODO: Implement the AMD64 ABI in its entirety. This simple
16726 // selection mechanism works only for the basic types.
16727 if (ArgVT == MVT::f80) {
16728 llvm_unreachable("va_arg for f80 not yet implemented");
16729 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
16730 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
16731 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
16732 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
16734 llvm_unreachable("Unhandled argument type in LowerVAARG");
16737 if (ArgMode == 2) {
16738 // Sanity Check: Make sure using fp_offset makes sense.
16739 assert(!DAG.getTarget().Options.UseSoftFloat &&
16740 !(DAG.getMachineFunction()
16741 .getFunction()->getAttributes()
16742 .hasAttribute(AttributeSet::FunctionIndex,
16743 Attribute::NoImplicitFloat)) &&
           Subtarget->hasSSE1());
  }
16747 // Insert VAARG_64 node into the DAG
16748 // VAARG_64 returns two values: Variable Argument Address, Chain
16749 SmallVector<SDValue, 11> InstOps;
16750 InstOps.push_back(Chain);
16751 InstOps.push_back(SrcPtr);
16752 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
16753 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
16754 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
16755 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
16756 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
16757 VTs, InstOps, MVT::i64,
                                          MachinePointerInfo(SV),
                                          /*Align=*/0,
                                          /*Volatile=*/false,
                                          /*ReadMem=*/true,
                                          /*WriteMem=*/true);
16763 Chain = VAARG.getValue(1);
16765 // Load the next argument and return it
  return DAG.getLoad(ArgVT, dl,
                     Chain,
                     VAARG,
                     MachinePointerInfo(),
                     false, false, false, 0);
}
16773 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
16774 SelectionDAG &DAG) {
16775 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
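  // With the two i32 fields followed by two 8-byte pointers, the struct is
  // 4 + 4 + 8 + 8 = 24 bytes, which is the fixed size memcpy'd below.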
16776 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
16777 SDValue Chain = Op.getOperand(0);
16778 SDValue DstPtr = Op.getOperand(1);
16779 SDValue SrcPtr = Op.getOperand(2);
16780 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
                       DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
                       false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
16790 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
16791 // amount is a constant. Takes immediate version of shift as input.
16792 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
16793 SDValue SrcOp, uint64_t ShiftAmt,
16794 SelectionDAG &DAG) {
16795 MVT ElementType = VT.getVectorElementType();
  // Fold this packed shift into its first operand if ShiftAmt is 0.
  if (ShiftAmt == 0)
    return SrcOp;

  // Check for ShiftAmt >= element width
  if (ShiftAmt >= ElementType.getSizeInBits()) {
    if (Opc == X86ISD::VSRAI)
      ShiftAmt = ElementType.getSizeInBits() - 1;
    else
      return DAG.getConstant(0, VT);
  }
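  // Clamping an arithmetic shift to (width - 1) is safe: it still replicates
  // the sign bit into every bit of the lane, while oversized logical shifts
  // always produce zero.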
16809 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
16810 && "Unknown target vector shift-by-constant node");
16812 // Fold this packed vector shift into a build vector if SrcOp is a
16813 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
16814 if (VT == SrcOp.getSimpleValueType() &&
16815 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
16816 SmallVector<SDValue, 8> Elts;
16817 unsigned NumElts = SrcOp->getNumOperands();
    ConstantSDNode *ND;

    switch(Opc) {
    default: llvm_unreachable(nullptr);
16822 case X86ISD::VSHLI:
16823 for (unsigned i=0; i!=NumElts; ++i) {
16824 SDValue CurrentOp = SrcOp->getOperand(i);
16825 if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
      }
      break;
16834 case X86ISD::VSRLI:
16835 for (unsigned i=0; i!=NumElts; ++i) {
16836 SDValue CurrentOp = SrcOp->getOperand(i);
16837 if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
      }
      break;
16846 case X86ISD::VSRAI:
16847 for (unsigned i=0; i!=NumElts; ++i) {
16848 SDValue CurrentOp = SrcOp->getOperand(i);
16849 if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
      }
      break;
    }

    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
  }
  return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
}
16866 // getTargetVShiftNode - Handle vector element shifts where the shift amount
16867 // may or may not be a constant. Takes immediate version of shift as input.
16868 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
16869 SDValue SrcOp, SDValue ShAmt,
16870 SelectionDAG &DAG) {
16871 MVT SVT = ShAmt.getSimpleValueType();
16872 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
16874 // Catch shift-by-constant.
16875 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
16876 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
16877 CShAmt->getZExtValue(), DAG);
  // Change opcode to non-immediate version.
  switch (Opc) {
  default: llvm_unreachable("Unknown target vector shift node");
16882 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
16883 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
  case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
  }
16887 const X86Subtarget &Subtarget =
16888 DAG.getTarget().getSubtarget<X86Subtarget>();
16889 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
16890 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
16891 // Let the shuffle legalizer expand this shift amount node.
16892 SDValue Op0 = ShAmt.getOperand(0);
16893 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
    ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
  } else {
16896 // Need to build a vector containing shift amount.
16897 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
16898 SmallVector<SDValue, 4> ShOps;
16899 ShOps.push_back(ShAmt);
16900 if (SVT == MVT::i32) {
16901 ShOps.push_back(DAG.getConstant(0, SVT));
      ShOps.push_back(DAG.getUNDEF(SVT));
    }
    ShOps.push_back(DAG.getUNDEF(SVT));
16906 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
    ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
  }
16910 // The return type has to be a 128-bit type with the same element
16911 // type as the input type.
16912 MVT EltVT = VT.getVectorElementType();
16913 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
16915 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}
16919 /// \brief Return (and \p Op, \p Mask) for compare instructions or
16920 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
16921 /// necessary casting for \p Mask when lowering masking intrinsics.
16922 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
16923 SDValue PreservedSrc,
16924 const X86Subtarget *Subtarget,
16925 SelectionDAG &DAG) {
16926 EVT VT = Op.getValueType();
16927 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
16928 MVT::i1, VT.getVectorNumElements());
16929 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                   Mask.getValueType().getSizeInBits());
  SDLoc dl(Op);
16933 assert(MaskVT.isSimple() && "invalid mask type");
  if (isAllOnes(Mask))
    return Op;
16938 // In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
16939 // are extracted by EXTRACT_SUBVECTOR.
16940 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
16941 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
16942 DAG.getIntPtrConstant(0));
  switch (Op.getOpcode()) {
  default: break;
  case X86ISD::PCMPEQM:
  case X86ISD::PCMPGTM:
  case X86ISD::CMPM:
  case X86ISD::CMPMU:
    return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
  }
16952 if (PreservedSrc.getOpcode() == ISD::UNDEF)
16953 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
}
16957 /// \brief Creates an SDNode for a predicated scalar operation.
16958 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask is coming as MVT::i8 and it should be truncated
16960 /// to MVT::i1 while lowering masking intrinsics.
16961 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
16962 /// "X86select" instead of "vselect". We just can't create the "vselect" node for
16963 /// a scalar instruction.
16964 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
16965 SDValue PreservedSrc,
16966 const X86Subtarget *Subtarget,
16967 SelectionDAG &DAG) {
  if (isAllOnes(Mask))
    return Op;

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
16973 // The mask should be of type MVT::i1
16974 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
16976 if (PreservedSrc.getOpcode() == ISD::UNDEF)
16977 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
}
16981 static unsigned getOpcodeForFMAIntrinsic(unsigned IntNo) {
16983 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
16984 case Intrinsic::x86_fma_vfmadd_ps:
16985 case Intrinsic::x86_fma_vfmadd_pd:
16986 case Intrinsic::x86_fma_vfmadd_ps_256:
16987 case Intrinsic::x86_fma_vfmadd_pd_256:
16988 case Intrinsic::x86_fma_mask_vfmadd_ps_512:
16989 case Intrinsic::x86_fma_mask_vfmadd_pd_512:
16990 return X86ISD::FMADD;
16991 case Intrinsic::x86_fma_vfmsub_ps:
16992 case Intrinsic::x86_fma_vfmsub_pd:
16993 case Intrinsic::x86_fma_vfmsub_ps_256:
16994 case Intrinsic::x86_fma_vfmsub_pd_256:
16995 case Intrinsic::x86_fma_mask_vfmsub_ps_512:
16996 case Intrinsic::x86_fma_mask_vfmsub_pd_512:
16997 return X86ISD::FMSUB;
16998 case Intrinsic::x86_fma_vfnmadd_ps:
16999 case Intrinsic::x86_fma_vfnmadd_pd:
17000 case Intrinsic::x86_fma_vfnmadd_ps_256:
17001 case Intrinsic::x86_fma_vfnmadd_pd_256:
17002 case Intrinsic::x86_fma_mask_vfnmadd_ps_512:
17003 case Intrinsic::x86_fma_mask_vfnmadd_pd_512:
17004 return X86ISD::FNMADD;
17005 case Intrinsic::x86_fma_vfnmsub_ps:
17006 case Intrinsic::x86_fma_vfnmsub_pd:
17007 case Intrinsic::x86_fma_vfnmsub_ps_256:
17008 case Intrinsic::x86_fma_vfnmsub_pd_256:
17009 case Intrinsic::x86_fma_mask_vfnmsub_ps_512:
17010 case Intrinsic::x86_fma_mask_vfnmsub_pd_512:
17011 return X86ISD::FNMSUB;
17012 case Intrinsic::x86_fma_vfmaddsub_ps:
17013 case Intrinsic::x86_fma_vfmaddsub_pd:
17014 case Intrinsic::x86_fma_vfmaddsub_ps_256:
17015 case Intrinsic::x86_fma_vfmaddsub_pd_256:
17016 case Intrinsic::x86_fma_mask_vfmaddsub_ps_512:
17017 case Intrinsic::x86_fma_mask_vfmaddsub_pd_512:
17018 return X86ISD::FMADDSUB;
17019 case Intrinsic::x86_fma_vfmsubadd_ps:
17020 case Intrinsic::x86_fma_vfmsubadd_pd:
17021 case Intrinsic::x86_fma_vfmsubadd_ps_256:
17022 case Intrinsic::x86_fma_vfmsubadd_pd_256:
17023 case Intrinsic::x86_fma_mask_vfmsubadd_ps_512:
17024 case Intrinsic::x86_fma_mask_vfmsubadd_pd_512:
    return X86ISD::FMSUBADD;
  }
}
17029 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc dl(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT VT = Op.getValueType();
  const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
  if (IntrData) {
    switch(IntrData->Type) {
17037 case INTR_TYPE_1OP:
17038 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
    case INTR_TYPE_2OP:
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
                         Op.getOperand(2));
    case INTR_TYPE_3OP:
17043 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17044 Op.getOperand(2), Op.getOperand(3));
17045 case INTR_TYPE_1OP_MASK_RM: {
17046 SDValue Src = Op.getOperand(1);
17047 SDValue Src0 = Op.getOperand(2);
17048 SDValue Mask = Op.getOperand(3);
17049 SDValue RoundingMode = Op.getOperand(4);
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
                                              RoundingMode),
                                  Mask, Src0, Subtarget, DAG);
    }
17054 case INTR_TYPE_SCALAR_MASK_RM: {
17055 SDValue Src1 = Op.getOperand(1);
17056 SDValue Src2 = Op.getOperand(2);
17057 SDValue Src0 = Op.getOperand(3);
17058 SDValue Mask = Op.getOperand(4);
17059 SDValue RoundingMode = Op.getOperand(5);
      return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
                                              RoundingMode),
                                  Mask, Src0, Subtarget, DAG);
    }
17064 case INTR_TYPE_2OP_MASK: {
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
                                              Op.getOperand(1),
                                              Op.getOperand(2)),
                                  Op.getOperand(4), Op.getOperand(3),
                                  Subtarget, DAG);
    }
    case CMP_MASK:
    case CMP_MASK_CC: {
17071 // Comparison intrinsics with masks.
17072 // Example of transformation:
17073 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
      //             (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
      // (i8 (bitcast
      //   (v8i1 (insert_subvector undef,
17077 // (v2i1 (and (PCMPEQM %a, %b),
17078 // (extract_subvector
17079 // (v8i1 (bitcast %mask)), 0))), 0))))
17080 EVT VT = Op.getOperand(1).getValueType();
17081 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17082 VT.getVectorNumElements());
17083 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17084 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                       Mask.getValueType().getSizeInBits());
      SDValue Cmp;
      if (IntrData->Type == CMP_MASK_CC) {
17088 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                          Op.getOperand(2), Op.getOperand(3));
      } else {
        assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
        Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                          Op.getOperand(2));
      }
17095 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
                                             DAG.getTargetConstant(0, MaskVT),
                                             Subtarget, DAG);
17098 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17099 DAG.getUNDEF(BitcastVT), CmpMask,
17100 DAG.getIntPtrConstant(0));
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }
17103 case COMI: { // Comparison intrinsics
17104 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17105 SDValue LHS = Op.getOperand(1);
17106 SDValue RHS = Op.getOperand(2);
17107 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17108 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17109 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17110 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17111 DAG.getConstant(X86CC, MVT::i8), Cond);
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
    }
    case VSHIFT:
      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                                 Op.getOperand(1), Op.getOperand(2), DAG);
    case VSHIFT_MASK:
      return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17119 Op.getSimpleValueType(),
17121 Op.getOperand(2), DAG),
17122 Op.getOperand(4), Op.getOperand(3), Subtarget,
    case COMPRESS_EXPAND_IN_REG: {
      SDValue Mask = Op.getOperand(3);
      SDValue DataToCompress = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      if (isAllOnes(Mask)) // return data as is
        return Op.getOperand(1);
      EVT VT = Op.getValueType();
      EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                    VT.getVectorNumElements());
      EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                       Mask.getValueType().getSizeInBits());

      SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                  DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                  DAG.getIntPtrConstant(0));

      return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
                         PassThru);
    }
    case BLEND: {
      SDValue Mask = Op.getOperand(3);
      EVT VT = Op.getValueType();
      EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                    VT.getVectorNumElements());
      EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                       Mask.getValueType().getSizeInBits());

      SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                  DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                  DAG.getIntPtrConstant(0));
      return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
                         Op.getOperand(2));
    }
    case FMA_OP_MASK: {
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
                                              dl, Op.getValueType(),
                                              Op.getOperand(1),
                                              Op.getOperand(2),
                                              Op.getOperand(3)),
                                  Op.getOperand(4), Op.getOperand(1),
                                  Subtarget, DAG);
    }
    default:
      break;
    }
  }

  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.

  case Intrinsic::x86_avx512_mask_valign_q_512:
  case Intrinsic::x86_avx512_mask_valign_d_512:
    // Vector source operands are swapped.
    return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
                                            Op.getValueType(), Op.getOperand(2),
                                            Op.getOperand(1),
                                            Op.getOperand(3)),
                                Op.getOperand(5), Op.getOperand(4),
                                Subtarget, DAG);

  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction so lower it to the ptest
  // or testp pattern and a setcc for the result.
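  // PTEST/TESTP set ZF when (LHS & RHS) is all zeros and CF when
  // (~LHS & RHS) is all zeros, so the three variants map onto plain flag
  // checks: testz -> COND_E (ZF=1), testc -> COND_B (CF=1), and
  // testnzc -> COND_A (ZF=0 and CF=0).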
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    bool IsTestPacked = false;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
    SDValue CC = DAG.getConstant(X86CC, MVT::i8);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_avx512_kortestz_w:
  case Intrinsic::x86_avx512_kortestc_w: {
    unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w) ?
                     X86::COND_E : X86::COND_B;
    SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
    SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
    SDValue CC = DAG.getConstant(X86CC, MVT::i8);
    SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistria128:
  case Intrinsic::x86_sse42_pcmpestria128:
  case Intrinsic::x86_sse42_pcmpistric128:
  case Intrinsic::x86_sse42_pcmpestric128:
  case Intrinsic::x86_sse42_pcmpistrio128:
  case Intrinsic::x86_sse42_pcmpestrio128:
  case Intrinsic::x86_sse42_pcmpistris128:
  case Intrinsic::x86_sse42_pcmpestris128:
  case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86CC, MVT::i8),
                                SDValue(PCMP.getNode(), 1));
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTRI;
    else
      Opcode = X86ISD::PCMPESTRI;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps);
  }
  case Intrinsic::x86_fma_mask_vfmadd_ps_512:
  case Intrinsic::x86_fma_mask_vfmadd_pd_512:
  case Intrinsic::x86_fma_mask_vfmsub_ps_512:
  case Intrinsic::x86_fma_mask_vfmsub_pd_512:
  case Intrinsic::x86_fma_mask_vfnmadd_ps_512:
  case Intrinsic::x86_fma_mask_vfnmadd_pd_512:
  case Intrinsic::x86_fma_mask_vfnmsub_ps_512:
  case Intrinsic::x86_fma_mask_vfnmsub_pd_512:
  case Intrinsic::x86_fma_mask_vfmaddsub_ps_512:
  case Intrinsic::x86_fma_mask_vfmaddsub_pd_512:
  case Intrinsic::x86_fma_mask_vfmsubadd_ps_512:
  case Intrinsic::x86_fma_mask_vfmsubadd_pd_512: {
    auto *SAE = cast<ConstantSDNode>(Op.getOperand(5));
    if (SAE->getZExtValue() == X86::STATIC_ROUNDING::CUR_DIRECTION)
      return getVectorMaskingNode(DAG.getNode(getOpcodeForFMAIntrinsic(IntNo),
                                              dl, Op.getValueType(),
                                              Op.getOperand(1),
                                              Op.getOperand(2),
                                              Op.getOperand(3)),
                                  Op.getOperand(4), Op.getOperand(1),
                                  Subtarget, DAG);
    else
      return SDValue();
  }

  case Intrinsic::x86_fma_vfmadd_ps:
  case Intrinsic::x86_fma_vfmadd_pd:
  case Intrinsic::x86_fma_vfmsub_ps:
  case Intrinsic::x86_fma_vfmsub_pd:
  case Intrinsic::x86_fma_vfnmadd_ps:
  case Intrinsic::x86_fma_vfnmadd_pd:
  case Intrinsic::x86_fma_vfnmsub_ps:
  case Intrinsic::x86_fma_vfnmsub_pd:
  case Intrinsic::x86_fma_vfmaddsub_ps:
  case Intrinsic::x86_fma_vfmaddsub_pd:
  case Intrinsic::x86_fma_vfmsubadd_ps:
  case Intrinsic::x86_fma_vfmsubadd_pd:
  case Intrinsic::x86_fma_vfmadd_ps_256:
  case Intrinsic::x86_fma_vfmadd_pd_256:
  case Intrinsic::x86_fma_vfmsub_ps_256:
  case Intrinsic::x86_fma_vfmsub_pd_256:
  case Intrinsic::x86_fma_vfnmadd_ps_256:
  case Intrinsic::x86_fma_vfnmadd_pd_256:
  case Intrinsic::x86_fma_vfnmsub_ps_256:
  case Intrinsic::x86_fma_vfnmsub_pd_256:
  case Intrinsic::x86_fma_vfmaddsub_ps_256:
  case Intrinsic::x86_fma_vfmaddsub_pd_256:
  case Intrinsic::x86_fma_vfmsubadd_ps_256:
  case Intrinsic::x86_fma_vfmsubadd_pd_256:
    return DAG.getNode(getOpcodeForFMAIntrinsic(IntNo), dl, Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  }
}

static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                             SDValue Src, SDValue Mask, SDValue Base,
                             SDValue Index, SDValue ScaleOp, SDValue Chain,
                             const X86Subtarget *Subtarget) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
  assert(C && "Invalid scale type");
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
  EVT MaskVT = MVT::getVectorVT(MVT::i1,
                             Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
  SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  if (Src.getOpcode() == ISD::UNDEF)
    Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
  SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
  SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
  return DAG.getMergeValues(RetOps, dl);
}

static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                              SDValue Src, SDValue Mask, SDValue Base,
                              SDValue Index, SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
  assert(C && "Invalid scale type");
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
  SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  EVT MaskVT = MVT::getVectorVT(MVT::i1,
                             Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
  SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
  return SDValue(Res, 1);
}

static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                               SDValue Mask, SDValue Base, SDValue Index,
                               SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
  assert(C && "Invalid scale type");
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
  SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  EVT MaskVT =
    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
  SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
  return SDValue(Res, 0);
}

// getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
// read performance monitor counters (x86_rdpmc).
static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
                              SelectionDAG &DAG, const X86Subtarget *Subtarget,
                              SmallVectorImpl<SDValue> &Results) {
  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue LO, HI;

  // The ECX register is used to select the index of the performance counter
  // to read.
  SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
                                   N->getOperand(2));
  SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);

  // Reads the content of a 64-bit performance counter and returns it in the
  // registers EDX:EAX.
  if (Subtarget->is64Bit()) {
    LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);

  if (Subtarget->is64Bit()) {
    // The EAX register is loaded with the low-order 32 bits. The EDX register
    // is loaded with the supported high-order bits of the counter.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
}

// getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
// read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
// also used to custom lower READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
                              SelectionDAG &DAG, const X86Subtarget *Subtarget,
                              SmallVectorImpl<SDValue> &Results) {
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
  SDValue LO, HI;

  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
  // and the EAX register is loaded with the low-order 32 bits.
  if (Subtarget->is64Bit()) {
    LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  SDValue Chain = HI.getValue(1);

  if (Opcode == X86ISD::RDTSCP_DAG) {
    assert(N->getNumOperands() == 3 && "Unexpected number of operands!");

    // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
    // the ECX register. Add 'ecx' explicitly to the chain.
    SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
                                     HI.getValue(2));
    // Explicitly store the content of ECX at the location passed in input
    // to the 'rdtscp' intrinsic.
    Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
                         MachinePointerInfo(), false, false, 0);
  }

  if (Subtarget->is64Bit()) {
    // The EDX register is loaded with the high-order 32 bits of the MSR, and
    // the EAX register is loaded with the low-order 32 bits.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
}

static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
                                     SelectionDAG &DAG) {
  SmallVector<SDValue, 2> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
                          Results);
  return DAG.getMergeValues(Results, DL);
}

static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

  const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData)
    return SDValue();

  SDLoc dl(Op);
  switch(IntrData->Type) {
  default:
    llvm_unreachable("Unknown Intrinsic Type");
    break;
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
    SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, cast to i32.
    SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
                      DAG.getConstant(1, Op->getValueType(1)),
                      DAG.getConstant(X86::COND_B, MVT::i32),
                      SDValue(Result.getNode(), 1) };
    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
                                  DAG.getVTList(Op->getValueType(1), MVT::Glue),
                                  Ops);

    // Return { result, isValid, chain }.
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
                       SDValue(Result.getNode(), 2));
  }
  case GATHER: {
    //gather(v1, mask, index, base, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Src   = Op.getOperand(2);
    SDValue Base  = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask  = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale,
                         Chain, Subtarget);
  }
  case SCATTER: {
    //scatter(base, mask, index, v1, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Base  = Op.getOperand(2);
    SDValue Mask  = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Src   = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale,
                          Chain);
  }
  case PREFETCH: {
    SDValue Hint = Op.getOperand(6);
    unsigned HintVal;
    if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
        (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
      llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
    unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
    SDValue Chain = Op.getOperand(0);
    SDValue Mask  = Op.getOperand(2);
    SDValue Index = Op.getOperand(3);
    SDValue Base  = Op.getOperand(4);
    SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
                            Results);
    return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC: {
    SmallVector<SDValue, 2> Results;
    getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
    return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
    SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_NE, MVT::i8),
                                InTrans);
    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
                       Ret, SDValue(InTrans.getNode(), 1));
  }
  // ADC/ADCX/SBB
  case ADX: {
    SmallVector<SDValue, 2> Results;
    SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
    SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
    SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
                                DAG.getConstant(-1, MVT::i8));
    SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
                              Op.getOperand(4), GenCF.getValue(1));
    SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
                                 Op.getOperand(5), MachinePointerInfo(),
                                 false, false, 0);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_B, MVT::i8),
                                Res.getValue(1));
    Results.push_back(SetCC);
    Results.push_back(Store);
    return DAG.getMergeValues(Results, dl);
  }
  case COMPRESS_TO_MEM: {
    SDValue Mask = Op.getOperand(4);
    SDValue DataToCompress = Op.getOperand(3);
    SDValue Addr = Op.getOperand(2);
    SDValue Chain = Op.getOperand(0);

    if (isAllOnes(Mask)) // return just a store
      return DAG.getStore(Chain, dl, DataToCompress, Addr,
                          MachinePointerInfo(), false, false, 0);

    EVT VT = DataToCompress.getValueType();
    EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                  VT.getVectorNumElements());
    EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     Mask.getValueType().getSizeInBits());
    SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                DAG.getIntPtrConstant(0));

    SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
                                     DataToCompress, DAG.getUNDEF(VT));
    return DAG.getStore(Chain, dl, Compressed, Addr,
                        MachinePointerInfo(), false, false, 0);
  }
  case EXPAND_FROM_MEM: {
    SDValue Mask = Op.getOperand(4);
    SDValue PassThru = Op.getOperand(3);
    SDValue Addr = Op.getOperand(2);
    SDValue Chain = Op.getOperand(0);
    EVT VT = Op.getValueType();

    if (isAllOnes(Mask)) // return just a load
      return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
                         false, 0);
    EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                  VT.getVectorNumElements());
    EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     Mask.getValueType().getSizeInBits());
    SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                DAG.getIntPtrConstant(0));

    SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
                                       false, false, false, 0);

    SmallVector<SDValue, 2> Results;
    Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
                                  PassThru));
    Results.push_back(Chain);
    return DAG.getMergeValues(Results, dl);
  }
  }
}

SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
        DAG.getSubtarget().getRegisterInfo());
    SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT,
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
      DAG.getSubtarget().getRegisterInfo());
  unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(
      DAG.getMachineFunction());
  assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
          (FrameReg == X86::EBP && VT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned X86TargetLowering::getRegisterByName(const char* RegName,
                                              EVT VT) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                       .Case("esp", X86::ESP)
                       .Case("rsp", X86::RSP)
                       .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
      DAG.getSubtarget().getRegisterInfo());
  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
}

SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain     = Op.getOperand(0);
  SDValue Offset    = Op.getOperand(1);
  SDValue Handler   = Op.getOperand(2);
  SDLoc dl      (Op);

  EVT PtrVT = getPointerTy();
  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
      DAG.getSubtarget().getRegisterInfo());
  unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
  assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
          (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
  unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
                                 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
  StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);

  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StoreAddrReg, PtrVT));
}

SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}

SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Root = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl (Op);

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();

  if (Subtarget->is64Bit()) {
    SDValue OutChains[6];

    // Large code-model.
    const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.

    const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
    const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
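
    // The stores below assemble the following sequence in the trampoline
    // (offsets on the left, i16 opcode words stored little-endian):
    //    0: 49 BB <imm64>   movabsq $FPtr, %r11
    //   10: 49 BA <imm64>   movabsq $Nest, %r10
    //   20: 49 FF E3        jmpq   *%r11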

    // Load the pointer to the nested function into R11.
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, MVT::i64));
    OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
                                MachinePointerInfo(TrmpAddr, 2),
                                false, false, 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, MVT::i64));
    OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 12),
                                false, false, 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20),
                                false, false, 0);

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 22),
                                false, false, 0);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
    const Function *Func =
      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      FunctionType *FTy = Func->getFunctionType();
      const AttributeSet &Attrs = Func->getAttributes();

      if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs.hasAttribute(Idx, Attribute::InReg))
            // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;

        if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::Fast:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDValue OutChains[4];
    SDValue Addr, Disp;

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(10, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);

    // This is storing the opcode for MOV32ri.
    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
    const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
    OutChains[0] = DAG.getStore(Root, dl,
                                DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 1),
                                false, false, 1);

    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 5),
                                false, false, 1);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
                                MachinePointerInfo(TrmpAddr, 6),
                                false, false, 1);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}

SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
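
  // For example, FPSR bits 11:10 = 01 (round toward -inf) gives
  // ((0 >> 11) | (0x400 >> 9)) + 1 = 2 + 1 = 3, and 3 & 3 = 3, which is
  // FLT_ROUNDS' encoding for round to -inf. The 11 (round to 0) case wraps
  // around: (1 | 2) + 1 = 4, and 4 & 3 = 0.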

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  MachineMemOperand *MMO =
   MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                           MachineMemOperand::MOStore, 2, 2);

  SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
  SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
                                          DAG.getVTList(MVT::Other),
                                          Ops, MVT::i16, MMO);

  // Load FP Control Word from stack slot
  SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
                            MachinePointerInfo(), false, false, false, 0);

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}

static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // If src is zero (i.e. bsr sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits+NumBits-1, OpVT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);

  // Finally xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
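  // E.g. for a 32-bit input 0x00F00000, BSR returns the index of the highest
  // set bit, 23, and 31 ^ 23 = 8 is exactly the number of leading zeros. The
  // CMOV above makes the zero-input case come out as (2*NumBits-1) ^
  // (NumBits-1) = NumBits, matching CTLZ's defined result for zero.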
  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse).
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // And xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  Op = Op.getOperand(0);

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);

  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, VT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}

// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
// ones, and then concatenate the result back.
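// E.g. on a target with AVX but not AVX2, (v8i32 (add %a, %b)) becomes
//   (v8i32 (concat_vectors (v4i32 (add %a.lo, %b.lo)),
//                          (v4i32 (add %a.hi, %b.hi))))
// where lo/hi are the two extracted 128-bit halves.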
static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  assert(VT.is256BitVector() && VT.isInteger() &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);

  // Extract the LHS vectors
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);

  // Extract the RHS vectors
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);

  MVT EltVT = VT.getVectorElementType();
  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}

static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}

static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}

static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
                        SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  // Decompose 256-bit ops into smaller 128-bit ops.
  if (VT.is256BitVector() && !Subtarget->hasInt256())
    return Lower256IntArith(Op, DAG);

  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
  if (VT == MVT::v4i32) {
    assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
           "Should not custom lower when pmuldq is available!");

    // Extract the odd parts.
    static const int UnpackMask[] = { 1, -1, 3, -1 };
    SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
    SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);

    // Multiply the even parts.
    SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
    // Now multiply odd parts.
    SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);

    Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
    Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);

    // Merge the two vectors back together with a shuffle. This expands into 2
    // moves.
    static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }

  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
         "Only know how to lower V2I64/V4I64/V8I64 multiply");

  //  Ahi = psrlqi(a, 32);
  //  Bhi = psrlqi(b, 32);
  //
  //  AloBlo = pmuludq(a, b);
  //  AloBhi = pmuludq(a, Bhi);
  //  AhiBlo = pmuludq(Ahi, b);
  //
  //  AloBhi = psllqi(AloBhi, 32);
  //  AhiBlo = psllqi(AhiBlo, 32);
  //  return AloBlo + AloBhi + AhiBlo;
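  //
  // This is the usual schoolbook decomposition: writing a = Ahi*2^32 + Alo
  // and b = Bhi*2^32 + Blo, the product modulo 2^64 is
  //   a*b = Alo*Blo + ((Alo*Bhi + Ahi*Blo) << 32),
  // since the Ahi*Bhi term is shifted out entirely, so each partial product
  // only needs the 32x32->64 multiply that PMULUDQ provides.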

  SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
  SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);

  // Bit cast to 32-bit vectors for MULUDQ
  EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
              (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
  A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
  B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
  Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
  Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);

  SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
  SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
  SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);

  AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
  AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);

  SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
  return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}

SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op,
                                             SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case ISD::SDIV:    isSigned = true;  LC = RTLIB::SDIV_I128;    break;
  case ISD::UDIV:    isSigned = false; LC = RTLIB::UDIV_I128;    break;
  case ISD::SREM:    isSigned = true;  LC = RTLIB::SREM_I128;    break;
  case ISD::UREM:    isSigned = false; LC = RTLIB::UREM_I128;    break;
  case ISD::SDIVREM: isSigned = true;  LC = RTLIB::SDIVREM_I128; break;
  case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op->getOperand(i).getValueType();
    assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
           "Unexpected argument type for lowering");
    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
    Entry.Node = StackPtr;
    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
                           MachinePointerInfo(), false, false, 16);
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Ty = PointerType::get(ArgTy,0);
    Entry.isSExt = false;
    Entry.isZExt = false;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy());

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(InChain)
    .setCallee(getLibcallCallingConv(LC),
               static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
               Callee, std::move(Args), 0)
    .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
}

static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
  EVT VT = Op0.getValueType();
  SDLoc dl(Op);

  assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
         (VT == MVT::v8i32 && Subtarget->hasInt256()));

  // PMULxD operations multiply each even value (starting at 0) of LHS with
  // the related value of RHS and produce a widened result.
  // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  // => <2 x i64> <ae|cg>
  //
  // In other words, to have all the results, we need to perform two PMULxD:
  // 1. one with the even values.
  // 2. one with the odd values.
  // To achieve #2, we need to place the odd values at an even position.
  //
  // Place the odd value at an even position (basically, shift all values 1
  // step to the left):
  const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
  // <a|b|c|d> => <b|undef|d|undef>
  SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
  // <e|f|g|h> => <f|undef|h|undef>
  SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);

  // Emit two multiplies, one for the lower 2 ints and one for the higher 2
  // ints.
  MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
  bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
  unsigned Opcode =
    (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
  // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  // => <2 x i64> <ae|cg>
  SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
                             DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
  // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
  // => <2 x i64> <bf|dh>
  SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
                             DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));

  // Shuffle it back into the right order.
  SDValue Highs, Lows;
  if (VT == MVT::v8i32) {
    const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
    Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
    const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  } else {
    const int HighMask[] = {1, 5, 3, 7};
    Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
    const int LowMask[] = {0, 4, 2, 6};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  }

  // If we have a signed multiply but no PMULDQ, fix up the high parts of an
  // unsigned multiply.
  if (IsSigned && !Subtarget->hasSSE41()) {
    SDValue ShAmt =
      DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
    SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
                             DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
    SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
                             DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);

    SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
    Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
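    // This uses the identity (per 32-bit lane)
    //   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
    // where (x s>> 31) & y materializes the conditional operand without a
    // branch.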
  }

  // The first result of MUL_LOHI is actually the low value, followed by the
  // high value.
  SDValue Ops[] = {Lows, Highs};
  return DAG.getMergeValues(Ops, dl);
}

static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget *Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);

  // Optimize shl/srl/sra with constant shift amount.
  if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
    if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
      uint64_t ShiftAmt = ShiftConst->getZExtValue();

      if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
          (Subtarget->hasInt256() &&
           (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
          (Subtarget->hasAVX512() &&
           (VT == MVT::v8i64 || VT == MVT::v16i32))) {
        if (Op.getOpcode() == ISD::SHL)
          return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
                                            DAG);
        if (Op.getOpcode() == ISD::SRL)
          return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
                                            DAG);
        if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
          return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
                                            DAG);
      }

      if (VT == MVT::v16i8) {
        if (Op.getOpcode() == ISD::SHL) {
          // Make a large shift.
          SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
                                                   MVT::v8i16, R, ShiftAmt,
                                                   DAG);
          SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
          // Zero out the rightmost bits.
          SmallVector<SDValue, 16> V(16,
                                     DAG.getConstant(uint8_t(-1U << ShiftAmt),
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SHL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRL) {
          // Make a large shift.
          SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
                                                   MVT::v8i16, R, ShiftAmt,
                                                   DAG);
          SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
          // Zero out the leftmost bits.
          SmallVector<SDValue, 16> V(16,
                                     DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SRL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRA) {
          if (ShiftAmt == 7) {
            // R s>> 7  ===  R s< 0
            SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
            return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
          }

          // R s>> a === ((R u>> a) ^ m) - m
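          // where m = 0x80 u>> a carries the (shifted) sign bit. E.g. for
          // a = 2 and R = 0xF0 (-16): R u>> 2 = 0x3C, m = 0x20, and
          // (0x3C ^ 0x20) - 0x20 = 0x1C - 0x20 = 0xFC = -4, which is
          // -16 s>> 2.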
          SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
          SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
                                                         MVT::i8));
          SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
          return Res;
        }
        llvm_unreachable("Unknown shift opcode.");
      }

      if (Subtarget->hasInt256() && VT == MVT::v32i8) {
        if (Op.getOpcode() == ISD::SHL) {
          // Make a large shift.
          SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
                                                   MVT::v16i16, R, ShiftAmt,
                                                   DAG);
          SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
          // Zero out the rightmost bits.
          SmallVector<SDValue, 32> V(32,
                                     DAG.getConstant(uint8_t(-1U << ShiftAmt),
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SHL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRL) {
          // Make a large shift.
          SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
                                                   MVT::v16i16, R, ShiftAmt,
                                                   DAG);
          SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
          // Zero out the leftmost bits.
          SmallVector<SDValue, 32> V(32,
                                     DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SRL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRA) {
          if (ShiftAmt == 7) {
            // R s>> 7  ===  R s< 0
            SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
            return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
          }

          // R s>> a === ((R u>> a) ^ m) - m
          SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
          SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
                                                         MVT::i8));
          SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
          return Res;
        }
        llvm_unreachable("Unknown shift opcode.");
      }
    }
  }

  // Special case in 32-bit mode, where i64 is expanded into high and low parts.
  if (!Subtarget->is64Bit() &&
      (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
      Amt.getOpcode() == ISD::BITCAST &&
      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    Amt = Amt.getOperand(0);
    unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
                     VT.getVectorNumElements();
    unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
    uint64_t ShiftAmt = 0;
    for (unsigned i = 0; i != Ratio; ++i) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
      if (!C)
        return SDValue();
      // 6 == Log2(64)
      ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
    }
    // Check remaining shift amounts.
    for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
      uint64_t ShAmt = 0;
      for (unsigned j = 0; j != Ratio; ++j) {
        ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
        if (!C)
          return SDValue();
        // 6 == Log2(64)
        ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
      }
      if (ShAmt != ShiftAmt)
        return SDValue();
    }
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Unknown shift opcode!");
    case ISD::SHL:
      return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
                                        DAG);
    case ISD::SRL:
      return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
                                        DAG);
    case ISD::SRA:
      return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
                                        DAG);
    }
  }

  return SDValue();
}

static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget* Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);

  if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
      VT == MVT::v4i32 || VT == MVT::v8i16 ||
      (Subtarget->hasInt256() &&
       ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
        VT == MVT::v8i32 || VT == MVT::v16i16)) ||
      (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
    SDValue BaseShAmt;
    EVT EltVT = VT.getVectorElementType();

    if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
      // Check if this build_vector node is doing a splat.
      // If so, then set BaseShAmt equal to the splat value.
      BaseShAmt = BV->getSplatValue();
      if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
        BaseShAmt = SDValue();
    } else {
      if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
        Amt = Amt.getOperand(0);

      ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
      if (SVN && SVN->isSplat()) {
        unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
        SDValue InVec = Amt.getOperand(0);
        if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
          assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
                 "Unexpected shuffle index found!");
          BaseShAmt = InVec.getOperand(SplatIdx);
        } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
          if (ConstantSDNode *C =
              dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
            if (C->getZExtValue() == SplatIdx)
              BaseShAmt = InVec.getOperand(1);
          }
        }

        if (!BaseShAmt)
          // Avoid introducing an extract element from a shuffle.
          BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
                                  DAG.getIntPtrConstant(SplatIdx));
      }
    }

    if (BaseShAmt.getNode()) {
      assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
      if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
      else if (EltVT.bitsLT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);

      switch (Op.getOpcode()) {
      default:
        llvm_unreachable("Unknown shift opcode!");
      case ISD::SHL:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v2i64:
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v4i64:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
        }
      case ISD::SRA:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
        }
      case ISD::SRL:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v2i64:
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v4i64:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
        }
      }
    }
  }

  // Special case in 32-bit mode, where i64 is expanded into high and low parts.
  if (!Subtarget->is64Bit() &&
      (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
       (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
      Amt.getOpcode() == ISD::BITCAST &&
      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    Amt = Amt.getOperand(0);
    unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
                     VT.getVectorNumElements();
    std::vector<SDValue> Vals(Ratio);
    for (unsigned i = 0; i != Ratio; ++i)
      Vals[i] = Amt.getOperand(i);
    for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
      for (unsigned j = 0; j != Ratio; ++j)
        if (Vals[j] != Amt.getOperand(i + j))
          return SDValue();
    }
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Unknown shift opcode!");
    case ISD::SHL:
      return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
    case ISD::SRL:
      return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
    case ISD::SRA:
      return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
    }
  }

  return SDValue();
}

static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
                          SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  SDValue V;

  assert(VT.isVector() && "Custom lowering only for vector shifts!");
  assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");

  V = LowerScalarImmediateShift(Op, DAG, Subtarget);
  if (V.getNode())
    return V;

  V = LowerScalarVariableShift(Op, DAG, Subtarget);
  if (V.getNode())
    return V;

  if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
    return Op;
  // AVX2 has VPSLLV/VPSRAV/VPSRLV.
  if (Subtarget->hasInt256()) {
    if (Op.getOpcode() == ISD::SRL &&
        (VT == MVT::v2i64 || VT == MVT::v4i32 ||
         VT == MVT::v4i64 || VT == MVT::v8i32))
      return Op;
    if (Op.getOpcode() == ISD::SHL &&
        (VT == MVT::v2i64 || VT == MVT::v4i32 ||
         VT == MVT::v4i64 || VT == MVT::v8i32))
      return Op;
    if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
      return Op;
  }

  // If possible, lower this packed shift into a vector multiply instead of
  // expanding it into a sequence of scalar shifts.
  // Do this only if the vector shift count is a constant build_vector.
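  // For a left shift, multiplying each lane by 2^amt gives the same result,
  // e.g. (v4i32 (shl %x, <1, 2, 3, 4>)) becomes
  //      (v4i32 (mul %x, <2, 4, 8, 16>)).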
18738 if (Op.getOpcode() == ISD::SHL &&
18739 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
18740 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
18741 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18742 SmallVector<SDValue, 8> Elts;
18743 EVT SVT = VT.getScalarType();
18744 unsigned SVTBits = SVT.getSizeInBits();
18745 const APInt &One = APInt(SVTBits, 1);
18746 unsigned NumElems = VT.getVectorNumElements();
18748 for (unsigned i=0; i !=NumElems; ++i) {
18749 SDValue Op = Amt->getOperand(i);
18750 if (Op->getOpcode() == ISD::UNDEF) {
18751 Elts.push_back(Op);
18755 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
18756 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
18757 uint64_t ShAmt = C.getZExtValue();
18758 if (ShAmt >= SVTBits) {
18759 Elts.push_back(DAG.getUNDEF(SVT));
18762 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
18764 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
18765 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
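  // Illustrative example of the multiply trick above:
  //   (v4i32 (shl A, <1, 2, 3, 4>))  -->  (v4i32 (mul A, <2, 4, 8, 16>))
  // i.e. each constant shift amount c is replaced by the multiplier (1 << c),
  // so the whole shift becomes a single packed multiply.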
  // Lower SHL with variable shift amount.
  if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));

    Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
    Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
    Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
    return DAG.getNode(ISD::MUL, dl, VT, Op, R);
  }
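  // A note on the sequence above (illustrative): shifting each amount left by
  // 23 places it in the f32 exponent field, and adding 0x3f800000 (the bit
  // pattern of 1.0f) biases the exponent, so the lane reinterpreted as float
  // is exactly 2^amt. Converting back to integer and multiplying by R then
  // computes R << amt per lane without any variable vector shift instruction.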
  // If possible, lower this shift as a sequence of two shifts by
  // constant plus a MOVSS/MOVSD instead of scalarizing it.
  // Example:
  //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
  //
  // Could be rewritten as:
  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
  //
  // The advantage is that the two shifts from the example would be
  // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
  // the vector shift into four scalar shifts plus four pairs of vector
  // insert/extract.
  if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
      ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
    unsigned TargetOpcode = X86ISD::MOVSS;
    bool CanBeSimplified;
    // The splat value for the first packed shift (the 'X' from the example).
    SDValue Amt1 = Amt->getOperand(0);
    // The splat value for the second packed shift (the 'Y' from the example).
    SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
                                        Amt->getOperand(2);

    // See if it is possible to replace this node with a sequence of
    // two shifts followed by a MOVSS/MOVSD.
    if (VT == MVT::v4i32) {
      // Check if it is legal to use a MOVSS.
      CanBeSimplified = Amt2 == Amt->getOperand(2) &&
                        Amt2 == Amt->getOperand(3);
      if (!CanBeSimplified) {
        // Otherwise, check if we can still simplify this node using a MOVSD.
        CanBeSimplified = Amt1 == Amt->getOperand(1) &&
                          Amt->getOperand(2) == Amt->getOperand(3);
        TargetOpcode = X86ISD::MOVSD;
        Amt2 = Amt->getOperand(2);
      }
    } else {
      // Do similar checks for the case where the machine value type
      // is MVT::v8i16.
      CanBeSimplified = Amt1 == Amt->getOperand(1);
      for (unsigned i = 3; i != 8 && CanBeSimplified; ++i)
        CanBeSimplified = Amt2 == Amt->getOperand(i);

      if (!CanBeSimplified) {
        TargetOpcode = X86ISD::MOVSD;
        CanBeSimplified = true;
        Amt2 = Amt->getOperand(4);
        for (unsigned i = 0; i != 4 && CanBeSimplified; ++i)
          CanBeSimplified = Amt1 == Amt->getOperand(i);
        for (unsigned j = 4; j != 8 && CanBeSimplified; ++j)
          CanBeSimplified = Amt2 == Amt->getOperand(j);
      }
    }

    if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
        isa<ConstantSDNode>(Amt2)) {
      // Replace this node with two shifts followed by a MOVSS/MOVSD.
      EVT CastVT = MVT::v4i32;
      SDValue Splat1 =
          DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
      SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
      SDValue Splat2 =
          DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
      SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
      if (TargetOpcode == X86ISD::MOVSD)
        CastVT = MVT::v2i64;
      SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
      SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
      SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
                                            BitCast1, DAG);
      return DAG.getNode(ISD::BITCAST, dl, VT, Result);
    }
  }
  if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
    assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");

    // a = a << 5;
    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
    Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);

    // Turn 'a' into a mask suitable for VSELECT
    SDValue VSelM = DAG.getConstant(0x80, VT);
    SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    SDValue CM1 = DAG.getConstant(0x0f, VT);
    SDValue CM2 = DAG.getConstant(0x3f, VT);

    // r = VSELECT(r, psllw(r & (char16)15, 4), a);
    SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
    M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
    M = DAG.getNode(ISD::BITCAST, dl, VT, M);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);

    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    // r = VSELECT(r, psllw(r & (char16)63, 2), a);
    M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
    M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
    M = DAG.getNode(ISD::BITCAST, dl, VT, M);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);

    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    // return VSELECT(r, r+r, a);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
                    DAG.getNode(ISD::ADD, dl, VT, R, R), R);
    return R;
  }
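  // How the v16i8 sequence above works (illustrative): byte shift amounts are
  // in the range 0..7, i.e. three significant bits. 'Amt << 5' parks the
  // highest of those bits in each byte's sign bit, where PCMPEQ against 0x80
  // turns it into a per-lane mask. Each round then conditionally shifts R by
  // 4, 2, and finally 1 (a bit-serial shift), with 'Op += Op' exposing the
  // next amount bit for the following round.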
  // It's worth extending once and using the v8i32 shifts for 16-bit types, but
  // the extra overheads to get from v16i8 to v8i32 make the existing SSE
  // solution better.
  if (Subtarget->hasInt256() && VT == MVT::v8i16) {
    MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
    unsigned ExtOpc =
        Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    R = DAG.getNode(ExtOpc, dl, NewVT, R);
    Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
  }
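  // E.g. (illustrative): (v8i16 (shl R, Amt)) on AVX2 becomes
  //   (v8i16 (trunc (shl (zext R to v8i32), (anyext Amt to v8i32))))
  // so the variable shift can use the per-element v8i32 shift instructions.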
  // Decompose 256-bit shifts into smaller 128-bit shifts.
  if (VT.is256BitVector()) {
    unsigned NumElems = VT.getVectorNumElements();
    MVT EltVT = VT.getVectorElementType();
    EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

    // Extract the two vectors
    SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
    SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);

    // Recreate the shift amount vectors
    SDValue Amt1, Amt2;
    if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
      // Constant shift amount
      SmallVector<SDValue, 4> Amt1Csts;
      SmallVector<SDValue, 4> Amt2Csts;
      for (unsigned i = 0; i != NumElems/2; ++i)
        Amt1Csts.push_back(Amt->getOperand(i));
      for (unsigned i = NumElems/2; i != NumElems; ++i)
        Amt2Csts.push_back(Amt->getOperand(i));

      Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
      Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
    } else {
      // Variable shift amount
      Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
      Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
    }

    // Issue new vector shifts for the smaller types
    V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
    V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);

    // Concatenate the result back
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
  }

  return SDValue();
}
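// E.g. (illustrative): without AVX2, (v8i32 (srl A, B)) is rewritten as
//   concat_vectors (srl (extract_lo A), (extract_lo B)),
//                  (srl (extract_hi A), (extract_hi B))
// where each half is a 128-bit shift that the paths above can handle.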
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
  // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
  // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
  // looks for this combo and may remove the "setcc" instruction if the "setcc"
  // has only one use.
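  //
  // E.g. (illustrative), for i32:
  //   (i32 res, i1 ov) = saddo a, b
  // becomes
  //   res = X86ISD::ADD a, b                           ; also produces EFLAGS
  //   ov  = X86ISD::SETCC COND_O, res.getValue(1)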
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  unsigned BaseOp = 0;
  unsigned Cond = 0;
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    // An add of one will be selected as an INC. Note that INC doesn't
    // set CF, so we can't do this for UADDO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::INC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_B;
    break;
  case ISD::SSUBO:
    // A subtract of one will be selected as a DEC. Note that DEC doesn't
    // set CF, so we can't do this for USUBO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::DEC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
    if (N->getValueType(0) == MVT::i8) {
      BaseOp = X86ISD::UMUL8;
      Cond = X86::COND_O;
      break;
    }
    SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
                                 MVT::i32);
    SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);

    SDValue SetCC =
        DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                    DAG.getConstant(X86::COND_O, MVT::i32),
                    SDValue(Sum.getNode(), 2));

    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
  }
  }

  // Also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC =
      DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
                  DAG.getConstant(Cond, MVT::i32),
                  SDValue(Sum.getNode(), 1));

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
// Sign extension of the low part of vector elements. This may be used either
// when sign extend instructions are not available or if the vector element
// sizes already match the sign-extended size. If the vector elements are in
// their pre-extended size and sign extend instructions are available, that
// will be handled by LowerSIGN_EXTEND.
SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();

  if (!Subtarget->hasSSE2() || !VT.isVector())
    return SDValue();

  unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
                      ExtraVT.getScalarType().getSizeInBits();

  switch (VT.SimpleTy) {
    default: return SDValue();
    case MVT::v8i32:
    case MVT::v16i16:
      if (!Subtarget->hasFp256())
        return SDValue();
      if (!Subtarget->hasInt256()) {
        // needs to be split
        unsigned NumElems = VT.getVectorNumElements();

        // Extract the LHS vectors
        SDValue LHS = Op.getOperand(0);
        SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
        SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);

        MVT EltVT = VT.getVectorElementType();
        EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

        EVT ExtraEltVT = ExtraVT.getVectorElementType();
        unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
        ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
                                   ExtraNumElems/2);
        SDValue Extra = DAG.getValueType(ExtraVT);

        LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
        LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);

        return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
      }
    case MVT::v4i32:
    case MVT::v8i16: {
      SDValue Op0 = Op.getOperand(0);

      // This is a sign extension of some low part of vector elements without
      // changing the size of the vector elements themselves:
      // Shift-Left + Shift-Right-Algebraic.
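      //
      // E.g. (illustrative), for sext_inreg of i16 within i32 lanes:
      //   x = (x << 16) >>s 16
      // which replicates bit 15 across the upper half of each lane.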
      SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
                                               BitsDiff, DAG);
      return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
                                        DAG);
    }
  }
}
/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
  const X86Subtarget &Subtarget =
      getTargetMachine().getSubtarget<X86Subtarget>();
  unsigned OpWidth = MemType->getPrimitiveSizeInBits();

  if (OpWidth == 64)
    return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
  else if (OpWidth == 128)
    return Subtarget.hasCmpxchg16b();

  return false;
}
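// E.g. (illustrative): on i386 (native width 32), an i64 atomic operation is
// expanded with cmpxchg8b; on x86-64, an i128 one uses cmpxchg16b when the
// subtarget has it, and otherwise becomes a __sync_* library call.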
bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  return needsCmpXchgNb(SI->getValueOperand()->getType());
}

// Note: this turns large loads into lock cmpxchg8b/16b.
// FIXME: On 32-bit x86, fild/movq might be faster than lock cmpxchg8b.
bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
  return needsCmpXchgNb(PTy->getElementType());
}
bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  const X86Subtarget &Subtarget =
      getTargetMachine().getSubtarget<X86Subtarget>();
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
  const Type *MemType = AI->getType();

  // If the operand is too big, we must see if cmpxchg8/16b is available
  // and default to library calls otherwise.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return needsCmpXchgNb(MemType);

  AtomicRMWInst::BinOp Op = AI->getOperation();
  switch (Op) {
  default:
    llvm_unreachable("Unknown atomic operation");
  case AtomicRMWInst::Xchg:
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
    // It's better to use xadd, xsub or xchg for these in all cases.
    return false;
  case AtomicRMWInst::Or:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Xor:
    // If the atomicrmw's result isn't actually used, we can just add a "lock"
    // prefix to a normal instruction for these operations.
    return !AI->use_empty();
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    // These always require a non-trivial set of data operations on x86. We
    // must use a cmpxchg loop.
    return true;
  }
}
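// E.g. (illustrative): 'atomicrmw or i32* %p, 4 seq_cst' whose result is
// unused can be selected as a single 'lock orl $4, (%rdi)', while the same
// operation with a used result, or a Nand/Min/Max, needs a cmpxchg retry loop.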
static bool hasMFENCE(const X86Subtarget& Subtarget) {
  // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
  // no-sse2). There isn't any reason to disable it if the target processor
  // supports it.
  return Subtarget.hasSSE2() || Subtarget.is64Bit();
}
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
  const X86Subtarget &Subtarget =
      getTargetMachine().getSubtarget<X86Subtarget>();
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
  const Type *MemType = AI->getType();
  // Accesses larger than the native width are turned into cmpxchg/libcalls, so
  // there is no benefit in turning such RMWs into loads, and it is actually
  // harmful as it introduces an mfence.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return nullptr;

  auto Builder = IRBuilder<>(AI);
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  auto SynchScope = AI->getSynchScope();
  // We must restrict the ordering to avoid generating loads with Release or
  // ReleaseAcquire orderings.
  auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
  auto Ptr = AI->getPointerOperand();

  // Before the load we need a fence. Here is an example lifted from
  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
  // is required:
  // Thread 0:
  //   x.store(1, relaxed);
  //   r1 = y.fetch_add(0, release);
  // Thread 1:
  //   y.fetch_add(42, acquire);
  //   r2 = x.load(relaxed);
  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
  // lowered to just a load without a fence. An mfence flushes the store buffer,
  // making the optimization clearly correct.
  // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
  // otherwise; we might be able to be more aggressive on relaxed idempotent
  // rmw. In practice, they do not look useful, so we don't try to be
  // especially clever.
  if (SynchScope == SingleThread) {
    // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
    // the IR level, so we must wrap it in an intrinsic.
    return nullptr;
  } else if (hasMFENCE(Subtarget)) {
    Function *MFence = llvm::Intrinsic::getDeclaration(M,
                                                       Intrinsic::x86_sse2_mfence);
    Builder.CreateCall(MFence);
  } else {
    // FIXME: it might make sense to use a locked operation here but on a
    // different cache-line to prevent cache-line bouncing. In practice it
    // is probably a small win, and x86 processors without mfence are rare
    // enough that we do not bother.
    return nullptr;
  }

  // Finally we can emit the atomic load.
  LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
      AI->getType()->getPrimitiveSizeInBits());
  Loaded->setAtomic(Order, SynchScope);
  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return Loaded;
}
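// E.g. (illustrative): 'atomicrmw or i32* %p, 0 acquire' is idempotent, so it
// becomes 'mfence; movl (%rdi), %eax' rather than a lock-prefixed RMW.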
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  SDLoc dl(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
      cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
      cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
    if (hasMFENCE(*Subtarget))
      return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
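    // Without MFENCE (pre-SSE2), fall back to a locked no-op RMW on the
    // stack: a locked 'or $0' to (%esp) acts as a full barrier on x86
    // (illustrative reading of the X86::OR32mrLocked node built below).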
    SDValue Chain = Op.getOperand(0);
    SDValue Zero = DAG.getConstant(0, MVT::i32);
    SDValue Ops[] = {
      DAG.getRegister(X86::ESP, MVT::i32),  // Base
      DAG.getTargetConstant(1, MVT::i8),    // Scale
      DAG.getRegister(0, MVT::i32),         // Index
      DAG.getTargetConstant(0, MVT::i32),   // Disp
      DAG.getRegister(0, MVT::i32),         // Segment.
      Zero,
      Chain
    };
    SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
    return SDValue(Res, 0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
  MVT T = Op.getSimpleValueType();
  SDLoc DL(Op);
  unsigned Reg = 0;
  unsigned size = 0;
  switch(T.SimpleTy) {
  default: llvm_unreachable("Invalid value type!");
  case MVT::i8:  Reg = X86::AL;  size = 1; break;
  case MVT::i16: Reg = X86::AX;  size = 2; break;
  case MVT::i32: Reg = X86::EAX; size = 4; break;
  case MVT::i64:
    assert(Subtarget->is64Bit() && "Node not type legal!");
    Reg = X86::RAX; size = 8;
    break;
  }
  SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
                                  Op.getOperand(2), SDValue());
  SDValue Ops[] = { cpIn.getValue(0),
                    Op.getOperand(1),
                    Op.getOperand(3),
                    DAG.getTargetConstant(size, MVT::i8),
                    cpIn.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, T, MMO);
  SDValue cpOut =
      DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
  SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
                                      MVT::i32, cpOut.getValue(2));
  SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
                                DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
  return SDValue();
}
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
                            SelectionDAG &DAG) {
  MVT SrcVT = Op.getOperand(0).getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();

  if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    if (DstVT != MVT::f64)
      // This conversion needs to be expanded.
      return SDValue();

    SDValue InVec = Op->getOperand(0);
    SDLoc dl(Op);
    unsigned NumElts = SrcVT.getVectorNumElements();
    EVT SVT = SrcVT.getVectorElementType();

    // Widen the input vector if necessary.
    // Example: from MVT::v2i32 to MVT::v4i32.
    SmallVector<SDValue, 16> Elts;
    for (unsigned i = 0, e = NumElts; i != e; ++i)
      Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
                                 DAG.getIntPtrConstant(i)));

    // Explicitly mark the extra elements as Undef.
    SDValue Undef = DAG.getUNDEF(SVT);
    for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
      Elts.push_back(Undef);

    EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
    SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
                       DAG.getIntPtrConstant(0));
  }

  assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
         Subtarget->hasMMX() && "Unexpected custom BITCAST");
  assert((DstVT == MVT::i64 ||
          (DstVT.isVector() && DstVT.getSizeInBits() == 64)) &&
         "Unexpected custom BITCAST");
  // i64 <=> MMX conversions are Legal.
  if (SrcVT == MVT::i64 && DstVT.isVector())
    return Op;
  if (DstVT == MVT::i64 && SrcVT.isVector())
    return Op;
  // MMX <=> MMX conversions are Legal.
  if (SrcVT.isVector() && DstVT.isVector())
    return Op;
  // All other conversions need to be expanded.
  return SDValue();
}
static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
                          SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);

  Op = Op.getOperand(0);
  EVT VT = Op.getValueType();
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "CTPOP lowering only implemented for 128/256-bit wide vector types");

  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  unsigned Len = EltVT.getSizeInBits();

  // This is the vectorized version of the "best" algorithm from
  // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  // with a minor tweak to use a series of adds + shifts instead of vector
  // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
  //
  //  v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
  //  v8i32 => Always profitable
  //
  // FIXME: There are a couple of possible improvements:
  //
  // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
  // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
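  //
  // Worked example for one i32 lane (illustrative), v = 0xD (popcount 3):
  //   v = v - ((v >> 1) & 0x55555555)                  -> 0x9 (2-bit counts)
  //   v = (v & 0x33333333) + ((v >> 2) & 0x33333333)   -> 0x3 (nibble counts)
  //   v = (v + (v >> 4)) & 0x0F0F0F0F                  -> 0x3 (byte counts)
  //   v += v >> 8; v += v >> 16; v &= 0x3F             -> 3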
  //
  assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
         "CTPOP not implemented for this vector element type.");

  // X86 canonicalizes ANDs to vXi64, so generate the appropriate bitcasts to
  // avoid extra legalization.
  bool NeedsBitcast = EltVT == MVT::i32;
  MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;

  SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
  SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
  SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);

  // v = v - ((v >> 1) & 0x55555555...)
  SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
  SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
  SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
  if (NeedsBitcast)
    Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);

  SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
  SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
  if (NeedsBitcast)
    M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);

  SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);

  // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
  SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
  SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
  SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
  SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);

  Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
  if (NeedsBitcast) {
    Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
    M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
    Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
  }

  SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
  SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
  if (VT != AndRHS.getValueType()) {
    AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
    AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
  }
  SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);

  // v = (v + (v >> 4)) & 0x0F0F0F0F...
  SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
  SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
  Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
  Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);

  SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
  SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
  if (NeedsBitcast) {
    Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
    M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
  }
  And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);

  // The algorithm mentioned above uses:
  //    v = (v * 0x01010101...) >> (Len - 8)
  //
  // Change it to use vector adds + vector shifts which yield faster results on
  // Haswell than using vector integer multiplication.
  //
  // For i32 elements:
  //    v = v + (v >> 8)
  //    v = v + (v >> 16)
  //
  // For i64 elements:
  //    v = v + (v >> 8)
  //    v = v + (v >> 16)
  //    v = v + (v >> 32)
  //
  Add = And;
  SmallVector<SDValue, 8> Csts;
  for (unsigned i = 8; i <= Len/2; i *= 2) {
    Csts.assign(NumElts, DAG.getConstant(i, EltVT));
    SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
    Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
    Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
  }

  // The result fits in the least significant 6 bits for i32 elements and
  // 7 bits for i64 elements; mask everything else off.
  SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
  SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
  SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
  if (NeedsBitcast) {
    Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
    M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
  }
  And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);

  return And;
}
static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  EVT T = Node->getValueType(0);
  SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
                              DAG.getConstant(0, T), Node->getOperand(2));
  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
                       cast<AtomicSDNode>(Node)->getMemoryVT(),
                       Node->getOperand(0),
                       Node->getOperand(1), negOp,
                       cast<AtomicSDNode>(Node)->getMemOperand(),
                       cast<AtomicSDNode>(Node)->getOrdering(),
                       cast<AtomicSDNode>(Node)->getSynchScope());
}
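// I.e. (illustrative): an atomic 'p -= x' is rewritten as the atomic
// 'p += (0 - x)', which the backend can then select as a single locked add.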
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();

  // Convert seq_cst store -> xchg
  // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
  // FIXME: On 32-bit, store -> fist or movq would be more efficient
  //        (The only way to get a 16-byte store is cmpxchg16b)
  // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
  if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
      !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
    SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                 cast<AtomicSDNode>(Node)->getMemoryVT(),
                                 Node->getOperand(0),
                                 Node->getOperand(1), Node->getOperand(2),
                                 cast<AtomicSDNode>(Node)->getMemOperand(),
                                 cast<AtomicSDNode>(Node)->getOrdering(),
                                 cast<AtomicSDNode>(Node)->getSynchScope());
    return Swap.getValue(1);
  }
  // Other atomic stores have a simple pattern.
  return Op;
}
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getNode()->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  unsigned Opc;
  bool ExtraOp = false;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid code");
  case ISD::ADDC: Opc = X86ISD::ADD; break;
  case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
  case ISD::SUBC: Opc = X86ISD::SUB; break;
  case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
  }

  if (!ExtraOp)
    return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
                       Op.getOperand(1));
  return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
                     Op.getOperand(1), Op.getOperand(2));
}
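// E.g. (illustrative): a 128-bit add split into two i64 halves becomes
//   lo = addc a.lo, b.lo   -> X86ISD::ADD  (sets the carry in EFLAGS)
//   hi = adde a.hi, b.hi   -> X86ISD::ADC  (consumes that carry)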
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
                            SelectionDAG &DAG) {
  assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());

  // For MacOSX, we want to call an alternative entry point: __sincos_stret,
  // which returns the values as { float, float } (in XMM0) or
  // { double, double } (which is returned in XMM0, XMM1).
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.isSExt = false;
  Entry.isZExt = false;
  Args.push_back(Entry);

  bool isF64 = ArgVT == MVT::f64;
  // Only optimize x86_64 for now. i386 is a bit messy. For f32,
  // the small struct {f32, f32} is returned in (eax, edx). For f64,
  // the results are returned via SRet in memory.
  const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());

  Type *RetTy = isF64
      ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
      : (Type*)VectorType::get(ArgTy, 4);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);

  if (isF64)
    // Returned in xmm0 and xmm1.
    return CallResult.first;

  // Returned in bits 0:31 and 32:63 of xmm0.
  SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
                               CallResult.first, DAG.getIntPtrConstant(0));
  SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
                               CallResult.first, DAG.getIntPtrConstant(1));
  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
}
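// E.g. (illustrative): for f32, the <4 x float> returned by __sincosf_stret
// carries sin(x) in lane 0 and cos(x) in lane 1, so both results come from a
// single libcall instead of separate sinf/cosf calls.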
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return LowerCMP_SWAP(Op, Subtarget, DAG);
  case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
  case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op, DAG);
  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::VSELECT: return LowerVSELECT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, Subtarget, DAG);
  case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget, DAG);
  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
  case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
  case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
  case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
  case ISD::FABS:
  case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
  case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
  case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
  case ISD::SETCC: return LowerSETCC(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::VAARG: return LowerVAARG(Op, DAG);
  case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
    return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ: return LowerCTLZ(Op, DAG);
  case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
  case ISD::CTTZ: return LowerCTTZ(Op, DAG);
  case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
  case ISD::UMUL_LOHI:
  case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO: return LowerXALUO(Op, DAG);
  case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget, DAG);
  case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::ADD: return LowerADD(Op, DAG);
  case ISD::SUB: return LowerSUB(Op, DAG);
  case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
  }
}
/// ReplaceNodeResults - Replace a node with an illegal result type
/// with a new node built out of custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
  case X86ISD::FMINC:
  case X86ISD::FMIN:
  case X86ISD::FMAXC:
  case X86ISD::FMAX: {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v2f32)
      llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
    SDValue UNDEF = DAG.getUNDEF(VT);
    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(0), UNDEF);
    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(1), UNDEF);
    Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
    return;
  }
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:
    // We don't want to expand or promote these.
    return;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
    Results.push_back(V);
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;

    if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
      return;

    std::pair<SDValue,SDValue> Vals =
        FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
    SDValue FIST = Vals.first, StackSlot = Vals.second;
    if (FIST.getNode()) {
      EVT VT = N->getValueType(0);
      // Return a load from the stack slot.
      if (StackSlot.getNode())
        Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
                                      MachinePointerInfo(),
                                      false, false, false, 0));
      else
        Results.push_back(FIST);
    }
    return;
  }
  case ISD::UINT_TO_FP: {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    if (N->getOperand(0).getValueType() != MVT::v2i32 ||
        N->getValueType(0) != MVT::v2f32)
      return;
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
                                 N->getOperand(0));
    SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                     MVT::f64);
    SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
    SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
                             DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
    Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
    SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
    Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
    return;
  }
  case ISD::FP_ROUND: {
    if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
      return;
    SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
    Results.push_back(V);
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default : llvm_unreachable("Do not know how to custom type "
                               "legalize this intrinsic operation!");
    case Intrinsic::x86_rdtsc:
      return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdtscp:
      return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdpmc:
      return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
    }
  }
  case ISD::READCYCLECOUNTER: {
    return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
                                   Results);
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    EVT T = N->getValueType(0);
    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
    bool Regs64bit = T == MVT::i128;
    EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
    SDValue cpInL, cpInH;
    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(0, HalfT));
    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(1, HalfT));
    cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
                             Regs64bit ? X86::RAX : X86::EAX,
                             cpInL, SDValue());
    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
                             Regs64bit ? X86::RDX : X86::EDX,
                             cpInH, cpInL.getValue(1));
    SDValue swapInL, swapInH;
    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(0, HalfT));
    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(1, HalfT));
    swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
                               Regs64bit ? X86::RBX : X86::EBX,
                               swapInL, cpInH.getValue(1));
    swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
                               Regs64bit ? X86::RCX : X86::ECX,
                               swapInH, swapInL.getValue(1));
    SDValue Ops[] = { swapInH.getValue(0),
                      N->getOperand(1),
                      swapInH.getValue(1) };
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
                                  X86ISD::LCMPXCHG8_DAG;
    SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
                                        Regs64bit ? X86::RAX : X86::EAX,
                                        HalfT, Result.getValue(1));
    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
                                        Regs64bit ? X86::RDX : X86::EDX,
                                        HalfT, cpOutL.getValue(2));
    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0) };

    SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
                                        MVT::i32, cpOutH.getValue(2));
    SDValue Success =
        DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                    DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
    Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));

    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
    Results.push_back(Success);
    Results.push_back(EFLAGS.getValue(1));
    return;
  }
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD: {
    // Delegate to generic TypeLegalization. Situations we can really handle
    // should have already been dealt with by AtomicExpandPass.cpp.
    break;
  }
  case ISD::BITCAST: {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    EVT DstVT = N->getValueType(0);
    EVT SrcVT = N->getOperand(0)->getValueType(0);

    if (SrcVT != MVT::f64 ||
        (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
      return;

    unsigned NumElts = DstVT.getVectorNumElements();
    EVT SVT = DstVT.getVectorElementType();
    EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
    SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                   MVT::v2f64, N->getOperand(0));
    SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);

    if (ExperimentalVectorWideningLegalization) {
      // If we are legalizing vectors by widening, we already have the desired
      // legal vector type, just return it.
      Results.push_back(ToVecInt);
      return;
    }

    SmallVector<SDValue, 8> Elts;
    for (unsigned i = 0, e = NumElts; i != e; ++i)
      Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
                                 ToVecInt, DAG.getIntPtrConstant(i)));

    Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
  }
  }
}
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case X86ISD::BSF: return "X86ISD::BSF";
  case X86ISD::BSR: return "X86ISD::BSR";
  case X86ISD::SHLD: return "X86ISD::SHLD";
  case X86ISD::SHRD: return "X86ISD::SHRD";
  case X86ISD::FAND: return "X86ISD::FAND";
  case X86ISD::FANDN: return "X86ISD::FANDN";
  case X86ISD::FOR: return "X86ISD::FOR";
  case X86ISD::FXOR: return "X86ISD::FXOR";
  case X86ISD::FSRL: return "X86ISD::FSRL";
  case X86ISD::FILD: return "X86ISD::FILD";
  case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD: return "X86ISD::FLD";
  case X86ISD::FST: return "X86ISD::FST";
  case X86ISD::CALL: return "X86ISD::CALL";
  case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
  case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
  case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
  case X86ISD::BT: return "X86ISD::BT";
  case X86ISD::CMP: return "X86ISD::CMP";
  case X86ISD::COMI: return "X86ISD::COMI";
  case X86ISD::UCOMI: return "X86ISD::UCOMI";
  case X86ISD::CMPM: return "X86ISD::CMPM";
  case X86ISD::CMPMU: return "X86ISD::CMPMU";
  case X86ISD::SETCC: return "X86ISD::SETCC";
  case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
  case X86ISD::FSETCC: return "X86ISD::FSETCC";
  case X86ISD::CMOV: return "X86ISD::CMOV";
  case X86ISD::BRCOND: return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper: return "X86ISD::Wrapper";
  case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
  case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
  case X86ISD::PINSRB: return "X86ISD::PINSRB";
  case X86ISD::PINSRW: return "X86ISD::PINSRW";
  case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
  case X86ISD::ANDNP: return "X86ISD::ANDNP";
  case X86ISD::PSIGN: return "X86ISD::PSIGN";
  case X86ISD::BLENDI: return "X86ISD::BLENDI";
  case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
  case X86ISD::SUBUS: return "X86ISD::SUBUS";
  case X86ISD::HADD: return "X86ISD::HADD";
  case X86ISD::HSUB: return "X86ISD::HSUB";
  case X86ISD::FHADD: return "X86ISD::FHADD";
  case X86ISD::FHSUB: return "X86ISD::FHSUB";
  case X86ISD::UMAX: return "X86ISD::UMAX";
  case X86ISD::UMIN: return "X86ISD::UMIN";
  case X86ISD::SMAX: return "X86ISD::SMAX";
  case X86ISD::SMIN: return "X86ISD::SMIN";
  case X86ISD::FMAX: return "X86ISD::FMAX";
  case X86ISD::FMIN: return "X86ISD::FMIN";
  case X86ISD::FMAXC: return "X86ISD::FMAXC";
  case X86ISD::FMINC: return "X86ISD::FMINC";
  case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
  case X86ISD::FRCP: return "X86ISD::FRCP";
  case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
  case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
  case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
  case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
  case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
  case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
  case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
  case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
  case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
  case X86ISD::VZEXT: return "X86ISD::VZEXT";
  case X86ISD::VSEXT: return "X86ISD::VSEXT";
  case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
  case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
  case X86ISD::VINSERT: return "X86ISD::VINSERT";
  case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
  case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
  case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
  case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
  case X86ISD::VSHL: return "X86ISD::VSHL";
  case X86ISD::VSRL: return "X86ISD::VSRL";
  case X86ISD::VSRA: return "X86ISD::VSRA";
  case X86ISD::VSHLI: return "X86ISD::VSHLI";
  case X86ISD::VSRLI: return "X86ISD::VSRLI";
  case X86ISD::VSRAI: return "X86ISD::VSRAI";
  case X86ISD::CMPP: return "X86ISD::CMPP";
  case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
  case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
  case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
  case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
  case X86ISD::ADD: return "X86ISD::ADD";
  case X86ISD::SUB: return "X86ISD::SUB";
  case X86ISD::ADC: return "X86ISD::ADC";
  case X86ISD::SBB: return "X86ISD::SBB";
  case X86ISD::SMUL: return "X86ISD::SMUL";
  case X86ISD::UMUL: return "X86ISD::UMUL";
  case X86ISD::SMUL8: return "X86ISD::SMUL8";
  case X86ISD::UMUL8: return "X86ISD::UMUL8";
  case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
  case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
  case X86ISD::INC: return "X86ISD::INC";
  case X86ISD::DEC: return "X86ISD::DEC";
  case X86ISD::OR: return "X86ISD::OR";
  case X86ISD::XOR: return "X86ISD::XOR";
  case X86ISD::AND: return "X86ISD::AND";
  case X86ISD::BEXTR: return "X86ISD::BEXTR";
  case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
  case X86ISD::PTEST: return "X86ISD::PTEST";
  case X86ISD::TESTP: return "X86ISD::TESTP";
  case X86ISD::TESTM: return "X86ISD::TESTM";
  case X86ISD::TESTNM: return "X86ISD::TESTNM";
  case X86ISD::KORTEST: return "X86ISD::KORTEST";
  case X86ISD::PACKSS: return "X86ISD::PACKSS";
  case X86ISD::PACKUS: return "X86ISD::PACKUS";
  case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
  case X86ISD::VALIGN: return "X86ISD::VALIGN";
  case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
  case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
  case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
  case X86ISD::SHUFP: return "X86ISD::SHUFP";
  case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
  case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
  case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
  case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
  case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
  case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
  case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
  case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
  case X86ISD::MOVSD: return "X86ISD::MOVSD";
  case X86ISD::MOVSS: return "X86ISD::MOVSS";
  case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
  case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
  case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
  case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
  case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
  case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
  case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
  case X86ISD::VPERMV: return "X86ISD::VPERMV";
  case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
  case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
  case X86ISD::VPERMI: return "X86ISD::VPERMI";
  case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
  case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
  case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
  case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
  case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
  case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
  case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
  case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
  case X86ISD::SAHF: return "X86ISD::SAHF";
  case X86ISD::RDRAND: return "X86ISD::RDRAND";
  case X86ISD::RDSEED: return "X86ISD::RDSEED";
  case X86ISD::FMADD: return "X86ISD::FMADD";
  case X86ISD::FMSUB: return "X86ISD::FMSUB";
  case X86ISD::FNMADD: return "X86ISD::FNMADD";
  case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
  case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
  case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
  case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
  case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
  case X86ISD::XTEST: return "X86ISD::XTEST";
  case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
  case X86ISD::EXPAND: return "X86ISD::EXPAND";
  case X86ISD::SELECT: return "X86ISD::SELECT";
  }
}
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // X86 supports extremely general addressing modes.
  CodeModel::Model M = getTargetMachine().getCodeModel();
  Reloc::Model R = getTargetMachine().getRelocationModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags =
        Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || R != Reloc::Static) &&
        Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg. Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default:  // Other stuff never works.
    return false;
  }

  return true;
}
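// E.g. (illustrative): AM.Scale == 3 with no base register is still accepted
// because it can be matched as base+index*2 (lea (%rax,%rax,2), ...), while
// scale 3 plus an explicit base register has no single-instruction encoding.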
bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
  unsigned Bits = Ty->getScalarSizeInBits();

  // 8-bit shifts are always expensive, but versions with a scalar amount
  // aren't particularly cheaper than those without.
  if (Bits == 8)
    return false;

  // On AVX2 there are new vpsllv[dq] instructions (and other shifts) that make
  // variable shifts just as cheap as scalar ones.
  if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
    return false;

  // Otherwise, it's significantly cheaper to shift by a scalar amount than by
  // a fully general vector.
  return true;
}
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}

bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Can also use sub to handle negated immediates.
  return isInt<32>(Imm);
}

bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}
bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // X86 has 8, 16, and 32-bit zero-extending loads.
    return true;
  }

  return false;
}
20224 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20225 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20228 VT = VT.getScalarType();
20230 if (!VT.isSimple())
20233 switch (VT.getSimpleVT().SimpleTy) {
20244 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20245 // i16 instructions are longer (0x66 prefix) and potentially slower.
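// With an immediate operand the 0x66 prefix is also a length-changing
// prefix, which can stall the decoders on many Intel microarchitectures.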
20246 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20249 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20250 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20251 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20252 /// are assumed to be legal.
20254 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20256 if (!VT.isSimple())
20259 MVT SVT = VT.getSimpleVT();
20261 // Very little shuffling can be done for 64-bit vectors right now.
20262 if (VT.getSizeInBits() == 64)
20265 // This is an experimental legality test that is tailored to match the
20266 // legality test of the experimental lowering more closely. They are gated
20267 // separately to ease testing of performance differences.
20268 if (ExperimentalVectorShuffleLegality)
20269 // We only care that the types being shuffled are legal. The lowering can
20270 // handle any possible shuffle mask that results.
20271 return isTypeLegal(SVT);
20273 // If this is a single-input shuffle with no 128-bit lane crossings, we can
20274 // lower it into pshufb.
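// PSHUFB selects each destination byte independently from any byte of its
// single source operand (or zeroes it), so any single-input shuffle that
// stays within a 128-bit lane is one instruction; the 256-bit VPSHUFB
// shuffles each 128-bit lane separately, hence the lane-crossing check.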
20275 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20276 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20277 bool isLegal = true;
20278 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20279 if (M[I] >= (int)SVT.getVectorNumElements() ||
20280 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20289 // FIXME: blends, shifts.
20290 return (SVT.getVectorNumElements() == 2 ||
20291 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20292 isMOVLMask(M, SVT) ||
20293 isCommutedMOVLMask(M, SVT) ||
20294 isMOVHLPSMask(M, SVT) ||
20295 isSHUFPMask(M, SVT) ||
20296 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20297 isPSHUFDMask(M, SVT) ||
20298 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20299 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20300 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20301 isPALIGNRMask(M, SVT, Subtarget) ||
20302 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20303 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20304 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20305 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20306 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20307 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20311 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20313 if (!VT.isSimple())
20316 MVT SVT = VT.getSimpleVT();
20318 // This is an experimental legality test that is tailored to match the
20319 // legality test of the experimental lowering more closely. They are gated
20320 // separately to ease testing of performance differences.
20321 if (ExperimentalVectorShuffleLegality)
20322 // The new vector shuffle lowering is very good at managing zero-inputs.
20323 return isShuffleMaskLegal(Mask, VT);
20325 unsigned NumElts = SVT.getVectorNumElements();
20326 // FIXME: This collection of masks seems suspect.
20329 if (NumElts == 4 && SVT.is128BitVector()) {
20330 return (isMOVLMask(Mask, SVT) ||
20331 isCommutedMOVLMask(Mask, SVT, true) ||
20332 isSHUFPMask(Mask, SVT) ||
20333 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20334 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20335 Subtarget->hasInt256()));
20340 //===----------------------------------------------------------------------===//
20341 // X86 Scheduler Hooks
20342 //===----------------------------------------------------------------------===//
20344 /// Utility function to emit xbegin specifying the start of an RTM region.
20345 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20346 const TargetInstrInfo *TII) {
20347 DebugLoc DL = MI->getDebugLoc();
20349 const BasicBlock *BB = MBB->getBasicBlock();
20350 MachineFunction::iterator I = MBB;
20353 // For the v = xbegin(), we generate
20354 //
20355 // thisMBB:
20356 //  xbegin sinkMBB
20357 //
20358 // mainMBB:
20359 //  eax = -1
20360 //
20361 // sinkMBB:
20362 //  v = eax
20364 MachineBasicBlock *thisMBB = MBB;
20365 MachineFunction *MF = MBB->getParent();
20366 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20367 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20368 MF->insert(I, mainMBB);
20369 MF->insert(I, sinkMBB);
20371 // Transfer the remainder of BB and its successor edges to sinkMBB.
20372 sinkMBB->splice(sinkMBB->begin(), MBB,
20373 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20374 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20376 // thisMBB:
20377 //  xbegin sinkMBB
20378 //  # fallthrough to mainMBB
20379 //  # abort path branches to sinkMBB
20380 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20381 thisMBB->addSuccessor(mainMBB);
20382 thisMBB->addSuccessor(sinkMBB);
20386 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20387 mainMBB->addSuccessor(sinkMBB);
20390 // EAX is live into the sinkMBB
20391 sinkMBB->addLiveIn(X86::EAX);
20392 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20393 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20396 MI->eraseFromParent();
20400 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
20401 // or XMM0_V32I8 in AVX, all of this code can be replaced with patterns
20402 // in the .td file.
20403 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20404 const TargetInstrInfo *TII) {
20406 switch (MI->getOpcode()) {
20407 default: llvm_unreachable("illegal opcode!");
20408 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20409 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20410 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20411 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20412 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20413 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20414 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20415 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20418 DebugLoc dl = MI->getDebugLoc();
20419 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20421 unsigned NumArgs = MI->getNumOperands();
20422 for (unsigned i = 1; i < NumArgs; ++i) {
20423 MachineOperand &Op = MI->getOperand(i);
20424 if (!(Op.isReg() && Op.isImplicit()))
20425 MIB.addOperand(Op);
20427 if (MI->hasOneMemOperand())
20428 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20430 BuildMI(*BB, MI, dl,
20431 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20432 .addReg(X86::XMM0);
20434 MI->eraseFromParent();
20438 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20439 // defs in an instruction pattern
20440 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20441 const TargetInstrInfo *TII) {
20443 switch (MI->getOpcode()) {
20444 default: llvm_unreachable("illegal opcode!");
20445 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20446 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20447 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20448 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20449 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20450 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20451 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20452 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20455 DebugLoc dl = MI->getDebugLoc();
20456 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20458 unsigned NumArgs = MI->getNumOperands(); // operand 0 is the result; skipped below
20459 for (unsigned i = 1; i < NumArgs; ++i) {
20460 MachineOperand &Op = MI->getOperand(i);
20461 if (!(Op.isReg() && Op.isImplicit()))
20462 MIB.addOperand(Op);
20464 if (MI->hasOneMemOperand())
20465 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20467 BuildMI(*BB, MI, dl,
20468 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20471 MI->eraseFromParent();
20475 static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20476 const TargetInstrInfo *TII,
20477 const X86Subtarget* Subtarget) {
20478 DebugLoc dl = MI->getDebugLoc();
20480 // Address into RAX/EAX, other two args into ECX, EDX.
20481 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20482 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20483 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20484 for (int i = 0; i < X86::AddrNumOperands; ++i)
20485 MIB.addOperand(MI->getOperand(i));
20487 unsigned ValOps = X86::AddrNumOperands;
20488 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20489 .addReg(MI->getOperand(ValOps).getReg());
20490 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20491 .addReg(MI->getOperand(ValOps+1).getReg());
20493 // MONITOR itself takes no explicit operands; it reads RAX/EAX, ECX and EDX implicitly.
20494 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20496 MI->eraseFromParent(); // The pseudo is gone now.
20500 MachineBasicBlock *
20501 X86TargetLowering::EmitVAARG64WithCustomInserter(
20503 MachineBasicBlock *MBB) const {
20504 // Emit va_arg instruction on X86-64.
20506 // Operands to this pseudo-instruction:
20507 // 0 ) Output : destination address (reg)
20508 // 1-5) Input : va_list address (addr, i64mem)
20509 // 6 ) ArgSize : Size (in bytes) of vararg type
20510 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20511 // 8 ) Align : Alignment of type
20512 // 9 ) EFLAGS (implicit-def)
20514 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20515 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20517 unsigned DestReg = MI->getOperand(0).getReg();
20518 MachineOperand &Base = MI->getOperand(1);
20519 MachineOperand &Scale = MI->getOperand(2);
20520 MachineOperand &Index = MI->getOperand(3);
20521 MachineOperand &Disp = MI->getOperand(4);
20522 MachineOperand &Segment = MI->getOperand(5);
20523 unsigned ArgSize = MI->getOperand(6).getImm();
20524 unsigned ArgMode = MI->getOperand(7).getImm();
20525 unsigned Align = MI->getOperand(8).getImm();
20527 // Memory Reference
20528 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20529 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20530 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20532 // Machine Information
20533 const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
20534 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20535 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20536 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20537 DebugLoc DL = MI->getDebugLoc();
20539 // struct va_list {
20540 //   i32   gp_offset
20541 //   i32   fp_offset
20542 //   i64   overflow_area (address)
20543 //   i64   reg_save_area (address)
20544 // }
20545 // sizeof(va_list) = 24
20546 // alignment(va_list) = 8
20548 unsigned TotalNumIntRegs = 6;
20549 unsigned TotalNumXMMRegs = 8;
20550 bool UseGPOffset = (ArgMode == 1);
20551 bool UseFPOffset = (ArgMode == 2);
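// Per the SysV x86-64 ABI, the register save area holds the 6 integer
// argument registers in 8-byte slots (gp_offset in [0, 48)) followed by
// the 8 XMM argument registers in 16-byte slots (fp_offset in [48, 176)).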
20552 unsigned MaxOffset = TotalNumIntRegs * 8 +
20553 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
20555 // Round ArgSize up to a multiple of 8.
20556 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20557 bool NeedsAlign = (Align > 8);
20559 MachineBasicBlock *thisMBB = MBB;
20560 MachineBasicBlock *overflowMBB;
20561 MachineBasicBlock *offsetMBB;
20562 MachineBasicBlock *endMBB;
20564 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20565 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20566 unsigned OffsetReg = 0;
20568 if (!UseGPOffset && !UseFPOffset) {
20569 // If we only pull from the overflow region, we don't create a branch.
20570 // We don't need to alter control flow.
20571 OffsetDestReg = 0; // unused
20572 OverflowDestReg = DestReg;
20574 offsetMBB = nullptr;
20575 overflowMBB = thisMBB;
20578 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20579 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20580 // If not, pull from overflow_area. (branch to overflowMBB)
20582 //        thisMBB
20583 //          |     .
20584 //          |        .
20585 //     offsetMBB   overflowMBB
20586 //          |        .
20587 //          |     .
20588 //        endMBB
20590 // Registers for the PHI in endMBB
20591 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20592 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20594 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20595 MachineFunction *MF = MBB->getParent();
20596 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20597 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20598 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20600 MachineFunction::iterator MBBIter = MBB;
20603 // Insert the new basic blocks
20604 MF->insert(MBBIter, offsetMBB);
20605 MF->insert(MBBIter, overflowMBB);
20606 MF->insert(MBBIter, endMBB);
20608 // Transfer the remainder of MBB and its successor edges to endMBB.
20609 endMBB->splice(endMBB->begin(), thisMBB,
20610 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20611 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20613 // Make offsetMBB and overflowMBB successors of thisMBB
20614 thisMBB->addSuccessor(offsetMBB);
20615 thisMBB->addSuccessor(overflowMBB);
20617 // endMBB is a successor of both offsetMBB and overflowMBB
20618 offsetMBB->addSuccessor(endMBB);
20619 overflowMBB->addSuccessor(endMBB);
20621 // Load the offset value into a register
20622 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20623 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
20627 .addDisp(Disp, UseFPOffset ? 4 : 0)
20628 .addOperand(Segment)
20629 .setMemRefs(MMOBegin, MMOEnd);
20631 // Check if there is enough room left to pull this argument.
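// Both the offset and MaxOffset are multiples of 8, so comparing against
// MaxOffset + 8 - ArgSizeA8 below is equivalent to testing
// offset + ArgSizeA8 > MaxOffset, i.e. whether the argument no longer
// fits in the register save area.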
20632 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
20634 .addImm(MaxOffset + 8 - ArgSizeA8);
20636 // Branch to "overflowMBB" if offset >= max
20637 // Fall through to "offsetMBB" otherwise
20638 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20639 .addMBB(overflowMBB);
20642 // In offsetMBB, emit code to use the reg_save_area.
20644 assert(OffsetReg != 0);
20646 // Read the reg_save_area address.
20647 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
20648 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
20653 .addOperand(Segment)
20654 .setMemRefs(MMOBegin, MMOEnd);
20656 // Zero-extend the offset
20657 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
20658 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
20661 .addImm(X86::sub_32bit);
20663 // Add the offset to the reg_save_area to get the final address.
20664 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
20665 .addReg(OffsetReg64)
20666 .addReg(RegSaveReg);
20668 // Compute the offset for the next argument
20669 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20670 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
20672 .addImm(UseFPOffset ? 16 : 8);
20674 // Store it back into the va_list.
20675 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
20679 .addDisp(Disp, UseFPOffset ? 4 : 0)
20680 .addOperand(Segment)
20681 .addReg(NextOffsetReg)
20682 .setMemRefs(MMOBegin, MMOEnd);
20685 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
20690 // Emit code to use overflow area
20693 // Load the overflow_area address into a register.
20694 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
20695 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
20700 .addOperand(Segment)
20701 .setMemRefs(MMOBegin, MMOEnd);
20703 // If we need to align it, do so. Otherwise, just copy the address
20704 // to OverflowDestReg.
20706 // Align the overflow address
20707 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
20708 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
20710 // aligned_addr = (addr + (align-1)) & ~(align-1)
20711 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
20712 .addReg(OverflowAddrReg)
20715 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
20717 .addImm(~(uint64_t)(Align-1));
20719 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
20720 .addReg(OverflowAddrReg);
20723 // Compute the next overflow address after this argument.
20724 // (the overflow address should be kept 8-byte aligned)
20725 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
20726 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
20727 .addReg(OverflowDestReg)
20728 .addImm(ArgSizeA8);
20730 // Store the new overflow address.
20731 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
20736 .addOperand(Segment)
20737 .addReg(NextAddrReg)
20738 .setMemRefs(MMOBegin, MMOEnd);
20740 // If we branched, emit the PHI to the front of endMBB.
20742 BuildMI(*endMBB, endMBB->begin(), DL,
20743 TII->get(X86::PHI), DestReg)
20744 .addReg(OffsetDestReg).addMBB(offsetMBB)
20745 .addReg(OverflowDestReg).addMBB(overflowMBB);
20748 // Erase the pseudo instruction
20749 MI->eraseFromParent();
20754 MachineBasicBlock *
20755 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
20757 MachineBasicBlock *MBB) const {
20758 // Emit code to save XMM registers to the stack. The ABI says that the
20759 // number of registers to save is given in %al, so it's theoretically
20760 // possible to do an indirect jump trick to avoid saving all of them;
20761 // however, this code takes the simpler approach of executing all of the
20762 // stores if %al is non-zero. It's less code, it's probably easier on the
20763 // hardware branch predictor, and stores aren't all that expensive anyway.
20766 // Create the new basic blocks. One block contains all the XMM stores,
20767 // and one block is the final destination regardless of whether any
20768 // stores were performed.
20769 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20770 MachineFunction *F = MBB->getParent();
20771 MachineFunction::iterator MBBIter = MBB;
20773 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
20774 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
20775 F->insert(MBBIter, XMMSaveMBB);
20776 F->insert(MBBIter, EndMBB);
20778 // Transfer the remainder of MBB and its successor edges to EndMBB.
20779 EndMBB->splice(EndMBB->begin(), MBB,
20780 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20781 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
20783 // The original block will now fall through to the XMM save block.
20784 MBB->addSuccessor(XMMSaveMBB);
20785 // The XMMSaveMBB will fall through to the end block.
20786 XMMSaveMBB->addSuccessor(EndMBB);
20788 // Now add the instructions.
20789 const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
20790 DebugLoc DL = MI->getDebugLoc();
20792 unsigned CountReg = MI->getOperand(0).getReg();
20793 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
20794 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
20796 if (!Subtarget->isTargetWin64()) {
20797 // If %al is 0, branch around the XMM save block.
20798 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
20799 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
20800 MBB->addSuccessor(EndMBB);
20803 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
20804 // that was just emitted, but clearly shouldn't be "saved".
20805 assert((MI->getNumOperands() <= 3 ||
20806 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
20807 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
20808 && "Expected last argument to be EFLAGS");
20809 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
20810 // In the XMM save block, save all the XMM argument registers.
20811 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
20812 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
20813 MachineMemOperand *MMO =
20814 F->getMachineMemOperand(
20815 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
20816 MachineMemOperand::MOStore,
20817 /*Size=*/16, /*Align=*/16);
20818 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
20819 .addFrameIndex(RegSaveFrameIndex)
20820 .addImm(/*Scale=*/1)
20821 .addReg(/*IndexReg=*/0)
20822 .addImm(/*Disp=*/Offset)
20823 .addReg(/*Segment=*/0)
20824 .addReg(MI->getOperand(i).getReg())
20825 .addMemOperand(MMO);
20828 MI->eraseFromParent(); // The pseudo instruction is gone now.
20833 // The EFLAGS operand of SelectItr might be missing a kill marker
20834 // because there were multiple uses of EFLAGS, and ISel didn't know
20835 // which to mark. Figure out whether SelectItr should have had a
20836 // kill marker, and set it if it should. Returns the correct kill
20837 // marker value.
20838 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
20839 MachineBasicBlock* BB,
20840 const TargetRegisterInfo* TRI) {
20841 // Scan forward through BB for a use/def of EFLAGS.
20842 MachineBasicBlock::iterator miI(std::next(SelectItr));
20843 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
20844 const MachineInstr& mi = *miI;
20845 if (mi.readsRegister(X86::EFLAGS))
20847 if (mi.definesRegister(X86::EFLAGS))
20848 break; // Should have kill-flag - update below.
20851 // If we hit the end of the block, check whether EFLAGS is live into a
20852 // successor.
20853 if (miI == BB->end()) {
20854 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
20855 sEnd = BB->succ_end();
20856 sItr != sEnd; ++sItr) {
20857 MachineBasicBlock* succ = *sItr;
20858 if (succ->isLiveIn(X86::EFLAGS))
20863 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
20864 // out. SelectMI should have a kill flag on EFLAGS.
20865 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
20869 MachineBasicBlock *
20870 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
20871 MachineBasicBlock *BB) const {
20872 const TargetInstrInfo *TII = BB->getParent()->getSubtarget().getInstrInfo();
20873 DebugLoc DL = MI->getDebugLoc();
20875 // To "insert" a SELECT_CC instruction, we actually have to insert the
20876 // diamond control-flow pattern. The incoming instruction knows the
20877 // destination vreg to set, the condition code register to branch on, the
20878 // true/false values to select between, and a branch opcode to use.
20879 const BasicBlock *LLVM_BB = BB->getBasicBlock();
20880 MachineFunction::iterator It = BB;
20886 //   cmpTY ccX, r1, r2
20887 //   bCC copy1MBB
20888 //   fallthrough --> copy0MBB
20889 MachineBasicBlock *thisMBB = BB;
20890 MachineFunction *F = BB->getParent();
20891 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
20892 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
20893 F->insert(It, copy0MBB);
20894 F->insert(It, sinkMBB);
20896 // If the EFLAGS register isn't dead in the terminator, then claim that it's
20897 // live into the sink and copy blocks.
20898 const TargetRegisterInfo *TRI =
20899 BB->getParent()->getSubtarget().getRegisterInfo();
20900 if (!MI->killsRegister(X86::EFLAGS) &&
20901 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
20902 copy0MBB->addLiveIn(X86::EFLAGS);
20903 sinkMBB->addLiveIn(X86::EFLAGS);
20906 // Transfer the remainder of BB and its successor edges to sinkMBB.
20907 sinkMBB->splice(sinkMBB->begin(), BB,
20908 std::next(MachineBasicBlock::iterator(MI)), BB->end());
20909 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
20911 // Add the true and fallthrough blocks as its successors.
20912 BB->addSuccessor(copy0MBB);
20913 BB->addSuccessor(sinkMBB);
20915 // Create the conditional branch instruction.
20917 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
20918 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
20920 //  copy0MBB:
20921 //   %FalseValue = ...
20922 //   # fallthrough to sinkMBB
20923 copy0MBB->addSuccessor(sinkMBB);
20926 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
20928 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20929 TII->get(X86::PHI), MI->getOperand(0).getReg())
20930 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
20931 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
20933 MI->eraseFromParent(); // The pseudo instruction is gone now.
20937 MachineBasicBlock *
20938 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
20939 MachineBasicBlock *BB) const {
20940 MachineFunction *MF = BB->getParent();
20941 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
20942 DebugLoc DL = MI->getDebugLoc();
20943 const BasicBlock *LLVM_BB = BB->getBasicBlock();
20945 assert(MF->shouldSplitStack());
20947 const bool Is64Bit = Subtarget->is64Bit();
20948 const bool IsLP64 = Subtarget->isTarget64BitLP64();
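// The stacklet limit lives at a fixed offset in the thread control block.
// The constants below (0x70 for LP64, 0x40 for x32, 0x30 for 32-bit) are
// assumed to match the TLS slots used by libgcc's segmented-stack runtime.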
20950 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
20951 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
20953 // BB:
20954 //  ... [Till the alloca]
20955 //  If stacklet is not large enough, jump to mallocMBB
20956 //
20957 // bumpMBB:
20958 //  Allocate by subtracting from RSP
20959 //  Jump to continueMBB
20960 //
20961 // mallocMBB:
20962 //  Allocate by call to runtime
20963 //
20964 // continueMBB:
20965 //  ...
20966 //  [rest of original BB]
20969 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20970 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20971 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20973 MachineRegisterInfo &MRI = MF->getRegInfo();
20974 const TargetRegisterClass *AddrRegClass =
20975 getRegClassFor(getPointerTy());
20977 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
20978 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
20979 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
20980 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
20981 sizeVReg = MI->getOperand(1).getReg(),
20982 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
20984 MachineFunction::iterator MBBIter = BB;
20987 MF->insert(MBBIter, bumpMBB);
20988 MF->insert(MBBIter, mallocMBB);
20989 MF->insert(MBBIter, continueMBB);
20991 continueMBB->splice(continueMBB->begin(), BB,
20992 std::next(MachineBasicBlock::iterator(MI)), BB->end());
20993 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
20995 // Add code to the main basic block to check if the stack limit has been hit,
20996 // and if so, jump to mallocMBB otherwise to bumpMBB.
20997 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
20998 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
20999 .addReg(tmpSPVReg).addReg(sizeVReg);
21000 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21001 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21002 .addReg(SPLimitVReg);
21003 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21005 // bumpMBB simply decreases the stack pointer, since we know the current
21006 // stacklet has enough space.
21007 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21008 .addReg(SPLimitVReg);
21009 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21010 .addReg(SPLimitVReg);
21011 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21013 // Calls into a routine in libgcc to allocate more space from the heap.
21014 const uint32_t *RegMask = MF->getTarget()
21015 .getSubtargetImpl()
21016 ->getRegisterInfo()
21017 ->getCallPreservedMask(CallingConv::C);
21019 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21021 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21022 .addExternalSymbol("__morestack_allocate_stack_space")
21023 .addRegMask(RegMask)
21024 .addReg(X86::RDI, RegState::Implicit)
21025 .addReg(X86::RAX, RegState::ImplicitDefine);
21026 } else if (Is64Bit) {
21027 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21029 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21030 .addExternalSymbol("__morestack_allocate_stack_space")
21031 .addRegMask(RegMask)
21032 .addReg(X86::EDI, RegState::Implicit)
21033 .addReg(X86::EAX, RegState::ImplicitDefine);
21035 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21037 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21038 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21039 .addExternalSymbol("__morestack_allocate_stack_space")
21040 .addRegMask(RegMask)
21041 .addReg(X86::EAX, RegState::ImplicitDefine);
21045 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21048 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21049 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21050 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21052 // Set up the CFG correctly.
21053 BB->addSuccessor(bumpMBB);
21054 BB->addSuccessor(mallocMBB);
21055 mallocMBB->addSuccessor(continueMBB);
21056 bumpMBB->addSuccessor(continueMBB);
21058 // Take care of the PHI nodes.
21059 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21060 MI->getOperand(0).getReg())
21061 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21062 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21064 // Delete the original pseudo instruction.
21065 MI->eraseFromParent();
21068 return continueMBB;
21071 MachineBasicBlock *
21072 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21073 MachineBasicBlock *BB) const {
21074 const TargetInstrInfo *TII = BB->getParent()->getSubtarget().getInstrInfo();
21075 DebugLoc DL = MI->getDebugLoc();
21077 assert(!Subtarget->isTargetMachO());
21079 // The lowering is pretty easy: we're just emitting the call to _alloca. The
21080 // non-trivial part is the implicit def of ESP.
21082 if (Subtarget->isTargetWin64()) {
21083 if (Subtarget->isTargetCygMing()) {
21084 // ___chkstk(Mingw64):
21085 // Clobbers R10, R11, RAX and EFLAGS.
21087 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
21088 .addExternalSymbol("___chkstk")
21089 .addReg(X86::RAX, RegState::Implicit)
21090 .addReg(X86::RSP, RegState::Implicit)
21091 .addReg(X86::RAX, RegState::Define | RegState::Implicit)
21092 .addReg(X86::RSP, RegState::Define | RegState::Implicit)
21093 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
21095 // __chkstk(MSVCRT): does not update stack pointer.
21096 // Clobbers R10, R11 and EFLAGS.
21097 BuildMI(*BB, MI, DL, TII->get(X86::W64ALLOCA))
21098 .addExternalSymbol("__chkstk")
21099 .addReg(X86::RAX, RegState::Implicit)
21100 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
21101 // RAX has the offset to be subtracted from RSP.
21102 BuildMI(*BB, MI, DL, TII->get(X86::SUB64rr), X86::RSP)
21107 const char *StackProbeSymbol = (Subtarget->isTargetKnownWindowsMSVC() ||
21108 Subtarget->isTargetWindowsItanium())
21112 BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
21113 .addExternalSymbol(StackProbeSymbol)
21114 .addReg(X86::EAX, RegState::Implicit)
21115 .addReg(X86::ESP, RegState::Implicit)
21116 .addReg(X86::EAX, RegState::Define | RegState::Implicit)
21117 .addReg(X86::ESP, RegState::Define | RegState::Implicit)
21118 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
21121 MI->eraseFromParent(); // The pseudo instruction is gone now.
21125 MachineBasicBlock *
21126 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21127 MachineBasicBlock *BB) const {
21128 // This is pretty easy. We're taking the value that we received from
21129 // our load from the relocation, sticking it in either RDI (x86-64)
21130 // or EAX (x86-32) and doing an indirect call. The return value will then
21131 // be in the normal return register.
21132 MachineFunction *F = BB->getParent();
21133 const X86InstrInfo *TII =
21134 static_cast<const X86InstrInfo *>(F->getSubtarget().getInstrInfo());
21135 DebugLoc DL = MI->getDebugLoc();
21137 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21138 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21140 // Get a register mask for the lowered call.
21141 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21142 // proper register mask.
21143 const uint32_t *RegMask = F->getTarget()
21144 .getSubtargetImpl()
21145 ->getRegisterInfo()
21146 ->getCallPreservedMask(CallingConv::C);
21147 if (Subtarget->is64Bit()) {
21148 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21149 TII->get(X86::MOV64rm), X86::RDI)
21151 .addImm(0).addReg(0)
21152 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21153 MI->getOperand(3).getTargetFlags())
21155 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21156 addDirectMem(MIB, X86::RDI);
21157 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21158 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21159 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21160 TII->get(X86::MOV32rm), X86::EAX)
21162 .addImm(0).addReg(0)
21163 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21164 MI->getOperand(3).getTargetFlags())
21166 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21167 addDirectMem(MIB, X86::EAX);
21168 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21170 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21171 TII->get(X86::MOV32rm), X86::EAX)
21172 .addReg(TII->getGlobalBaseReg(F))
21173 .addImm(0).addReg(0)
21174 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21175 MI->getOperand(3).getTargetFlags())
21177 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21178 addDirectMem(MIB, X86::EAX);
21179 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21182 MI->eraseFromParent(); // The pseudo instruction is gone now.
21186 MachineBasicBlock *
21187 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21188 MachineBasicBlock *MBB) const {
21189 DebugLoc DL = MI->getDebugLoc();
21190 MachineFunction *MF = MBB->getParent();
21191 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
21192 MachineRegisterInfo &MRI = MF->getRegInfo();
21194 const BasicBlock *BB = MBB->getBasicBlock();
21195 MachineFunction::iterator I = MBB;
21198 // Memory Reference
21199 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21200 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21203 unsigned MemOpndSlot = 0;
21205 unsigned CurOp = 0;
21207 DstReg = MI->getOperand(CurOp++).getReg();
21208 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21209 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21210 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21211 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21213 MemOpndSlot = CurOp;
21215 MVT PVT = getPointerTy();
21216 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21217 "Invalid Pointer Size!");
21219 // For v = setjmp(buf), we generate
21220 //
21221 // thisMBB:
21222 //  buf[LabelOffset] = restoreMBB
21223 //  SjLjSetup restoreMBB
21224 //
21225 // mainMBB:
21226 //  v_main = 0
21227 //
21228 // sinkMBB:
21229 //  v = phi(main, restore)
21230 //
21231 // restoreMBB:
21232 //  if base pointer being used, load it from frame
21233 //  v_restore = 1
21235 MachineBasicBlock *thisMBB = MBB;
21236 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21237 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21238 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21239 MF->insert(I, mainMBB);
21240 MF->insert(I, sinkMBB);
21241 MF->push_back(restoreMBB);
21243 MachineInstrBuilder MIB;
21245 // Transfer the remainder of BB and its successor edges to sinkMBB.
21246 sinkMBB->splice(sinkMBB->begin(), MBB,
21247 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21248 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21251 unsigned PtrStoreOpc = 0;
21252 unsigned LabelReg = 0;
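// jmp_buf layout, in pointer-sized slots: slot 0 holds the frame pointer
// and slot 2 the stack pointer (both typically stored by the front-end's
// __builtin_setjmp lowering); slot 1, at LabelOffset, receives the address
// of restoreMBB below and is where a matching longjmp resumes.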
21253 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21254 Reloc::Model RM = MF->getTarget().getRelocationModel();
21255 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21256 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21258 // Prepare IP either in reg or imm.
21259 if (!UseImmLabel) {
21260 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21261 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21262 LabelReg = MRI.createVirtualRegister(PtrRC);
21263 if (Subtarget->is64Bit()) {
21264 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21268 .addMBB(restoreMBB)
21271 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21272 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21273 .addReg(XII->getGlobalBaseReg(MF))
21276 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21280 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21282 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21283 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21284 if (i == X86::AddrDisp)
21285 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21287 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21290 MIB.addReg(LabelReg);
21292 MIB.addMBB(restoreMBB);
21293 MIB.setMemRefs(MMOBegin, MMOEnd);
21295 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21296 .addMBB(restoreMBB);
21298 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
21299 MF->getSubtarget().getRegisterInfo());
21300 MIB.addRegMask(RegInfo->getNoPreservedMask());
21301 thisMBB->addSuccessor(mainMBB);
21302 thisMBB->addSuccessor(restoreMBB);
21306 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21307 mainMBB->addSuccessor(sinkMBB);
21310 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21311 TII->get(X86::PHI), DstReg)
21312 .addReg(mainDstReg).addMBB(mainMBB)
21313 .addReg(restoreDstReg).addMBB(restoreMBB);
21316 if (RegInfo->hasBasePointer(*MF)) {
21317 const X86Subtarget &STI = MF->getTarget().getSubtarget<X86Subtarget>();
21318 const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
21319 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21320 X86FI->setRestoreBasePointer(MF);
21321 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21322 unsigned BasePtr = RegInfo->getBaseRegister();
21323 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21324 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21325 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21326 .setMIFlag(MachineInstr::FrameSetup);
21328 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21329 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21330 restoreMBB->addSuccessor(sinkMBB);
21332 MI->eraseFromParent();
21336 MachineBasicBlock *
21337 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21338 MachineBasicBlock *MBB) const {
21339 DebugLoc DL = MI->getDebugLoc();
21340 MachineFunction *MF = MBB->getParent();
21341 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
21342 MachineRegisterInfo &MRI = MF->getRegInfo();
21344 // Memory Reference
21345 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21346 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21348 MVT PVT = getPointerTy();
21349 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21350 "Invalid Pointer Size!");
21352 const TargetRegisterClass *RC =
21353 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21354 unsigned Tmp = MRI.createVirtualRegister(RC);
21355 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21356 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
21357 MF->getSubtarget().getRegisterInfo());
21358 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21359 unsigned SP = RegInfo->getStackRegister();
21361 MachineInstrBuilder MIB;
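// Mirrors the jmp_buf layout used by the setjmp lowering above: slot 0 is
// the frame pointer, slot 1 (LabelOffset) the resume address, and slot 2
// (SPOffset) the stack pointer; reload all three, then do an indirect
// jump to the resume address.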
21363 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21364 const int64_t SPOffset = 2 * PVT.getStoreSize();
21366 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21367 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21370 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21371 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21372 MIB.addOperand(MI->getOperand(i));
21373 MIB.setMemRefs(MMOBegin, MMOEnd);
21375 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21376 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21377 if (i == X86::AddrDisp)
21378 MIB.addDisp(MI->getOperand(i), LabelOffset);
21380 MIB.addOperand(MI->getOperand(i));
21382 MIB.setMemRefs(MMOBegin, MMOEnd);
21384 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21385 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21386 if (i == X86::AddrDisp)
21387 MIB.addDisp(MI->getOperand(i), SPOffset);
21389 MIB.addOperand(MI->getOperand(i));
21391 MIB.setMemRefs(MMOBegin, MMOEnd);
21393 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21395 MI->eraseFromParent();
21399 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21400 // accumulator loops. Writing back to the accumulator allows the coalescer
21401 // to remove extra copies in the loop.
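// For example, VFMADD213PS x1, x2, x3 computes x1 = x2*x1 + x3, whereas
// VFMADD231PS x1, x2, x3 computes x1 = x2*x3 + x1. Only the 231 form reads
// and writes the accumulator through the same tied operand, which lets the
// register coalescer keep the loop-carried PHI value in one register.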
21402 MachineBasicBlock *
21403 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21404 MachineBasicBlock *MBB) const {
21405 MachineOperand &AddendOp = MI->getOperand(3);
21407 // Bail out early if the addend isn't a register - we can't switch these.
21408 if (!AddendOp.isReg())
21411 MachineFunction &MF = *MBB->getParent();
21412 MachineRegisterInfo &MRI = MF.getRegInfo();
21414 // Check whether the addend is defined by a PHI:
21415 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21416 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21417 if (!AddendDef.isPHI())
21420 // Look for the following pattern:
21421 //
21422 //   %addend = phi [%entry, 0], [%loop, %result]
21423 //   ...
21424 //   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
21425 //
21426 // and transform it into:
21427 //
21428 //   %addend = phi [%entry, 0], [%loop, %result]
21429 //   ...
21430 //   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21432 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21433 assert(AddendDef.getOperand(i).isReg());
21434 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21435 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21436 if (&PHISrcInst == MI) {
21437 // Found a matching instruction.
21438 unsigned NewFMAOpc = 0;
21439 switch (MI->getOpcode()) {
21440 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21441 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21442 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21443 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21444 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21445 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21446 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21447 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21448 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21449 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21450 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21451 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21452 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21453 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21454 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21455 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21456 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21457 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21458 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21459 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21461 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21462 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21463 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21464 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21465 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21466 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21467 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21468 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21469 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21470 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21471 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21472 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21473 default: llvm_unreachable("Unrecognized FMA variant.");
21476 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
21477 MachineInstrBuilder MIB =
21478 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21479 .addOperand(MI->getOperand(0))
21480 .addOperand(MI->getOperand(3))
21481 .addOperand(MI->getOperand(2))
21482 .addOperand(MI->getOperand(1));
21483 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21484 MI->eraseFromParent();
21491 MachineBasicBlock *
21492 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21493 MachineBasicBlock *BB) const {
21494 switch (MI->getOpcode()) {
21495 default: llvm_unreachable("Unexpected instr type to insert");
21496 case X86::TAILJMPd64:
21497 case X86::TAILJMPr64:
21498 case X86::TAILJMPm64:
21499 llvm_unreachable("TAILJMP64 would not be touched here.");
21500 case X86::TCRETURNdi64:
21501 case X86::TCRETURNri64:
21502 case X86::TCRETURNmi64:
21504 case X86::WIN_ALLOCA:
21505 return EmitLoweredWinAlloca(MI, BB);
21506 case X86::SEG_ALLOCA_32:
21507 case X86::SEG_ALLOCA_64:
21508 return EmitLoweredSegAlloca(MI, BB);
21509 case X86::TLSCall_32:
21510 case X86::TLSCall_64:
21511 return EmitLoweredTLSCall(MI, BB);
21512 case X86::CMOV_GR8:
21513 case X86::CMOV_FR32:
21514 case X86::CMOV_FR64:
21515 case X86::CMOV_V4F32:
21516 case X86::CMOV_V2F64:
21517 case X86::CMOV_V2I64:
21518 case X86::CMOV_V8F32:
21519 case X86::CMOV_V4F64:
21520 case X86::CMOV_V4I64:
21521 case X86::CMOV_V16F32:
21522 case X86::CMOV_V8F64:
21523 case X86::CMOV_V8I64:
21524 case X86::CMOV_GR16:
21525 case X86::CMOV_GR32:
21526 case X86::CMOV_RFP32:
21527 case X86::CMOV_RFP64:
21528 case X86::CMOV_RFP80:
21529 return EmitLoweredSelect(MI, BB);
21531 case X86::FP32_TO_INT16_IN_MEM:
21532 case X86::FP32_TO_INT32_IN_MEM:
21533 case X86::FP32_TO_INT64_IN_MEM:
21534 case X86::FP64_TO_INT16_IN_MEM:
21535 case X86::FP64_TO_INT32_IN_MEM:
21536 case X86::FP64_TO_INT64_IN_MEM:
21537 case X86::FP80_TO_INT16_IN_MEM:
21538 case X86::FP80_TO_INT32_IN_MEM:
21539 case X86::FP80_TO_INT64_IN_MEM: {
21540 MachineFunction *F = BB->getParent();
21541 const TargetInstrInfo *TII = F->getSubtarget().getInstrInfo();
21542 DebugLoc DL = MI->getDebugLoc();
21544 // Change the floating point control register to use "round towards zero"
21545 // mode when truncating to an integer value.
21546 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21547 addFrameReference(BuildMI(*BB, MI, DL,
21548 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21550 // Load the old value of the high byte of the control word...
21552 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21553 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21556 // Set the high part to be round to zero...
21557 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21560 // Reload the modified control word now...
21561 addFrameReference(BuildMI(*BB, MI, DL,
21562 TII->get(X86::FLDCW16m)), CWFrameIdx);
21564 // Restore the memory image of control word to original value
21565 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21568 // Get the X86 opcode to use.
21570 switch (MI->getOpcode()) {
21571 default: llvm_unreachable("illegal opcode!");
21572 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21573 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21574 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21575 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21576 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21577 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21578 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21579 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21580 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21584 MachineOperand &Op = MI->getOperand(0);
21586 AM.BaseType = X86AddressMode::RegBase;
21587 AM.Base.Reg = Op.getReg();
21589 AM.BaseType = X86AddressMode::FrameIndexBase;
21590 AM.Base.FrameIndex = Op.getIndex();
21592 Op = MI->getOperand(1);
21594 AM.Scale = Op.getImm();
21595 Op = MI->getOperand(2);
21597 AM.IndexReg = Op.getImm();
21598 Op = MI->getOperand(3);
21599 if (Op.isGlobal()) {
21600 AM.GV = Op.getGlobal();
21602 AM.Disp = Op.getImm();
21604 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21605 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21607 // Reload the original control word now.
21608 addFrameReference(BuildMI(*BB, MI, DL,
21609 TII->get(X86::FLDCW16m)), CWFrameIdx);
21611 MI->eraseFromParent(); // The pseudo instruction is gone now.
21614 // String/text processing lowering.
21615 case X86::PCMPISTRM128REG:
21616 case X86::VPCMPISTRM128REG:
21617 case X86::PCMPISTRM128MEM:
21618 case X86::VPCMPISTRM128MEM:
21619 case X86::PCMPESTRM128REG:
21620 case X86::VPCMPESTRM128REG:
21621 case X86::PCMPESTRM128MEM:
21622 case X86::VPCMPESTRM128MEM:
21623 assert(Subtarget->hasSSE42() &&
21624 "Target must have SSE4.2 or AVX features enabled");
21625 return EmitPCMPSTRM(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
21627 // String/text processing lowering.
21628 case X86::PCMPISTRIREG:
21629 case X86::VPCMPISTRIREG:
21630 case X86::PCMPISTRIMEM:
21631 case X86::VPCMPISTRIMEM:
21632 case X86::PCMPESTRIREG:
21633 case X86::VPCMPESTRIREG:
21634 case X86::PCMPESTRIMEM:
21635 case X86::VPCMPESTRIMEM:
21636 assert(Subtarget->hasSSE42() &&
21637 "Target must have SSE4.2 or AVX features enabled");
21638 return EmitPCMPSTRI(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
21640 // Thread synchronization.
21642 return EmitMonitor(MI, BB, BB->getParent()->getSubtarget().getInstrInfo(),
21647 return EmitXBegin(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
21649 case X86::VASTART_SAVE_XMM_REGS:
21650 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21652 case X86::VAARG_64:
21653 return EmitVAARG64WithCustomInserter(MI, BB);
21655 case X86::EH_SjLj_SetJmp32:
21656 case X86::EH_SjLj_SetJmp64:
21657 return emitEHSjLjSetJmp(MI, BB);
21659 case X86::EH_SjLj_LongJmp32:
21660 case X86::EH_SjLj_LongJmp64:
21661 return emitEHSjLjLongJmp(MI, BB);
21663 case TargetOpcode::STATEPOINT:
21664 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21665 // this point in the process. We diverge later.
21666 return emitPatchPoint(MI, BB);
21668 case TargetOpcode::STACKMAP:
21669 case TargetOpcode::PATCHPOINT:
21670 return emitPatchPoint(MI, BB);
21672 case X86::VFMADDPDr213r:
21673 case X86::VFMADDPSr213r:
21674 case X86::VFMADDSDr213r:
21675 case X86::VFMADDSSr213r:
21676 case X86::VFMSUBPDr213r:
21677 case X86::VFMSUBPSr213r:
21678 case X86::VFMSUBSDr213r:
21679 case X86::VFMSUBSSr213r:
21680 case X86::VFNMADDPDr213r:
21681 case X86::VFNMADDPSr213r:
21682 case X86::VFNMADDSDr213r:
21683 case X86::VFNMADDSSr213r:
21684 case X86::VFNMSUBPDr213r:
21685 case X86::VFNMSUBPSr213r:
21686 case X86::VFNMSUBSDr213r:
21687 case X86::VFNMSUBSSr213r:
21688 case X86::VFMADDSUBPDr213r:
21689 case X86::VFMADDSUBPSr213r:
21690 case X86::VFMSUBADDPDr213r:
21691 case X86::VFMSUBADDPSr213r:
21692 case X86::VFMADDPDr213rY:
21693 case X86::VFMADDPSr213rY:
21694 case X86::VFMSUBPDr213rY:
21695 case X86::VFMSUBPSr213rY:
21696 case X86::VFNMADDPDr213rY:
21697 case X86::VFNMADDPSr213rY:
21698 case X86::VFNMSUBPDr213rY:
21699 case X86::VFNMSUBPSr213rY:
21700 case X86::VFMADDSUBPDr213rY:
21701 case X86::VFMADDSUBPSr213rY:
21702 case X86::VFMSUBADDPDr213rY:
21703 case X86::VFMSUBADDPSr213rY:
21704 return emitFMA3Instr(MI, BB);
21708 //===----------------------------------------------------------------------===//
21709 // X86 Optimization Hooks
21710 //===----------------------------------------------------------------------===//
21712 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
21715 const SelectionDAG &DAG,
21716 unsigned Depth) const {
21717 unsigned BitWidth = KnownZero.getBitWidth();
21718 unsigned Opc = Op.getOpcode();
21719 assert((Opc >= ISD::BUILTIN_OP_END ||
21720 Opc == ISD::INTRINSIC_WO_CHAIN ||
21721 Opc == ISD::INTRINSIC_W_CHAIN ||
21722 Opc == ISD::INTRINSIC_VOID) &&
21723 "Should use MaskedValueIsZero if you don't know whether Op"
21724 " is a target node!");
21726 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
21740 // These nodes' second result is a boolean.
21741 if (Op.getResNo() == 0)
21744 case X86ISD::SETCC:
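// X86ISD::SETCC materializes the flag as 0 or 1 in a byte register, so
// every bit above the lowest is known to be zero.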
21745 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
21747 case ISD::INTRINSIC_WO_CHAIN: {
21748 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
21749 unsigned NumLoBits = 0;
21752 case Intrinsic::x86_sse_movmsk_ps:
21753 case Intrinsic::x86_avx_movmsk_ps_256:
21754 case Intrinsic::x86_sse2_movmsk_pd:
21755 case Intrinsic::x86_avx_movmsk_pd_256:
21756 case Intrinsic::x86_mmx_pmovmskb:
21757 case Intrinsic::x86_sse2_pmovmskb_128:
21758 case Intrinsic::x86_avx2_pmovmskb: {
21759 // High bits of movmskp{s|d}, pmovmskb are known zero.
21761 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
21762 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
21763 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
21764 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
21765 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
21766 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
21767 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
21768 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
21770 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
21779 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
21781 const SelectionDAG &,
21782 unsigned Depth) const {
21783 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
21784 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
21785 return Op.getValueType().getScalarType().getSizeInBits();
21791 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
21792 /// node is a GlobalAddress + offset.
21793 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
21794 const GlobalValue* &GA,
21795 int64_t &Offset) const {
21796 if (N->getOpcode() == X86ISD::Wrapper) {
21797 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
21798 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
21799 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
21803 return TargetLowering::isGAPlusOffset(N, GA, Offset);
21806 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
21807 /// same as extracting the high 128-bit part of a 256-bit vector and then
21808 /// inserting the result into the low part of a new 256-bit vector
21809 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
21810 EVT VT = SVOp->getValueType(0);
21811 unsigned NumElems = VT.getVectorNumElements();
21813 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
21814 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
21815 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
21816 SVOp->getMaskElt(j) >= 0)
21822 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
21823 /// same as extracting the low 128-bit part of 256-bit vector and then
21824 /// inserting the result into the high part of a new 256-bit vector
21825 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
21826 EVT VT = SVOp->getValueType(0);
21827 unsigned NumElems = VT.getVectorNumElements();
21829 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
21830 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
21831 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
21832 SVOp->getMaskElt(j) >= 0)
/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget* Subtarget) {
  SDLoc dl(N);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
      V2.getOpcode() == ISD::CONCAT_VECTORS) {
    //
    //                   0,0,0,...
    //                      |
    //    V      UNDEF    BUILD_VECTOR    UNDEF
    //     \      /           \           /
    //  CONCAT_VECTOR         CONCAT_VECTOR
    //         \                  /
    //           \                /
    //            RESULT: V + zero extended
    //
    if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
        V2.getOperand(1).getOpcode() != ISD::UNDEF ||
        V1.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
      return SDValue();

    // To match the shuffle mask, the first half of the mask should
    // be exactly the first vector, and all the rest a splat with the
    // first element of the second one.
    for (unsigned i = 0; i != NumElems/2; ++i)
      if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
          !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
        return SDValue();

    // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
    if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
      if (Ld->hasNUsesOfValue(1, 0)) {
        SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
        SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
        SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                  Ld->getMemoryVT(),
                                  Ld->getPointerInfo(),
                                  Ld->getAlignment(),
                                  false/*isVolatile*/, true/*ReadMem*/,
                                  false/*WriteMem*/);

        // Make sure the newly-created LOAD is in the same position as Ld in
        // terms of dependency. We create a TokenFactor for Ld and ResNode,
        // and update uses of Ld's output chain to use the TokenFactor.
        if (Ld->hasAnyUseOfValue(1)) {
          SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
          DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
          DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
                                 SDValue(ResNode.getNode(), 1));
        }

        return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
      }
    }

    // Emit a zeroed vector and insert the desired subvector on its
    // first half.
    SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
    SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  //===--------------------------------------------------------------------===//
  // Combine some shuffles into subvector extracts and inserts:
  //

  // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  if (isShuffleHigh128VectorInsertLow(SVOp)) {
    SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  if (isShuffleLow128VectorInsertHigh(SVOp)) {
    SDValue V = Extract128BitVector(V1, 0, DAG, dl);
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  return SDValue();
}
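// As a concrete instance of the zero-extension combine above: for v4i64, a
// shuffle mask of <0, 1, 4, 4> where V1 = concat(V, undef) and
// V2 = concat(zeros, undef) selects V in the low half and a splat of the zero
// vector's first element in the high half, so the whole node folds to
// "V zero-extended to 256 bits".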
/// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
                                   int Depth, bool HasPSHUFB, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");

  // Find the operand that enters the chain. Note that multiple uses are OK
  // here, we're not going to remove the operand we find.
  SDValue Input = Op.getOperand(0);
  while (Input.getOpcode() == ISD::BITCAST)
    Input = Input.getOperand(0);

  MVT VT = Input.getSimpleValueType();
  MVT RootVT = Root.getSimpleValueType();
  SDLoc DL(Root);

  // Just remove no-op shuffle masks.
  if (Mask.size() == 1) {
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
                  /*AddTo*/ true);
    return true;
  }

  // Use the float domain if the operand type is a floating point type.
  bool FloatDomain = VT.isFloatingPoint();

  // For floating point shuffles, we don't have free copies in the shuffle
  // instructions or the ability to load as part of the instruction, so
  // canonicalize their shuffles to UNPCK or MOV variants.
  //
  // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
  // vectors because it can have a load folded into it that UNPCK cannot. This
  // doesn't preclude something switching to the shorter encoding post-RA.
  if (FloatDomain) {
    if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
      bool Lo = Mask.equals(0, 0);
      unsigned Shuffle;
      MVT ShuffleVT;

      // Check if we have SSE3 which will let us use MOVDDUP. That instruction
      // is no slower than UNPCKLPD but has the option to fold the input
      // operand into even an unaligned memory load.
      if (Lo && Subtarget->hasSSE3()) {
        Shuffle = X86ISD::MOVDDUP;
        ShuffleVT = MVT::v2f64;
      } else {
        // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
        // than the UNPCK variants.
        Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
        ShuffleVT = MVT::v4f32;
      }
      if (Depth == 1 && Root->getOpcode() == Shuffle)
        return false; // Nothing to do!
      Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
      DCI.AddToWorklist(Op.getNode());
      if (Shuffle == X86ISD::MOVDDUP)
        Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
      else
        Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
      DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
    if (Subtarget->hasSSE3() &&
        (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
      bool Lo = Mask.equals(0, 0, 2, 2);
      unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
      MVT ShuffleVT = MVT::v4f32;
      if (Depth == 1 && Root->getOpcode() == Shuffle)
        return false; // Nothing to do!
      Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
      DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
    if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
      bool Lo = Mask.equals(0, 0, 1, 1);
      unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
      MVT ShuffleVT = MVT::v4f32;
      if (Depth == 1 && Root->getOpcode() == Shuffle)
        return false; // Nothing to do!
      Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
      DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
  }
  // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
  // variants as none of these have single-instruction variants that are
  // superior to the UNPCK formulation.
  if (!FloatDomain &&
      (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
       Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
       Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
       Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
                   15))) {
    bool Lo = Mask[0] == 0;
    unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
    if (Depth == 1 && Root->getOpcode() == Shuffle)
      return false; // Nothing to do!
    MVT ShuffleVT;
    switch (Mask.size()) {
    case 8:
      ShuffleVT = MVT::v8i16;
      break;
    case 16:
      ShuffleVT = MVT::v16i8;
      break;
    default:
      llvm_unreachable("Impossible mask size!");
    }
    Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
    DCI.AddToWorklist(Op.getNode());
    Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
    DCI.AddToWorklist(Op.getNode());
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                  /*AddTo*/ true);
    return true;
  }
  // Don't try to re-form single instruction chains under any circumstances now
  // that we've done encoding canonicalization for them.
  if (Depth < 2)
    return false;

  // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
  // can replace them with a single PSHUFB instruction profitably. Intel's
  // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
  // in practice PSHUFB tends to be *very* fast so we're more aggressive.
  if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
    SmallVector<SDValue, 16> PSHUFBMask;
    assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
    int Ratio = 16 / Mask.size();
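    // For example, a 4-element mask (from a v4i32-style shuffle) has Ratio 4:
    // each mask entry expands to 4 consecutive byte selectors, so mask element
    // M becomes bytes 4*M+0 .. 4*M+3 in the PSHUFB control vector.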
    for (unsigned i = 0; i < 16; ++i) {
      if (Mask[i / Ratio] == SM_SentinelUndef) {
        PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      int M = Mask[i / Ratio] != SM_SentinelZero
                  ? Ratio * Mask[i / Ratio] + i % Ratio
                  : 255;
      PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
    }
    Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
    DCI.AddToWorklist(Op.getNode());
    SDValue PSHUFBMaskOp =
        DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
    DCI.AddToWorklist(PSHUFBMaskOp.getNode());
    Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
    DCI.AddToWorklist(Op.getNode());
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                  /*AddTo*/ true);
    return true;
  }

  // Failed to find any combines.
  return false;
}
/// \brief Fully generic combining of x86 shuffle instructions.
///
/// This should be the last combine run over the x86 shuffle instructions. Once
/// they have been fully optimized, this will recursively consider all chains
/// of single-use shuffle instructions, build a generic model of the cumulative
/// shuffle operation, and check for simpler instructions which implement this
/// operation. We use this primarily for two purposes:
///
/// 1) Collapse generic shuffles to specialized single instructions when
///    equivalent. In most cases, this is just an encoding size win, but
///    sometimes we will collapse multiple generic shuffles into a single
///    special-purpose shuffle.
/// 2) Look for sequences of shuffle instructions with 3 or more total
///    instructions, and replace them with the slightly more expensive SSSE3
///    PSHUFB instruction if available. We do this as the last combining step
///    to ensure we avoid using PSHUFB if we can implement the shuffle with
///    a suitable short sequence of other instructions. The PSHUFB will either
///    use a register or have to read from memory and so is slightly (but only
///    slightly) more expensive than the other shuffle instructions.
///
/// Because this is inherently a quadratic operation (for each shuffle in
/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
/// This should never be an issue in practice as the shuffle lowering doesn't
/// produce sequences of more than 8 instructions.
///
/// FIXME: We will currently miss some cases where the redundant shuffling
/// would simplify under the threshold for PSHUFB formation because of
/// combine-ordering. To fix this, we should do the redundant instruction
/// combining in this recursive walk.
static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
                                          ArrayRef<int> RootMask,
                                          int Depth, bool HasPSHUFB,
                                          SelectionDAG &DAG,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const X86Subtarget *Subtarget) {
  // Bound the depth of our recursive combine because this is ultimately
  // quadratic in nature.
  if (Depth > 8)
    return false;

  // Directly rip through bitcasts to find the underlying operand.
  while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
    Op = Op.getOperand(0);

  MVT VT = Op.getSimpleValueType();
  if (!VT.isVector())
    return false; // Bail if we hit a non-vector.
  // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
  // version should be added.
  if (VT.getSizeInBits() != 128)
    return false;

  assert(Root.getSimpleValueType().isVector() &&
         "Shuffles operate on vector types!");
  assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
         "Can only combine shuffles of the same vector register size.");

  if (!isTargetShuffle(Op.getOpcode()))
    return false;
  SmallVector<int, 16> OpMask;
  bool IsUnary;
  bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
  // We can only combine unary shuffles which we can decode the mask for.
  if (!HaveMask || !IsUnary)
    return false;

  assert(VT.getVectorNumElements() == OpMask.size() &&
         "Different mask size from vector size!");
  assert(((RootMask.size() > OpMask.size() &&
           RootMask.size() % OpMask.size() == 0) ||
          (OpMask.size() > RootMask.size() &&
           OpMask.size() % RootMask.size() == 0) ||
          OpMask.size() == RootMask.size()) &&
         "The smaller number of elements must divide the larger.");
  int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
  int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
  assert(((RootRatio == 1 && OpRatio == 1) ||
          (RootRatio == 1) != (OpRatio == 1)) &&
         "Must not have a ratio for both incoming and op masks!");
  SmallVector<int, 16> Mask;
  Mask.reserve(std::max(OpMask.size(), RootMask.size()));

  // Merge this shuffle operation's mask into our accumulated mask. Note that
  // this shuffle's mask will be the first applied to the input, followed by
  // the root mask to get us all the way to the root value arrangement. The
  // reason for this order is that we are recursing up the operation chain.
  for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
    int RootIdx = i / RootRatio;
    if (RootMask[RootIdx] < 0) {
      // This is a zero or undef lane, we're done.
      Mask.push_back(RootMask[RootIdx]);
      continue;
    }

    int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
    int OpIdx = RootMaskedIdx / OpRatio;
    if (OpMask[OpIdx] < 0) {
      // The incoming lanes are zero or undef, it doesn't matter which ones we
      // are using.
      Mask.push_back(OpMask[OpIdx]);
      continue;
    }

    // Ok, we have non-zero lanes, map them through.
    Mask.push_back(OpMask[OpIdx] * OpRatio +
                   RootMaskedIdx % OpRatio);
  }
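  // For example, composing a 2-element root mask <0, 0> with a 4-element
  // OpMask <2, 3, 0, 1> gives RootRatio 2 and OpRatio 1: lane i maps to
  // RootMaskedIdx = 0*2 + i%2, which then indexes OpMask, yielding the
  // combined 4-element mask <2, 3, 2, 3>.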
  // See if we can recurse into the operand to combine more things.
  switch (Op.getOpcode()) {
  case X86ISD::PSHUFB:
    HasPSHUFB = true;
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
    if (Op.getOperand(0).hasOneUse() &&
        combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
                                      HasPSHUFB, DAG, DCI, Subtarget))
      return true;
    break;

  case X86ISD::UNPCKL:
  case X86ISD::UNPCKH:
    assert(Op.getOperand(0) == Op.getOperand(1) &&
           "We only combine unary shuffles!");
    // We can't check for single use, we have to check that this shuffle is the
    // only user.
    if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
        combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
                                      HasPSHUFB, DAG, DCI, Subtarget))
      return true;
    break;
  }

  // Minor canonicalization of the accumulated shuffle mask to make it easier
  // to match below. All this does is detect masks with sequential pairs of
  // elements, and shrink them to the half-width mask. It does this in a loop
  // so it will reduce the size of the mask to the minimal width mask which
  // performs an equivalent shuffle.
  SmallVector<int, 16> WidenedMask;
  while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
    Mask = std::move(WidenedMask);
    WidenedMask.clear();
  }

  return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
                                Subtarget);
}
/// \brief Get the PSHUF-style mask from PSHUF node.
///
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
/// PSHUF-style masks that can be reused with such instructions.
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
  SmallVector<int, 4> Mask;
  bool IsUnary;
  bool HaveMask =
      getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
  (void)HaveMask;
  assert(HaveMask);

  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
    return Mask;
  case X86ISD::PSHUFLW:
    Mask.resize(4);
    return Mask;
  case X86ISD::PSHUFHW:
    Mask.erase(Mask.begin(), Mask.begin() + 4);
    for (int &M : Mask)
      M -= 4;
    return Mask;
  default:
    llvm_unreachable("No valid shuffle instruction found!");
  }
}
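// For example, given (PSHUFHW x, <0, 1, 2, 3, 7, 6, 5, 4>) this returns the
// v4-style mask <3, 2, 1, 0>: the low-half identity lanes are dropped and the
// high-half indices are rebased by subtracting 4.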
/// \brief Search for a combinable shuffle across a chain ending in pshufd.
///
/// We walk up the chain and look for a combinable shuffle, skipping over
/// shuffles that we could hoist this shuffle's transformation past without
/// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
                             SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  assert(N.getOpcode() == X86ISD::PSHUFD &&
         "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);

  // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
  // of the shuffles in the chain so that we can form a fresh chain to replace
  // this one.
  SmallVector<SDValue, 8> Chain;
  SDValue V = N.getOperand(0);
  for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return SDValue(); // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // shuffles.
      continue;

    case X86ISD::PSHUFD:
      // Found another dword shuffle.
      break;

    case X86ISD::PSHUFLW:
      // Check that the low words (being shuffled) are the identity in the
      // dword shuffle, and the high words are self-contained.
      if (Mask[0] != 0 || Mask[1] != 1 ||
          !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::PSHUFHW:
      // Check that the high words (being shuffled) are the identity in the
      // dword shuffle, and the low words are self-contained.
      if (Mask[2] != 2 || Mask[3] != 3 ||
          !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
      // shuffle into a preceding word shuffle.
      if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
        return SDValue();

      // Search for a half-shuffle which we can combine with.
      unsigned CombineOp =
          V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
      if (V.getOperand(0) != V.getOperand(1) ||
          !V->isOnlyUserOf(V.getOperand(0).getNode()))
        return SDValue();
      Chain.push_back(V);
      V = V.getOperand(0);
      do {
        switch (V.getOpcode()) {
        default:
          return SDValue(); // Nothing to combine.

        case X86ISD::PSHUFLW:
        case X86ISD::PSHUFHW:
          if (V.getOpcode() == CombineOp)
            break;

          Chain.push_back(V);

          // Fallthrough!
        case ISD::BITCAST:
          V = V.getOperand(0);
          continue;
        }
        break;
      } while (V.hasOneUse());
      break;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }

  if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return SDValue();

  // Merge this node's mask and our incoming mask.
  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
  V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
                  getV4X86ShuffleImm8ForMask(Mask, DAG));
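  // For example, if the incoming Mask is <1, 0, 2, 3> and V's own mask (VMask)
  // is <2, 3, 0, 1>, each M becomes VMask[M], producing <3, 2, 0, 1>: the
  // single rebuilt shuffle performs both dword permutations at once.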
  // Rebuild the chain around this new shuffle.
  while (!Chain.empty()) {
    SDValue W = Chain.pop_back_val();

    if (V.getValueType() != W.getOperand(0).getValueType())
      V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);

    switch (W.getOpcode()) {
    default:
      llvm_unreachable("Only PSHUF and UNPCK instructions get here!");

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
      break;

    case X86ISD::PSHUFD:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
      break;
    }
  }
  if (V.getValueType() != N.getValueType())
    V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);

  // Return the new chain to replace N.
  return V;
}
/// \brief Search for a combinable shuffle across a chain ending in pshuflw or
/// pshufhw.
///
/// We walk up the chain, skipping shuffles of the other half and looking
/// through shuffles which switch halves trying to find a shuffle of the same
/// pair of dwords.
static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
                                        SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI) {
  assert(
      (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
      "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);
  unsigned CombineOpcode = N.getOpcode();

  // Walk up a single-use chain looking for a combinable shuffle.
  SDValue V = N.getOperand(0);
  for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return false; // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // shuffles.
      continue;

    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      if (V.getOpcode() == CombineOpcode)
        break;

      // Other-half shuffles are no-ops.
      continue;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }

  if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return false;

  // Combine away the bottom node as its shuffle will be accumulated into
  // a preceding shuffle.
  DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);

  // Record the old value.
  SDValue Old = V;

  // Merge this node's mask and our incoming mask (adjusted to account for all
  // the pshufd instructions encountered).
  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
  V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
                  getV4X86ShuffleImm8ForMask(Mask, DAG));

  // Check that the shuffles didn't cancel each other out. If not, we need to
  // combine to the new one.
  if (Old != V)
    // Replace the combinable shuffle with the combined one, updating all users
    // so that we re-evaluate the chain here.
    DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);

  return true;
}
/// \brief Try to combine x86 target specific shuffles.
static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();
  SmallVector<int, 4> Mask;

  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  default:
    return SDValue();
  }

  // Nuke no-op shuffles that show up after combining.
  if (isNoopShuffleMask(Mask))
    return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);

  // Look for simplifications involving one or two shuffle instructions.
  SDValue V = N.getOperand(0);
  switch (N.getOpcode()) {
  default:
    break;
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    assert(VT == MVT::v8i16);
    (void)VT;

    if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
      return SDValue(); // We combined away this shuffle, so we're done.

    // See if this reduces to a PSHUFD which is no more expensive and can
    // combine with more operations. Note that it has to at least flip the
    // dwords as otherwise it would have been removed as a no-op.
    if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
      int DMask[] = {0, 1, 2, 3};
      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
      DMask[DOffset + 0] = DOffset + 1;
      DMask[DOffset + 1] = DOffset + 0;
      V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
      DCI.AddToWorklist(V.getNode());
      V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
                      getV4X86ShuffleImm8ForMask(DMask, DAG));
      DCI.AddToWorklist(V.getNode());
      return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
    }
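    // For example, (PSHUFLW x, <2, 3, 0, 1>) swaps the two low dwords while
    // leaving the high half alone, so it is exactly (PSHUFD x, <1, 0, 2, 3>)
    // once x is viewed as v4i32.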
    // Look for shuffle patterns which can be implemented as a single unpack.
    // FIXME: This doesn't handle the location of the PSHUFD generically, and
    // only works when we have a PSHUFD followed by two half-shuffles.
    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
        (V.getOpcode() == X86ISD::PSHUFLW ||
         V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse()) {
      SDValue D = V.getOperand(0);
      while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
        D = D.getOperand(0);
      if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int WordMask[8];
        for (int i = 0; i < 4; ++i) {
          WordMask[i + NOffset] = Mask[i] + NOffset;
          WordMask[i + VOffset] = VMask[i] + VOffset;
        }
        // Map the word mask through the DWord mask.
        int MappedMask[8];
        for (int i = 0; i < 8; ++i)
          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
        const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
        const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
        if (std::equal(std::begin(MappedMask), std::end(MappedMask),
                       std::begin(UnpackLoMask)) ||
            std::equal(std::begin(MappedMask), std::end(MappedMask),
                       std::begin(UnpackHiMask))) {
          // We can replace all three shuffles with an unpack.
          V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
          DCI.AddToWorklist(V.getNode());
          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                                : X86ISD::UNPCKH,
                             DL, MVT::v8i16, V, V);
        }
      }
    }

    break;

  case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
      return NewN;

    break;
  }

  return SDValue();
}
/// \brief Try to combine a shuffle into a target-specific add-sub node.
///
/// We combine this directly on the abstract vector shuffle nodes so it is
/// easier to generically match. We also insert dummy vector shuffle nodes for
/// the operands which explicitly discard the lanes which are unused by this
/// operation to try to flow through the rest of the combiner the fact that
/// they're unused.
static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

  auto *SVN = cast<ShuffleVectorSDNode>(N);
  ArrayRef<int> Mask = SVN->getMask();
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);

  // We require the first shuffle operand to be the SUB node, and the second to
  // be the ADD node.
  // FIXME: We should support the commuted patterns.
  if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
    return SDValue();

  // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return SDValue();

  // Ensure that both operations have the same operands. Note that we can
  // commute the FADD operands.
  SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
  if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
      (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
    return SDValue();

  // We're looking for blends between FADD and FSUB nodes. We insist on these
  // nodes being lined up in a specific expected pattern.
  if (!(isShuffleEquivalent(Mask, 0, 3) ||
        isShuffleEquivalent(Mask, 0, 5, 2, 7) ||
        isShuffleEquivalent(Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
    return SDValue();

  // Only specific types are legal at this point, assert so we notice if and
  // when these change.
  assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
          VT == MVT::v4f64) &&
         "Unknown vector type encountered!");

  return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
}
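// For example, for v4f32 the blend mask <0, 5, 2, 7> takes lanes 0 and 2 from
// the FSUB node and lanes 1 and 3 from the FADD node, which is precisely the
// lane pattern of the SSE3 ADDSUBPS instruction.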
/// PerformShuffleCombine - Performs several different shuffle combines.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // Don't create instructions with illegal types after legalize types has run.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
    return SDValue();

  // If we have legalized the vector types, look for blends of FADD and FSUB
  // nodes that we can fuse into an ADDSUB node.
  if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
    if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
      return AddSub;

  // Combine 256-bit vector shuffles. This is only profitable when in AVX mode.
  if (Subtarget->hasFp256() && VT.is256BitVector() &&
      N->getOpcode() == ISD::VECTOR_SHUFFLE)
    return PerformShuffleCombine256(N, DAG, DCI, Subtarget);

  // During Type Legalization, when promoting illegal vector types,
  // the backend might introduce new shuffle dag nodes and bitcasts.
  //
  // This code performs the following transformation:
  // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
  //       (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
  //
  // We do this only if both the bitcast and the BINOP dag nodes have
  // one use. Also, perform this transformation only if the new binary
  // operation is legal. This is to avoid introducing dag nodes that
  // potentially need to be further expanded (or custom lowered) into a
  // less optimal sequence of dag nodes.
  if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
      N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
      N0.getOpcode() == ISD::BITCAST) {
    SDValue BC0 = N0.getOperand(0);
    EVT SVT = BC0.getValueType();
    unsigned Opcode = BC0.getOpcode();
    unsigned NumElts = VT.getVectorNumElements();

    if (BC0.hasOneUse() && SVT.isVector() &&
        SVT.getVectorNumElements() * 2 == NumElts &&
        TLI.isOperationLegal(Opcode, VT)) {
      bool CanFold = false;
      switch (Opcode) {
      default : break;
      case ISD::ADD :
      case ISD::FADD :
      case ISD::SUB :
      case ISD::FSUB :
      case ISD::MUL :
      case ISD::FMUL :
        CanFold = true;
      }

      unsigned SVTNumElts = SVT.getVectorNumElements();
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
      for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
        CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
      for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
        CanFold = SVOp->getMaskElt(i) < 0;

      if (CanFold) {
        SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
        SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
        SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
        return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
      }
    }
  }

  // Only handle 128-bit wide vectors from here on.
  if (!VT.is128BitVector())
    return SDValue();

  // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
  // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
  // consecutive, non-overlapping, and in the right order.
  SmallVector<SDValue, 16> Elts;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
    Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));

  SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
  if (LD.getNode())
    return LD;

  if (isTargetShuffle(N->getOpcode())) {
    SDValue Shuffle =
        PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;

    // Try recursively combining arbitrary sequences of x86 shuffle
    // instructions into higher-order shuffles. We do this after combining
    // specific PSHUF instruction sequences into their minimal form so that we
    // can evaluate how many specialized shuffle instructions are involved in
    // a particular chain.
    SmallVector<int, 1> NonceMask; // Just a placeholder.
    NonceMask.push_back(0);
    if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
                                      DCI, Subtarget))
      return SDValue(); // This routine will use CombineTo to replace N.
  }

  return SDValue();
}
/// PerformTruncateCombine - Converts truncate operation to
/// a sequence of vector shuffle operations.
/// It is possible when we truncate a 256-bit vector to a 128-bit vector.
static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget *Subtarget) {
  return SDValue();
}
/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
/// specific shuffle of a load can be folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
                                                TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue InVec = N->getOperand(0);
  SDValue EltNo = N->getOperand(1);

  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();

  EVT OriginalVT = InVec.getValueType();

  if (InVec.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!InVec.hasOneUse())
      return SDValue();
    EVT BCVT = InVec.getOperand(0).getValueType();
    if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
      return SDValue();
    InVec = InVec.getOperand(0);
  }

  EVT CurrentVT = InVec.getValueType();

  if (!isTargetShuffle(InVec.getOpcode()))
    return SDValue();

  // Don't duplicate a load with other uses.
  if (!InVec.hasOneUse())
    return SDValue();

  SmallVector<int, 16> ShuffleMask;
  bool UnaryShuffle;
  if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
                            ShuffleMask, UnaryShuffle))
    return SDValue();

  // Select the input vector, guarding against out of range extract vector.
  unsigned NumElems = CurrentVT.getVectorNumElements();
  int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
  int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
  SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
                                         : InVec.getOperand(1);

  // If inputs to shuffle are the same for both ops, then allow 2 uses
  unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;

  if (LdNode.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
      return SDValue();

    AllowedUses = 1; // only allow 1 load use if we have a bitcast
    LdNode = LdNode.getOperand(0);
  }

  if (!ISD::isNormalLoad(LdNode.getNode()))
    return SDValue();

  LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);

  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
    return SDValue();

  EVT EltVT = N->getValueType(0);
  // If there's a bitcast before the shuffle, check if the load type and
  // alignment is valid.
  unsigned Align = LN0->getAlignment();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
      EltVT.getTypeForEVT(*DAG.getContext()));

  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
    return SDValue();

  // All checks match so transform back to vector_shuffle so that DAG combiner
  // can finish the job
  SDLoc dl(N);

  // Create shuffle node taking into account the case that it's a unary shuffle
  SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
                                   : InVec.getOperand(1);
  Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
                                 InVec.getOperand(0), Shuffle,
                                 &ShuffleMask[0]);
  Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
                     EltNo);
}
/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
/// into a somewhat faster sequence. For i686, the best sequence is apparently
/// storing the value and loading scalars back, while for x64 we should
/// use 64-bit extracts and shifts.
static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
                                                TargetLowering::DAGCombinerInfo &DCI) {
  SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
  if (NewOp.getNode())
    return NewOp;

  SDValue InputVector = N->getOperand(0);

  // Detect whether we are trying to convert from mmx to i32 and the bitcast
  // from mmx to v2i32 has a single usage.
  if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST &&
      InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx &&
      InputVector.hasOneUse() && N->getValueType(0) == MVT::i32)
    return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
                       N->getValueType(0),
                       InputVector.getNode()->getOperand(0));

  // Only operate on vectors of 4 elements, where the alternative shuffling
  // gets to be more expensive.
  if (InputVector.getValueType() != MVT::v4i32)
    return SDValue();

  // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
  // single use which is a sign-extend or zero-extend, and all elements are
  // used.
  SmallVector<SDNode *, 4> Uses;
  unsigned ExtractedElements = 0;
  for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
       UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
    if (UI.getUse().getResNo() != InputVector.getResNo())
      return SDValue();

    SDNode *Extract = *UI;
    if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    if (Extract->getValueType(0) != MVT::i32)
      return SDValue();
    if (!Extract->hasOneUse())
      return SDValue();
    if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
        Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
    if (!isa<ConstantSDNode>(Extract->getOperand(1)))
      return SDValue();

    // Record which element was extracted.
    ExtractedElements |=
        1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();

    Uses.push_back(Extract);
  }

  // If not all the elements were used, this may not be worthwhile.
  if (ExtractedElements != 15)
    return SDValue();

  // Ok, we've now decided to do the transformation.
  // If 64-bit shifts are legal, use the extract-shift sequence,
  // otherwise bounce the vector off the cache.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Vals[4];
  SDLoc dl(InputVector);

  if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
    SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
    EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
    SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
                                     DAG.getConstant(0, VecIdxTy));
    SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
                                  DAG.getConstant(1, VecIdxTy));

    SDValue ShAmt = DAG.getConstant(32,
        DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
    Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
    Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
        DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
    Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
    Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
        DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
  } else {
    // Store the value to a temporary stack slot.
    SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
    SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
                              MachinePointerInfo(), false, false, 0);

    EVT ElementType = InputVector.getValueType().getVectorElementType();
    unsigned EltSize = ElementType.getSizeInBits() / 8;

    // Replace each use (extract) with a load of the appropriate element.
    for (unsigned i = 0; i < 4; ++i) {
      uint64_t Offset = EltSize * i;
      SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());

      SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
                                       StackPtr, OffsetVal);

      // Load the scalar.
      Vals[i] = DAG.getLoad(ElementType, dl, Ch,
                            ScalarAddr, MachinePointerInfo(),
                            false, false, false, 0);
    }
  }

  // Replace the extracts
  for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
       UE = Uses.end(); UI != UE; ++UI) {
    SDNode *Extract = *UI;

    SDValue Idx = Extract->getOperand(1);
    uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
  }

  // The replacement was made in place; don't return anything.
  return SDValue();
}
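// For example, on x86-64 the extract-shift path above turns four i32 extracts
// of a v4i32 value into two i64 EXTRACT_VECTOR_ELTs; each 64-bit half then
// yields one element via TRUNCATE and the other via an arithmetic shift right
// by 32 followed by TRUNCATE, avoiding a round trip through memory.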
/// \brief Matches a VSELECT onto min/max or returns 0 if the node doesn't
/// match.
static std::pair<unsigned, bool>
matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
                   SelectionDAG &DAG, const X86Subtarget *Subtarget) {
  if (!VT.isVector())
    return std::make_pair(0, false);

  bool NeedSplit = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return std::make_pair(0, false);
  case MVT::v4i64:
  case MVT::v2i64:
    if (!Subtarget->hasVLX())
      return std::make_pair(0, false);
    break;
  case MVT::v64i8:
  case MVT::v32i16:
    if (!Subtarget->hasBWI())
      return std::make_pair(0, false);
    break;
  case MVT::v16i32:
  case MVT::v8i64:
    if (!Subtarget->hasAVX512())
      return std::make_pair(0, false);
    break;
  case MVT::v32i8:
  case MVT::v16i16:
  case MVT::v8i32:
    if (!Subtarget->hasAVX2())
      NeedSplit = true;
    if (!Subtarget->hasAVX())
      return std::make_pair(0, false);
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
    if (!Subtarget->hasSSE2())
      return std::make_pair(0, false);
  }

  // SSE2 has only a small subset of the operations.
  bool hasUnsigned = Subtarget->hasSSE41() ||
                     (Subtarget->hasSSE2() && VT == MVT::v16i8);
  bool hasSigned = Subtarget->hasSSE41() ||
                   (Subtarget->hasSSE2() && VT == MVT::v8i16);

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  unsigned Opc = 0;
  // Check for x CC y ? x : y.
  if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
    case ISD::SETULE:
      Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
    case ISD::SETUGT:
    case ISD::SETUGE:
      Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
    case ISD::SETLT:
    case ISD::SETLE:
      Opc = hasSigned ? X86ISD::SMIN : 0; break;
    case ISD::SETGT:
    case ISD::SETGE:
      Opc = hasSigned ? X86ISD::SMAX : 0; break;
    }
  // Check for x CC y ? y : x -- a min/max with reversed arms.
  } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
             DAG.isEqualTo(RHS, Cond.getOperand(0))) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
    case ISD::SETULE:
      Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
    case ISD::SETUGT:
    case ISD::SETUGE:
      Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
    case ISD::SETLT:
    case ISD::SETLE:
      Opc = hasSigned ? X86ISD::SMAX : 0; break;
    case ISD::SETGT:
    case ISD::SETGE:
      Opc = hasSigned ? X86ISD::SMIN : 0; break;
    }
  }

  return std::make_pair(Opc, NeedSplit);
}
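// For example, (vselect (setcc x, y, setult), x, y) on v16i8 matches as
// X86ISD::UMIN with no splitting on SSE2 (the v16i8 special case) or SSE4.1,
// while the same pattern on v8i32 with AVX1 but not AVX2 reports
// NeedSplit = true so the caller lowers it as two 128-bit UMINs.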
static SDValue
transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
    SDValue CondSrc = Cond->getOperand(0);
    if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
      Cond = CondSrc->getOperand(0);
  }

  if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return SDValue();

  // A vselect where all conditions and data are constants can be optimized into
  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
  if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
    return SDValue();

  unsigned MaskValue = 0;
  if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> ShuffleMask(NumElems, -1);
  for (unsigned i = 0; i < NumElems; ++i) {
    // Be sure we emit undef where we can.
    if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
      ShuffleMask[i] = -1;
    else
      ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
    return SDValue();
  return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
}
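// For example, a set bit i in MaskValue sends lane i to index i + NumElems
// (the RHS operand), so MaskValue 0b0101 on a v4i32 vselect yields the shuffle
// mask <4, 1, 6, 3>: lanes 0 and 2 come from RHS, lanes 1 and 3 from LHS.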
/// PerformSELECTCombine - Do target-specific dag combines on SELECT and
/// VSELECT nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  SDValue Cond = N->getOperand(0);
  // Get the LHS/RHS of the select.
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT VT = LHS.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
  // instructions match the semantics of the common C idiom x<y?x:y but not
  // x<=y?x:y, because of how they handle negative zero (which can be
  // ignored in unsafe-math mode).
  // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
  if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
      VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
      (Subtarget->hasSSE2() ||
       (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    unsigned Opcode = 0;
    // Check for x CC y ? x : y.
    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
      switch (CC) {
      default: break;
      case ISD::SETULT:
        // Converting this to a min would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.UnsafeFPMath &&
              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETOLE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETULE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETOGE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGT:
        // Converting this to a max would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.UnsafeFPMath &&
              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMAX;
        break;
      }
    // Check for x CC y ? y : x -- a min/max with reversed arms.
    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
      switch (CC) {
      default: break;
      case ISD::SETOGE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGT:
        // Converting this to a min would handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETULT:
        // Converting this to a max would handle NaNs incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETOLE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETULE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMAX;
        break;
      }
    }

    if (Opcode)
      return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
  }
  EVT CondVT = Cond.getValueType();
  if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1) {
    // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
    // lowering on KNL. In this case we convert it to
    // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
    // The same situation applies to all 128- and 256-bit vectors of i8 and i16.
    // Since SKX these selects have a proper lowering.
    EVT OpVT = LHS.getValueType();
    if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
        (OpVT.getVectorElementType() == MVT::i8 ||
         OpVT.getVectorElementType() == MVT::i16) &&
        !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
      Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
      DCI.AddToWorklist(Cond.getNode());
      return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
    }
  }
  // If this is a select between two integer constants, try to do some
  // optimizations.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
      // Don't do this for crazy integer types.
      if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
        // If this is efficiently invertible, canonicalize the LHSC/RHSC values
        // so that TrueC (the true value) is larger than FalseC.
        bool NeedsCondInvert = false;

        if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
            // Efficiently invertible.
            (Cond.getOpcode() == ISD::SETCC ||  // setcc -> invertible.
             (Cond.getOpcode() == ISD::XOR &&   // xor(X, C) -> invertible.
              isa<ConstantSDNode>(Cond.getOperand(1))))) {
          NeedsCondInvert = true;
          std::swap(TrueC, FalseC);
        }

        // Optimize C ? 8 : 0 -> zext(C) << 3.  Likewise for any pow2/0.
        if (FalseC->getAPIntValue() == 0 &&
            TrueC->getAPIntValue().isPowerOf2()) {
          if (NeedsCondInvert) // Invert the condition if needed.
            Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(1, Cond.getValueType()));

          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);

          unsigned ShAmt = TrueC->getAPIntValue().logBase2();
          return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
                             DAG.getConstant(ShAmt, MVT::i8));
        }

        // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
        if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
          if (NeedsCondInvert) // Invert the condition if needed.
            Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(1, Cond.getValueType()));

          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                             FalseC->getValueType(0), Cond);
          return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                             SDValue(FalseC, 0));
        }

        // Optimize cases that will turn into an LEA instruction. This requires
        // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
        if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
          uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
          if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

          bool isFastMultiplier = false;
          if (Diff < 10) {
            switch ((unsigned char)Diff) {
            default: break;
            case 1:  // result = add base, cond
            case 2:  // result = lea base(    , cond*2)
            case 3:  // result = lea base(cond, cond*2)
            case 4:  // result = lea base(    , cond*4)
            case 5:  // result = lea base(cond, cond*4)
            case 8:  // result = lea base(    , cond*8)
            case 9:  // result = lea base(cond, cond*8)
              isFastMultiplier = true;
              break;
            }
          }

          if (isFastMultiplier) {
            APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
            if (NeedsCondInvert) // Invert the condition if needed.
              Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                                 DAG.getConstant(1, Cond.getValueType()));

            // Zero extend the condition if needed.
            Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                               Cond);
            // Scale the condition by the difference.
            if (Diff != 1)
              Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                                 DAG.getConstant(Diff, Cond.getValueType()));

            // Add the base if non-zero.
            if (FalseC->getAPIntValue() != 0)
              Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                                 SDValue(FalseC, 0));
            return Cond;
          }
        }
      }
  }
  // Canonicalize max and min:
  // (x > y) ? x : y -> (x >= y) ? x : y
  // (x < y) ? x : y -> (x <= y) ? x : y
  // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
  // the need for an extra compare
  // against zero. e.g.
  // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0
  // subl   %esi, %edi
  // testl  %edi, %edi
  // movl   $0, %eax
  // cmovgl %edi, %eax
  // =>
  // xorl   %eax, %eax
  // subl   %esi, %edi
  // cmovsl %eax, %edi
  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
      DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    switch (CC) {
    default: break;
    case ISD::SETLT:
    case ISD::SETGT: {
      ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
      Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
                          Cond.getOperand(0), Cond.getOperand(1), NewCC);
      return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
    }
    }
  }

  // Early exit check
  if (!TLI.isTypeLegal(VT))
    return SDValue();
  // Match VSELECTs into subs with unsigned saturation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
      ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
       (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    // Check if one of the arms of the VSELECT is a zero vector. If it's on the
    // left side, invert the predicate to simplify the logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, true);
    } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
      Other = LHS;
    }

    if (Other.getNode() && Other->getNumOperands() == 2 &&
        DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
      SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
      SDValue CondRHS = Cond->getOperand(1);

      // Look for a general sub with unsigned saturation first.
      // x >= y ? x-y : 0 --> subus x, y
      // x >  y ? x-y : 0 --> subus x, y
      if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
          Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
        return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);

      if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
        if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
          if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
            if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
              // If the RHS is a constant we have to reverse the const
              // canonicalization.
              // x > C-1 ? x+-C : 0 --> subus x, C
              if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
                  CondRHSConst->getAPIntValue() ==
                      (-OpRHSConst->getAPIntValue() - 1))
                return DAG.getNode(
                    X86ISD::SUBUS, DL, VT, OpLHS,
                    DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));

          // Another special case: If C was a sign bit, the sub has been
          // canonicalized into a xor.
          // FIXME: Would it be better to use computeKnownBits to determine
          //        whether it's safe to decanonicalize the xor?
          // x s< 0 ? x^C : 0 --> subus x, C
          if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
              ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
              OpRHSConst->getAPIntValue().isSignBit())
            // Note that we have to rebuild the RHS constant here to ensure we
            // don't rely on particular values of undef lanes.
            return DAG.getNode(
                X86ISD::SUBUS, DL, VT, OpLHS,
                DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
        }
    }
  }
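  // Illustrative example for the subus matching above (values are
  // hypothetical): in v16i8, for x = <200,...> and y = <50,...>,
  // (x >u y) ? x - y : 0 is 150 in every lane, which is exactly what
  // psubusb computes in one instruction: max(x - y, 0) with unsigned
  // saturation, so both the compare and the select disappear.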
  // Try to match a min/max vector operation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
    std::pair<unsigned, bool> ret =
        matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
    unsigned Opc = ret.first;
    bool NeedSplit = ret.second;

    if (Opc && NeedSplit) {
      unsigned NumElems = VT.getVectorNumElements();
      // Extract the LHS vectors.
      SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
      SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);

      // Extract the RHS vectors.
      SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
      SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);

      // Create a min/max for each subvector.
      LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
      RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);

      // Merge the result.
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
    } else if (Opc)
      return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
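  // Illustrative example for the split above (hypothetical types): a v32i8
  // unsigned min on a target with only 128-bit integer min/max becomes
  //   lo = pminub(LHS[0..15],  RHS[0..15])
  //   hi = pminub(LHS[16..31], RHS[16..31])
  //   result = concat_vectors(lo, hi)
  // so each half is handled by one legal 128-bit instruction.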
  // Simplify vector selection if the condition value type matches the vselect
  // operand type.
  if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
    assert(Cond.getValueType().isVector() &&
           "vector select expects a vector selector!");

    bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
    bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());

    // Try inverting the condition if the true value is not all 1s and the
    // false value is not all 0s.
    if (!TValIsAllOnes && !FValIsAllZeros &&
        // Check if the selector will be produced by CMPP*/PCMP*.
        Cond.getOpcode() == ISD::SETCC &&
        // Check if SETCC has already been promoted.
        TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
      bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
      bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());

      if (TValIsAllZeros || FValIsAllOnes) {
        SDValue CC = Cond.getOperand(2);
        ISD::CondCode NewCC =
          ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                               Cond.getOperand(0).getValueType().isInteger());
        Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0),
                            Cond.getOperand(1), NewCC);
        std::swap(LHS, RHS);
        TValIsAllOnes = FValIsAllOnes;
        FValIsAllZeros = TValIsAllZeros;
      }
    }

    if (TValIsAllOnes || FValIsAllZeros) {
      SDValue Ret;

      if (TValIsAllOnes && FValIsAllZeros)
        Ret = Cond;
      else if (TValIsAllOnes)
        Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
                          DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
      else if (FValIsAllZeros)
        Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
                          DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));

      return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
    }
  }
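  // Illustrative example for the simplification above: because a vector
  // setcc produces all-ones or all-zeros per lane,
  //   (vselect M, all-ones, X)  is just (or M, X), and
  //   (vselect M, X, all-zeros) is just (and M, X),
  // turning a blend into a single cheap bitwise operation.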
  // If we know that this node is legal then we know that it is going to be
  // matched by one of the SSE/AVX BLEND instructions. These instructions only
  // depend on the highest bit in each word. Try to use SimplifyDemandedBits
  // to simplify previous instructions.
  if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
      !DCI.isBeforeLegalize() &&
      // We explicitly check against v8i16 and v16i16 because, although
      // they're marked as Custom, they might only be legal when Cond is a
      // build_vector of constants. This will be taken care of in a later
      // condition.
      (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
       VT != MVT::v8i16) &&
      // Don't optimize vectors of constants. Those are handled by
      // the generic code and all the bits must be properly set for
      // the generic optimizer.
      !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
    unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();

    // Don't optimize vector selects that map to mask registers.
    if (BitWidth == 1)
      return SDValue();

    assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
                                          DCI.isBeforeLegalizeOps());
    if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
        TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
                                 TLO)) {
      // If we changed the computation somewhere in the DAG, this change
      // will affect all users of Cond.
      // Make sure it is fine and update all the nodes so that we do not
      // use the generic VSELECT anymore. Otherwise, we may perform
      // wrong optimizations as we messed up with the actual expectation
      // for the vector boolean values.
      if (Cond != TLO.Old) {
        // Check all uses of the condition operand to see whether it will be
        // consumed by non-BLEND instructions, which may depend on all the
        // bits being set properly.
        for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
             I != E; ++I)
          if (I->getOpcode() != ISD::VSELECT)
            // TODO: Add other opcodes eventually lowered into BLEND.
            return SDValue();

        // Update all the users of the condition before committing the change,
        // so that the VSELECT optimizations that expect the correct vector
        // boolean value will not be triggered.
        for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
             I != E; ++I)
          DAG.ReplaceAllUsesOfValueWith(
              SDValue(*I, 0),
              DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
                          Cond, I->getOperand(1), I->getOperand(2)));
        DCI.CommitTargetLoweringOpt(TLO);
        return SDValue();
      }
      // At this point, only Cond is changed. Change the condition
      // just for N to keep the opportunity to optimize all other
      // users their own way.
      DAG.ReplaceAllUsesOfValueWith(
          SDValue(N, 0),
          DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
                      TLO.New, N->getOperand(1), N->getOperand(2)));
      return SDValue();
    }
  }
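  // Illustrative note on the demanded-bits simplification above: the BLENDV
  // family reads only the sign bit of each condition lane, so any
  // instruction that merely refines the low bits of Cond (for example an
  // AND with a constant that preserves the sign bit) can be simplified away
  // without changing the blend result.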
  // We should generate an X86ISD::BLENDI from a vselect if its argument
  // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
  // constants. This specific pattern gets generated when we split a
  // selector for a 512 bit vector in a machine without AVX512 (but with
  // 256-bit vectors), during legalization:
  //
  // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
  //
  // Iff we find this pattern and the build_vectors are built from
  // constants, we translate the vselect into a shuffle_vector that we
  // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
  if ((N->getOpcode() == ISD::VSELECT ||
       N->getOpcode() == X86ISD::SHRUNKBLEND) &&
      !DCI.isBeforeLegalize()) {
    SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;
  }

  return SDValue();
}
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and the proper
// condition code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
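// Illustrative example (hypothetical DAG): for
//   %b = (SETCC E, EFLAGS)      ; materialize ZF into a bool
//   (BRCOND (CMP %b, 0) NEQ)    ; branch if the bool is true
// the round trip through the boolean is redundant; the branch can test
// EFLAGS directly as (BRCOND EFLAGS, E).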
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
  // Quit if not CMP and SUB with its value result used.
  if (Cmp.getOpcode() != X86ISD::CMP &&
      (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
    return SDValue();

  // Quit if not used as a boolean value.
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  // Check the CMP operands. One of them should be 0 or 1 and the other should
  // be a SETCC or extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = nullptr;
  bool needOppositeCond = (CC == X86::COND_E);
  bool checkAgainstTrue = false; // Is it a comparison against 1?

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if neither operand is a constant.
    return SDValue();

  if (C->getZExtValue() == 1) {
    needOppositeCond = !needOppositeCond;
    checkAgainstTrue = true;
  } else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();

  bool truncatedToBoolWithAnd = false;
  // Skip a (zext $x), (trunc $x), or (and $x, 1) node.
  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
         SetCC.getOpcode() == ISD::TRUNCATE ||
         SetCC.getOpcode() == ISD::AND) {
    if (SetCC.getOpcode() == ISD::AND) {
      int OpIdx = -1;
      ConstantSDNode *CS;
      if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
          CS->getZExtValue() == 1)
        OpIdx = 1;
      if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
          CS->getZExtValue() == 1)
        OpIdx = 0;
      if (OpIdx == -1)
        break;
      SetCC = SetCC.getOperand(OpIdx);
      truncatedToBoolWithAnd = true;
    } else
      SetCC = SetCC.getOperand(0);
  }

  switch (SetCC.getOpcode()) {
  case X86ISD::SETCC_CARRY:
    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
    // truncated to i1 using 'and'.
    if (checkAgainstTrue && !truncatedToBoolWithAnd)
      break;
    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
           "Invalid use of SETCC_CARRY!");
    // FALL THROUGH
  case X86ISD::SETCC:
    // Set the condition code or the opposite one if necessary.
    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(1);
  case X86ISD::CMOV: {
    // Check whether the false/true value is canonical, i.e. 0 or 1.
    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
    // Quit if the true value is not a constant.
    if (!TVal)
      return SDValue();
    // Quit if the false value is not a constant.
    if (!FVal) {
      SDValue Op = SetCC.getOperand(0);
      // Skip a 'zext' or 'trunc' node.
      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
          Op.getOpcode() == ISD::TRUNCATE)
        Op = Op.getOperand(0);
      // A special case for rdrand/rdseed, where 0 is set if the false cond
      // is found.
      if ((Op.getOpcode() != X86ISD::RDRAND &&
           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
        return SDValue();
    }
    // Quit if the false value is not the constant 0 or 1.
    bool FValIsFalse = true;
    if (FVal && FVal->getZExtValue() != 0) {
      if (FVal->getZExtValue() != 1)
        return SDValue();
      // If FVal is 1, the opposite cond is needed.
      needOppositeCond = !needOppositeCond;
      FValIsFalse = false;
    }
    // Quit if TVal is not the constant opposite of FVal.
    if (FValIsFalse && TVal->getZExtValue() != 1)
      return SDValue();
    if (!FValIsFalse && TVal->getZExtValue() != 0)
      return SDValue();
    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(3);
  }
  }

  return SDValue();
}
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  SDLoc DL(N);

  // If the flag operand isn't dead, don't touch this CMOV.
  if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
    return SDValue();

  SDValue FalseOp = N->getOperand(0);
  SDValue TrueOp = N->getOperand(1);
  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
  SDValue Cond = N->getOperand(3);

  if (CC == X86::COND_E || CC == X86::COND_NE) {
    switch (Cond.getOpcode()) {
    default: break;
    case X86ISD::BSR:
    case X86ISD::BSF:
      // If the operand of BSR / BSF is proven never zero, then ZF cannot be
      // set.
      if (DAG.isKnownNeverZero(Cond.getOperand(0)))
        return (CC == X86::COND_E) ? FalseOp : TrueOp;
    }
  }

  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(Cond, CC);
  if (Flags.getNode() &&
      // Extra check as FCMOV only supports a subset of X86 cond.
      (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
    SDValue Ops[] = { FalseOp, TrueOp,
                      DAG.getConstant(CC, MVT::i8), Flags };
    return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
  }
  // If this is a select between two integer constants, try to do some
  // optimizations. Note that the operands are ordered the opposite of SELECT
  // operands.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
      // larger than FalseC (the false value).
      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueC, FalseC);
        std::swap(TrueOp, FalseOp);
      }

      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
      // This is efficient for any integer data type (including i8/i16) and
      // shift amount.
      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);

        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
                           DAG.getConstant(ShAmt, MVT::i8));
        if (N->getNumValues() == 2) // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }
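      // Illustrative example for the pow2/0 case above: (C ? 8 : 0) becomes
      //   setne  %al           ; materialize the condition as 0 or 1
      //   movzbl %al, %eax
      //   shll   $3, %eax      ; 1 << 3 == 8, 0 << 3 == 0
      // which avoids loading both constants and the cmov entirely.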
      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
      // for any integer data type, including i8/i16.
      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                           FalseC->getValueType(0), Cond);
        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                           SDValue(FalseC, 0));

        if (N->getNumValues() == 2) // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }
      // Optimize cases that will turn into an LEA instruction. This requires
      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
        uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
        if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

        bool isFastMultiplier = false;
        if (Diff < 10) {
          switch ((unsigned char)Diff) {
          default: break;
          case 1:  // result = add base, cond
          case 2:  // result = lea base(    , cond*2)
          case 3:  // result = lea base(cond, cond*2)
          case 4:  // result = lea base(    , cond*4)
          case 5:  // result = lea base(cond, cond*4)
          case 8:  // result = lea base(    , cond*8)
          case 9:  // result = lea base(cond, cond*8)
            isFastMultiplier = true;
            break;
          }
        }

        if (isFastMultiplier) {
          APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
          Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                             DAG.getConstant(CC, MVT::i8), Cond);
          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          if (N->getNumValues() == 2) // Dead flag value?
            return DCI.CombineTo(N, Cond, SDValue());
          return Cond;
        }
      }
    }
  }
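  // Illustrative example for the LEA-based lowering above (hypothetical
  // constants): (C ? 23 : 18) has Diff = 5, so it becomes
  //   setcc + zext                   ; cond is now 0 or 1 in %rax
  //   leal 18(%rax,%rax,4), %eax     ; 18 + cond*5
  // i.e. one LEA instead of materializing both constants and a cmov.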
  // Handle these cases:
  //   (select (x != c), e, c) -> (select (x != c), e, x)
  //   (select (x == c), c, e) -> (select (x == c), x, e)
  // where c is an integer constant, and the "select" is the combination
  // of CMOV and CMP.
  //
  // The rationale for this change is that the conditional-move from a constant
  // needs two instructions, whereas a conditional-move from a register needs
  // only one instruction.
  //
  // CAVEAT: By replacing a constant with a symbolic value, it may obscure
  //         some instruction-combining opportunities. This optimization needs
  //         to be postponed as late as possible.
  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
    // The DCI.xxxx conditions are provided to postpone the optimization as
    // late as possible.

    ConstantSDNode *CmpAgainst = nullptr;
    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
        !isa<ConstantSDNode>(Cond.getOperand(0))) {

      if (CC == X86::COND_NE &&
          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueOp, FalseOp);
      }

      if (CC == X86::COND_E &&
          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
        SDValue Ops[] = { FalseOp, Cond.getOperand(0),
                          DAG.getConstant(CC, MVT::i8), Cond };
        return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
      }
    }
  }

  return SDValue();
}
static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
                                                const X86Subtarget *Subtarget) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();
  // SSE/AVX/AVX2 blend intrinsics.
  case Intrinsic::x86_avx2_pblendvb:
  case Intrinsic::x86_avx2_pblendw:
  case Intrinsic::x86_avx2_pblendd_128:
  case Intrinsic::x86_avx2_pblendd_256:
    // Don't try to simplify this intrinsic if we don't have AVX2.
    if (!Subtarget->hasAVX2())
      return SDValue();
    // FALL-THROUGH
  case Intrinsic::x86_avx_blend_pd_256:
  case Intrinsic::x86_avx_blend_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx_blendv_ps_256:
    // Don't try to simplify this intrinsic if we don't have AVX.
    if (!Subtarget->hasAVX())
      return SDValue();
    // FALL-THROUGH
  case Intrinsic::x86_sse41_pblendw:
  case Intrinsic::x86_sse41_blendpd:
  case Intrinsic::x86_sse41_blendps:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_sse41_pblendvb: {
    SDValue Op0 = N->getOperand(1);
    SDValue Op1 = N->getOperand(2);
    SDValue Mask = N->getOperand(3);

    // Don't try to simplify this intrinsic if we don't have SSE4.1.
    if (!Subtarget->hasSSE41())
      return SDValue();

    // fold (blend A, A, Mask) -> A
    if (Op0 == Op1)
      return Op0;
    // fold (blend A, B, allZeros) -> A
    if (ISD::isBuildVectorAllZeros(Mask.getNode()))
      return Op0;
    // fold (blend A, B, allOnes) -> B
    if (ISD::isBuildVectorAllOnes(Mask.getNode()))
      return Op1;

    // Simplify the case where the mask is a constant i32 value.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
      if (C->isNullValue())
        return Op0;
      if (C->isAllOnesValue())
        return Op1;
    }

    break;
  }

  // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psra_d: {
    SDValue Op0 = N->getOperand(1);
    SDValue Op1 = N->getOperand(2);
    EVT VT = Op0.getValueType();
    assert(VT.isVector() && "Expected a vector type!");

    if (isa<BuildVectorSDNode>(Op1))
      Op1 = Op1.getOperand(0);

    if (!isa<ConstantSDNode>(Op1))
      return SDValue();

    EVT SVT = VT.getVectorElementType();
    unsigned SVTBits = SVT.getSizeInBits();

    ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
    const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
    uint64_t ShAmt = C.getZExtValue();

    // Don't try to convert this shift into an ISD::SRA if the shift
    // count is bigger than or equal to the element size.
    if (ShAmt >= SVTBits)
      return SDValue();

    // Trivial case: if the shift count is zero, then fold this
    // into the first operand.
    if (ShAmt == 0)
      return Op0;

    // Replace this packed shift intrinsic with a target independent
    // shift dag node.
    SDValue Splat = DAG.getConstant(C, VT);
    return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
  }
  }

  return SDValue();
}
/// PerformMulCombine - Optimize a single multiply with a constant into two
/// operations in order to implement it with two cheaper instructions, e.g.
/// LEA + SHL, LEA + LEA.
static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  uint64_t MulAmt = C->getZExtValue();
  if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
    return SDValue();

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((MulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = MulAmt / 9;
  } else if ((MulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = MulAmt / 5;
  } else if ((MulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = MulAmt / 3;
  }
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
    SDLoc DL(N);

    if (isPowerOf2_64(MulAmt2) &&
        !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
      // If the second multiplier is a power of two, issue it first. We want
      // the multiply by 3, 5, or 9 to be folded into the addressing mode
      // unless the lone use is an add.
      std::swap(MulAmt1, MulAmt2);

    SDValue NewMul;
    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, VT));

    // Do not add new nodes to the DAG combiner worklist.
    DCI.CombineTo(N, NewMul, false);
  }
  return SDValue();
}
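// Illustrative example for PerformMulCombine (hypothetical constant):
// x * 45 has 45 = 9 * 5, so it can be emitted as two LEAs,
//   leaq (%rdi,%rdi,8), %rax   ; x*9
//   leaq (%rax,%rax,4), %rax   ; (x*9)*5 == x*45
// which is typically cheaper than a 64-bit imulq with an immediate.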
static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zeros or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
        ((N00.getOpcode() == ISD::ANY_EXTEND ||
          N00.getOpcode() == ISD::ZERO_EXTEND) &&
         N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
      APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
      APInt ShAmt = N1C->getAPIntValue();
      Mask = Mask.shl(ShAmt);
      if (Mask != 0)
        return DAG.getNode(ISD::AND, SDLoc(N), VT,
                           N00, DAG.getConstant(Mask, VT));
    }
  }

  // Hardware support for vector shifts is sparse which makes us scalarize the
  // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
  // SHL:
  //   (shl V, 1) -> add V,V
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
      assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an
      // ADD of two values.
      if (N1SplatC->getZExtValue() == 1)
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}
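// Illustrative example for the shift-by-one rewrite above: a splat
// (shl V, 1) on v4i32 becomes (add V, V), i.e. paddd %xmm0, %xmm0, which
// every SSE2 target supports and which is at least as fast as pslld $1
// where both exist.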
/// \brief Returns a vector of 0s if the node in input is a vector logical
/// shift by a constant amount which is known to be bigger than or equal
/// to the vector element size in bits.
static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);

  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
      (!Subtarget->hasInt256() ||
       (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
    return SDValue();

  SDValue Amt = N->getOperand(1);
  SDLoc DL(N);
  if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
    if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
      APInt ShiftAmt = AmtSplat->getAPIntValue();
      unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();

      // SSE2/AVX2 logical shifts always return a vector of 0s
      // if the shift amount is bigger than or equal to
      // the element size. The constant shift amount will be
      // encoded as an 8-bit immediate.
      if (ShiftAmt.trunc(8).uge(MaxAmount))
        return getZeroVector(VT, Subtarget, DAG, DL);
    }

  return SDValue();
}
/// PerformShiftCombine - Combine shifts.
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  if (N->getOpcode() == ISD::SHL) {
    SDValue V = PerformSHLCombine(N, DAG);
    if (V.getNode()) return V;
  }

  if (N->getOpcode() != ISD::SRA) {
    // Try to fold this logical shift into a zero vector.
    SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
    if (V.getNode()) return V;
  }

  return SDValue();
}
// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
// where both setccs reference the same FP CMP, and rewrite for CMPEQSS
// and friends. Likewise for OR -> CMPNEQSS.
static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget *Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
  if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue CMP0 = N0->getOperand(1);
    SDValue CMP1 = N1->getOperand(1);
    SDLoc DL(N);

    // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
      return SDValue();

    SDValue CMP00 = CMP0->getOperand(0);
    SDValue CMP01 = CMP0->getOperand(1);
    EVT VT = CMP00.getValueType();

    if (VT == MVT::f32 || VT == MVT::f64) {
      bool ExpectingFlags = false;
      // Check for any users that want flags:
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           !ExpectingFlags && UI != UE; ++UI)
        switch (UI->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }

      if (!ExpectingFlags) {
        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);

        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

        if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget->hasAVX512()) {
            SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
                                         CMP01, DAG.getConstant(x86cc, MVT::i8));
            if (N->getValueType(0) != MVT::i1)
              return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
                                 FSetCC);
            return FSetCC;
          }
          SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
                                              CMP00.getValueType(), CMP00, CMP01,
                                              DAG.getConstant(x86cc, MVT::i8));

          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;

          if (is64BitFP && !Subtarget->is64Bit()) {
            // On a 32-bit target, we cannot bitcast the 64-bit float to a
            // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
            // bits, but can do this little dance to extract the lowest 32 bits
            // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                                           OnesOrZeroesF);
            SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
                                           Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0));
            IntVT = MVT::i32;
          }

          SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT,
                                              OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
                                      DAG.getConstant(1, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                              ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }
  return SDValue();
}
/// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
/// so it can be folded inside ANDNP.
static bool CanFoldXORWithAllOnes(const SDNode *N) {
  EVT VT = N->getValueType(0);

  // Match direct AllOnes for 128 and 256-bit vectors.
  if (ISD::isBuildVectorAllOnes(N))
    return true;

  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  // Sometimes the operand may come from an insert_subvector building a
  // 256-bit AllOnes vector.
  if (VT.is256BitVector() &&
      N->getOpcode() == ISD::INSERT_SUBVECTOR) {
    SDValue V1 = N->getOperand(0);
    SDValue V2 = N->getOperand(1);

    if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
        V1.getOperand(0).getOpcode() == ISD::UNDEF &&
        ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
        ISD::isBuildVectorAllOnes(V2.getNode()))
      return true;
  }

  return false;
}
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.is256BitVector())
    return SDValue();

  assert((N->getOpcode() == ISD::ANY_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND ||
          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");

  SDValue Narrow = N->getOperand(0);
  EVT NarrowVT = Narrow->getValueType(0);
  if (!NarrowVT.is128BitVector())
    return SDValue();

  if (Narrow->getOpcode() != ISD::XOR &&
      Narrow->getOpcode() != ISD::AND &&
      Narrow->getOpcode() != ISD::OR)
    return SDValue();

  SDValue N0 = Narrow->getOperand(0);
  SDValue N1 = Narrow->getOperand(1);
  SDLoc DL(Narrow);

  // The left side has to be a trunc.
  if (N0.getOpcode() != ISD::TRUNCATE)
    return SDValue();

  // The type of the truncated inputs.
  EVT WideVT = N0->getOperand(0)->getValueType(0);
  if (WideVT != VT)
    return SDValue();

  // The right side has to be a 'trunc' or a constant vector.
  bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
  ConstantSDNode *RHSConstSplat = nullptr;
  if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
    RHSConstSplat = RHSBV->getConstantSplatNode();
  if (!RHSTrunc && !RHSConstSplat)
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
    return SDValue();

  // Set N0 and N1 to hold the inputs to the new wide operation.
  N0 = N0->getOperand(0);
  if (RHSConstSplat) {
    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
                     SDValue(RHSConstSplat, 0));
    SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
    N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
  } else if (RHSTrunc) {
    N1 = N1->getOperand(0);
  }

  // Generate the wide operation.
  SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  case ISD::ANY_EXTEND:
    return Op;
  case ISD::ZERO_EXTEND: {
    unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
    APInt Mask = APInt::getAllOnesValue(InBits);
    Mask = Mask.zext(VT.getScalarType().getSizeInBits());
    return DAG.getNode(ISD::AND, DL, VT,
                       Op, DAG.getConstant(Mask, VT));
  }
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  // Create BEXTR instructions.
  // BEXTR is ((X >> imm) & (2**size-1)).
  if (VT == MVT::i32 || VT == MVT::i64) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDLoc DL(N);

    // Check for BEXTR.
    if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
        (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
      ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
      ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (MaskNode && ShiftNode) {
        uint64_t Mask = MaskNode->getZExtValue();
        uint64_t Shift = ShiftNode->getZExtValue();
        if (isMask_64(Mask)) {
          uint64_t MaskSize = CountPopulation_64(Mask);
          if (Shift + MaskSize <= VT.getSizeInBits())
            return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
                               DAG.getConstant(Shift | (MaskSize << 8), VT));
        }
      }
    }

    return SDValue();
  }
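  // Illustrative example for the BEXTR formation above (hypothetical
  // values): (x >> 4) & 0xFFF is a 12-bit field starting at bit 4, so the
  // control constant is 4 | (12 << 8) == 0xC04 and the whole expression
  // becomes a single bextr instruction.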
  // Want to form ANDNP nodes:
  // 1) In the hopes of then easily combining them with OR and AND nodes
  //    to form PBLEND/PSIGN.
  // 2) To match ANDN packed intrinsics.
  if (VT != MVT::v2i64 && VT != MVT::v4i64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // Check LHS for vnot.
  if (N0.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);

  // Check RHS for vnot.
  if (N1.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);

  return SDValue();
}
static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // Look for PSIGN/BLEND patterns.
  if (VT == MVT::v2i64 || VT == MVT::v4i64) {
    if (!Subtarget->hasSSSE3() ||
        (VT == MVT::v4i64 && !Subtarget->hasInt256()))
      return SDValue();

    // Canonicalize pandn to the RHS.
    if (N0.getOpcode() == X86ISD::ANDNP)
      std::swap(N0, N1);
    // or (and (m, y), (pandn m, x))
    if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
      SDValue Mask = N1.getOperand(0);
      SDValue X = N1.getOperand(1);
      SDValue Y;
      if (N0.getOperand(0) == Mask)
        Y = N0.getOperand(1);
      if (N0.getOperand(1) == Mask)
        Y = N0.getOperand(0);

      // Check to see if the mask appeared in both the AND and ANDNP.
      if (!Y.getNode())
        return SDValue();

      // Validate that X, Y, and Mask are bitcasts, and see through them.
      // Look through the mask bitcast.
      if (Mask.getOpcode() == ISD::BITCAST)
        Mask = Mask.getOperand(0);
      if (X.getOpcode() == ISD::BITCAST)
        X = X.getOperand(0);
      if (Y.getOpcode() == ISD::BITCAST)
        Y = Y.getOperand(0);

      EVT MaskVT = Mask.getValueType();

      // Validate that the Mask operand is a vector sra node.
      // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
      // there is no psrai.b
      unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
      unsigned SraAmt = ~0;
      if (Mask.getOpcode() == ISD::SRA) {
        if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
          if (auto *AmtConst = AmtBV->getConstantSplatNode())
            SraAmt = AmtConst->getZExtValue();
      } else if (Mask.getOpcode() == X86ISD::VSRAI) {
        SDValue SraC = Mask.getOperand(1);
        SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
      }
      if ((SraAmt + 1) != EltBits)
        return SDValue();

      SDLoc DL(N);

      // Now we know we at least have a pblendvb with the mask val. See if
      // we can form a psignb/w/d.
      // psign = x.type == y.type == mask.type && y = sub(0, x);
      if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
          ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
          X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
        assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
               "Unsupported VT for PSIGN");
        Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
        return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
      }
      // PBLENDVB is only available on SSE 4.1.
      if (!Subtarget->hasSSE41())
        return SDValue();

      EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;

      X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
      Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
      Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
      Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
      return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
    }
  }
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  // SHLD/SHRD instructions have lower register pressure, but on some
  // platforms they have higher latency than the equivalent
  // series of shifts/ors that would otherwise be generated.
  // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
  // have higher latencies and we are not optimizing for size.
  if (!OptForSize && Subtarget->isSHLDSlow())
    return SDValue();

  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();
  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
    ShAmt0 = ShAmt0.getOperand(0);
  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  SDLoc DL(N);
  unsigned Opc = X86ISD::SHLD;
  SDValue Op0 = N0.getOperand(0);
  SDValue Op1 = N1.getOperand(0);
  if (ShAmt0.getOpcode() == ISD::SUB) {
    Opc = X86ISD::SHRD;
    std::swap(Op0, Op1);
    std::swap(ShAmt0, ShAmt1);
  }

  unsigned Bits = VT.getSizeInBits();
  if (ShAmt1.getOpcode() == ISD::SUB) {
    SDValue Sum = ShAmt1.getOperand(0);
    if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
      if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
        return DAG.getNode(Opc, DL, VT,
                           Op0, Op1,
                           DAG.getNode(ISD::TRUNCATE, DL,
                                       MVT::i8, ShAmt0));
    }
  } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
    ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
    if (ShAmt0C &&
        ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
      return DAG.getNode(Opc, DL, VT,
                         N0.getOperand(0), N1.getOperand(0),
                         DAG.getNode(ISD::TRUNCATE, DL,
                                     MVT::i8, ShAmt0));
  }

  return SDValue();
}
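// Illustrative example for the SHLD/SHRD fold above (hypothetical shift
// count): on i64, (x << 10) | (y >> 54) is a double shift, emitted as
//   shldq $10, %rsi, %rdi   ; rdi = (rdi << 10) | (rsi >> 54)
// replacing two shifts and an or with a single instruction on targets
// where SHLD is not slow.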
// Generate NEG and CMOV for integer abs.
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

  // Since X86 does not have CMOV for an 8-bit integer, we don't convert
  // 8-bit integer abs to NEG and CMOV.
  if (VT.isInteger() && VT.getSizeInBits() == 8)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // Check for the pattern XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
  // and change it to SUB and CMOV.
  if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
      N0.getOpcode() == ISD::ADD &&
      N0.getOperand(1) == N1 &&
      N1.getOpcode() == ISD::SRA &&
      N1.getOperand(0) == N0.getOperand(0))
    if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
      if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
        // Generate SUB & CMOV.
        SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                                  DAG.getConstant(0, VT), N0.getOperand(0));

        SDValue Ops[] = { N0.getOperand(0), Neg,
                          DAG.getConstant(X86::COND_GE, MVT::i8),
                          SDValue(Neg.getNode(), 1) };
        return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
      }
  return SDValue();
}
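// Illustrative example for the abs combine above: for i32, the shift idiom
// (x + (x >> 31)) ^ (x >> 31) computes |x|; it is rewritten as
//   Neg = sub(0, x)               ; also produces EFLAGS
//   abs = cmov_ge(x, Neg, flags)  ; pick -x when 0-x is signed-ge 0, else x
// which is one sub plus one cmov instead of add, sra, and xor.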
// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes.
static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (Subtarget->hasCMov()) {
    SDValue RV = performIntegerAbsCombine(N, DAG);
    if (RV.getNode())
      return RV;
  }

  return SDValue();
}
/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations.
  ISD::LoadExtType Ext = Ld->getExtensionType();
  unsigned Alignment = Ld->getAlignment();
  bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
  if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
      !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
    unsigned NumElems = RegVT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Ptr = Ld->getBasePtr();
    SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());

    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems/2);
    SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                std::min(16U, Alignment));
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1),
                             Load2.getValue(1));

    SDValue NewVec = DAG.getUNDEF(RegVT);
    NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
    NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  return SDValue();
}
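// Illustrative example for the load split above: on a target where 32-byte
// unaligned loads are slow, an unaligned v8f32 load becomes roughly
//   vmovups     (%rdi), %xmm0
//   vinsertf128 $1, 16(%rdi), %ymm0, %ymm0
// i.e. two fast 16-byte operations instead of one slow 32-byte one.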
/// PerformMLOADCombine - Resolve extending loads.
static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
  if (Mld->getExtensionType() != ISD::SEXTLOAD)
    return SDValue();

  EVT VT = Mld->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned NumElems = VT.getVectorNumElements();
  EVT LdVT = Mld->getMemoryVT();
  SDLoc dl(Mld);

  assert(LdVT != VT && "Cannot extend to the same type");
  unsigned ToSz = VT.getVectorElementType().getSizeInBits();
  unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
  // From, To sizes and ElemCount must be pow of two.
  assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
          "Unexpected size for extending masked load");

  unsigned SizeRatio = ToSz / FromSz;
  assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                   LdVT.getScalarType(), NumElems*SizeRatio);
  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  // Convert the Src0 value.
  SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
  if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
    SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;

    // Can't shuffle using an illegal type.
    assert (TLI.isTypeLegal(WideVecVT) && "WideVecVT should be legal");
    WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
                                    DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
  }
  // Prepare the new mask.
  SDValue NewMask;
  SDValue Mask = Mld->getMask();
  if (Mask.getValueType() == VT) {
    // The mask and the original value have the same type.
    NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
    SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;
    for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
      ShuffleVec[i] = NumElems*SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, WideVecVT),
                                   &ShuffleVec[0]);
  } else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
    unsigned WidenNumElts = NumElems*SizeRatio;
    unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
                                     Mld->getBasePtr(), NewMask, WideSrc0,
                                     Mld->getMemoryVT(), Mld->getMemOperand(),
                                     ISD::NON_EXTLOAD);
  SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
  return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}
/// PerformMSTORECombine - Resolve truncating stores.
static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget *Subtarget) {
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (!Mst->isTruncatingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned NumElems = VT.getVectorNumElements();
  EVT StVT = Mst->getMemoryVT();
  SDLoc dl(Mst);

  assert(StVT != VT && "Cannot truncate to the same type");
  unsigned FromSz = VT.getVectorElementType().getSizeInBits();
  unsigned ToSz = StVT.getVectorElementType().getSizeInBits();

  // From, To sizes and ElemCount must be pow of two.
  assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
          "Unexpected size for truncating masked store");
  // We are going to use the original vector elt for storing.
  // Accumulated smaller vector elements must be a multiple of the store size.
  assert (((NumElems * FromSz) % ToSz) == 0 &&
          "Unexpected ratio for truncating masked store");

  unsigned SizeRatio = FromSz / ToSz;
  assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                   StVT.getScalarType(), NumElems*SizeRatio);

  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
  SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
  for (unsigned i = 0; i != NumElems; ++i)
    ShuffleVec[i] = i * SizeRatio;

  // Can't shuffle using an illegal type.
  assert (TLI.isTypeLegal(WideVecVT) && "WideVecVT should be legal");

  SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                              DAG.getUNDEF(WideVecVT),
                                              &ShuffleVec[0]);

  SDValue NewMask;
  SDValue Mask = Mst->getMask();
  if (Mask.getValueType() == VT) {
    // The mask and the original value have the same type.
    NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;
    for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
      ShuffleVec[i] = NumElems*SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, WideVecVT),
                                   &ShuffleVec[0]);
  } else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
    unsigned WidenNumElts = NumElems*SizeRatio;
    unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
                            Mst->getBasePtr(), NewMask, StVT,
                            Mst->getMemOperand(), false);
}
/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT VT = St->getValue().getValueType();
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  SDValue StoredVal = St->getOperand(1);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we are saving a concatenation of two XMM registers and 32-byte stores
  // are slow, such as on Sandy Bridge, perform two 16-byte stores.
  unsigned Alignment = St->getAlignment();
  bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
  if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
      StVT == VT && !IsAligned) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
    SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);

    SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
    SDValue Ptr0 = St->getBasePtr();
    SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);

    SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
                               St->getPointerInfo(), St->isVolatile(),
                               St->isNonTemporal(), Alignment);
    SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
                               St->getPointerInfo(), St->isVolatile(),
                               St->isNonTemporal(),
                               std::min(16U, Alignment));
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
  }
  // Optimize trunc store (of multiple scalars) to shuffle and store.
  // First, pack all of the elements in one place. Next, store to memory
  // in fewer chunks.
  if (St->isTruncatingStore() && VT.isVector()) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    unsigned NumElems = VT.getVectorNumElements();
    assert(StVT != VT && "Cannot truncate to the same type");
    unsigned FromSz = VT.getVectorElementType().getSizeInBits();
    unsigned ToSz = StVT.getVectorElementType().getSizeInBits();

    // From, To sizes and ElemCount must be pow of two.
    if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
    // We are going to use the original vector elt for storing.
    // Accumulated smaller vector elements must be a multiple of the store
    // size.
    if (0 != (NumElems * FromSz) % ToSz) return SDValue();

    unsigned SizeRatio = FromSz / ToSz;

    assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());

    // Create a type on which we perform the shuffle.
    EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                     StVT.getScalarType(), NumElems*SizeRatio);

    assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

    SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
    SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;

    // Can't shuffle using an illegal type.
    if (!TLI.isTypeLegal(WideVecVT))
      return SDValue();

    SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                         DAG.getUNDEF(WideVecVT),
                                         &ShuffleVec[0]);
    // At this point all of the data is stored at the bottom of the
    // register. We now need to save it to mem.

    // Find the largest store unit.
    MVT StoreType = MVT::i8;
    for (MVT Tp : MVT::integer_valuetypes()) {
      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
        StoreType = Tp;
    }

    // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to F64.
    if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
        (64 <= NumElems * ToSz))
      StoreType = MVT::f64;

    // Bitcast the original vector into a vector of store-size units.
    EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
            StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
    assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
    SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
    SmallVector<SDValue, 8> Chains;
    SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
                                        TLI.getPointerTy());
    SDValue Ptr = St->getBasePtr();

    // Perform one or more big stores into memory.
    for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
      SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                                   StoreType, ShuffWide,
                                   DAG.getIntPtrConstant(i));
      SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
                                St->getPointerInfo(), St->isVolatile(),
                                St->isNonTemporal(), St->getAlignment());
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      Chains.push_back(Ch);
    }

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  }
  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.
  //
  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function *F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
  bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
                     && Subtarget->hasSSE2();
  if ((VT.isVector() ||
       (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode* LdVal = St->getValue().getNode();
    LoadSDNode *Ld = nullptr;
    int TokenFactorIndex = -1;
    SmallVector<SDValue, 8> Ops;
    SDNode* ChainVal = St->getChain().getNode();
    // Must be a store of a load. We currently handle two cases: the load
    // is a direct child, and it's under an intervening TokenFactor. It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).getNode() == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }

    if (!Ld || !ISD::isNormalLoad(Ld))
      return SDValue();

    // If this is not the MMX case, i.e. we are just turning i64 load/store
    // into f64 load/store, avoid the transformation if there are multiple
    // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld), StDL(N);
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget->is64Bit() || F64IsLegal) {
      EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
      SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
                                  Ld->getPointerInfo(), Ld->isVolatile(),
                                  Ld->isNonTemporal(), Ld->isInvariant(),
                                  Ld->getAlignment());
      SDValue NewChain = NewLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(NewChain);
        NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
      }
      return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
                          St->getPointerInfo(),
                          St->isVolatile(), St->isNonTemporal(),
                          St->getAlignment());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
                                 DAG.getConstant(4, MVT::i32));

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(), Ld->getAlignment());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(),
                               MinAlign(Ld->getAlignment(), 4));

    SDValue NewChain = LoLd.getValue(1);
    if (TokenFactorIndex != -1) {
      Ops.push_back(LoLd);
      Ops.push_back(HiLd);
      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
    }

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
                         DAG.getConstant(4, MVT::i32));

    SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
                                St->getPointerInfo(),
                                St->isVolatile(), St->isNonTemporal(),
                                St->getAlignment());
    SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                St->isVolatile(),
                                St->isNonTemporal(),
                                MinAlign(St->getAlignment(), 4));
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }
  return SDValue();
}
/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS.  A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector.  For example, if
///   A = < float a0, float a1, float a2, float a3 >
/// and
///   B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
/// Note that the binary operation should have the property that if one of the
/// operands is UNDEF then the result is UNDEF.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
  // Look for the following pattern: if
  //   A = < float a0, float a1, float a2, float a3 >
  //   B = < float b0, float b1, float b2, float b3 >
  // and
  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
  // which is A horizontal-op B.

  // At least one of the operands should be a vector shuffle.
  if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  MVT VT = LHS.getSimpleValueType();

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for horizontal add/sub");

  // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
  // operate independently on 128-bit lanes.
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElts = NumElts / NumLanes;
  assert((NumLaneElts % 2 == 0) &&
         "Vector type should have an even number of elements in each lane");
  unsigned HalfLaneElts = NumLaneElts/2;

  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle then pretend it is the shuffle
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: in what follows a default initialized SDValue represents an UNDEF of
  // type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask(NumElts);
  if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
      A = LHS.getOperand(0);
    if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
      B = LHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), LMask.begin());
  } else {
    if (LHS.getOpcode() != ISD::UNDEF)
      A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask[i] = i;
  }
  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask(NumElts);
  if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
      C = RHS.getOperand(0);
    if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
      D = RHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), RMask.begin());
  } else {
    if (RHS.getOpcode() != ISD::UNDEF)
      C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask[i] = i;
  }

  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D) && !(A == D && B == C))
    return false;

  // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
  if (!A.getNode() && !B.getNode())
    return false;

  // If A and B occur in reverse order in RHS, then "swap" them (which means
  // rewriting the mask).
  if (A != C)
    CommuteVectorShuffleMask(RMask, NumElts);

  // At this point LHS and RHS are equivalent to
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  //   RHS = VECTOR_SHUFFLE A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      int LIdx = LMask[i+l], RIdx = RMask[i+l];

      // Ignore any UNDEF components.
      if (LIdx < 0 || RIdx < 0 ||
          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;

      // Check that successive elements are being operated on.  If not, this is
      // not a horizontal operation.
      unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
      int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
      if (!(LIdx == Index && RIdx == Index + 1) &&
          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
        return false;
    }
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
  return true;
}
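// Worked example (illustrative, not from the original comments): because AVX
// horizontal ops work per 128-bit lane, a v8f32 HADD of A and B produces
//   < a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7 >
// rather than a fully "flattened" order; the Index computation above
// (NumElts*Src + l) encodes exactly this per-lane split between sources.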
/// Do target-specific dag combines on floating point adds.
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Try to synthesize horizontal adds from adds of shuffles.
  if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, true))
    return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
  return SDValue();
}
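// For illustration (a hypothetical input, not from the original source):
// with SSE3, IR such as
//   %l = shufflevector <4 x float> %a, <4 x float> %b,
//                      <4 x i32> <i32 0, i32 2, i32 4, i32 6>
//   %r = shufflevector <4 x float> %a, <4 x float> %b,
//                      <4 x i32> <i32 1, i32 3, i32 5, i32 7>
//   %s = fadd <4 x float> %l, %r
// is recognized by isHorizontalBinOp and emitted as a single haddps.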
/// Do target-specific dag combines on floating point subs.
static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Try to synthesize horizontal subs from subs of shuffles.
  if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, false))
    return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
  return SDValue();
}
/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDValue();
}
/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);

  // Only perform optimizations if UnsafeMath is used.
  if (!DAG.getTarget().Options.UnsafeFPMath)
    return SDValue();

  // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
  // into FMINC and FMAXC, which are commutative operations.
  unsigned NewOp = 0;
  switch (N->getOpcode()) {
  default: llvm_unreachable("unknown opcode");
  case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
  case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
  }

  return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), N->getOperand(1));
}
/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}
/// Do target-specific dag combines on X86ISD::FANDN nodes.
static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
  // FANDN(0.0, x) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  // FANDN(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}
static SDValue PerformBTCombine(SDNode *N,
                                SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  // BT ignores high bits in the bit index operand.
  SDValue Op1 = N->getOperand(1);
  if (Op1.hasOneUse()) {
    unsigned BitWidth = Op1.getValueSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
        TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }
  return SDValue();
}
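// Illustrative note (an assumption about the hardware, spelled out here): the
// register form of BT uses the bit index modulo the operand width, e.g.
// "btl %ecx, %eax" tests bit (%ecx mod 32).  That is why only the low
// Log2_32(BitWidth) bits of the index are marked demanded above, and any
// wider mask or constant feeding the index can be shrunk for free.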
static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  EVT VT = N->getValueType(0), OpVT = Op.getValueType();
  if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
      VT.getVectorElementType().getSizeInBits() ==
      OpVT.getVectorElementType().getSizeInBits()) {
    return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
  }

  return SDValue();
}
static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
                                               const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.isVector())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);

  // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE
  // and AVX2, since there is no sign-extended shift right operation on a
  // vector with 64-bit elements.
  // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
  //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
                           N0.getOpcode() == ISD::SIGN_EXTEND)) {
    SDValue N00 = N0.getOperand(0);

    // EXTLOAD has a better solution on AVX2,
    // it may be replaced with X86ISD::VSEXT node.
    if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
      if (!ISD::isNormalLoad(N00.getNode()))
        return SDValue();

    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
                                N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }
  return SDValue();
}
static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // (i8,i32 sext (sdivrem (i8 x, i8 y))) ->
  //   (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)))
  // This exposes the sext to the sdivrem lowering, so that it directly extends
  // from AH (which we otherwise need to do contortions to access).
  if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
      N0.getValueType() == MVT::i8 && VT == MVT::i32) {
    SDLoc dl(N0);
    SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
    SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
                            N0.getOperand(0), N0.getOperand(1));
    DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
    return R.getValue(1);
  }
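  // Background sketch (illustrative, not from the original source): the 8-bit
  // idiv leaves the quotient in AL and the remainder in AH.  With the sext
  // folded into SDIVREM8_SEXT_HREG, the remainder can be produced by a single
  // "movsbl %ah, %reg"-style copy instead of shifting AX right by 8 and then
  // sign-extending.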

  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (!Subtarget->hasFp256())
    return SDValue();

  if (VT.isVector() && VT.getSizeInBits() == 256) {
    SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
    if (R.getNode())
      return R;
  }

  return SDValue();
}
static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget* Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  EVT ScalarVT = VT.getScalarType();
  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
      (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
    return SDValue();

  SDValue A = N->getOperand(0);
  SDValue B = N->getOperand(1);
  SDValue C = N->getOperand(2);

  bool NegA = (A.getOpcode() == ISD::FNEG);
  bool NegB = (B.getOpcode() == ISD::FNEG);
  bool NegC = (C.getOpcode() == ISD::FNEG);

  // Negative multiplication when NegA xor NegB
  bool NegMul = (NegA != NegB);
  if (NegA)
    A = A.getOperand(0);
  if (NegB)
    B = B.getOperand(0);
  if (NegC)
    C = C.getOperand(0);

  unsigned Opcode;
  if (!NegMul)
    Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
  else
    Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;

  return DAG.getNode(Opcode, dl, VT, A, B, C);
}
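// For illustration (summarizing the logic above, not original source text):
//   fma( a,  b,  c)  -> FMADD
//   fma( a,  b, -c)  -> FMSUB
//   fma(-a,  b,  c)  -> FNMADD   (likewise fma(a, -b, c))
//   fma(-a,  b, -c)  -> FNMSUB   (likewise fma(a, -b, -c))
// A double negation such as fma(-a, -b, c) cancels and stays FMADD.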
static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
  //           (and (i32 x86isd::setcc_carry), 1)
  // This eliminates the zext. This transformation is necessary because
  // ISD::SETCC is always legalized to i8.
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C || C->getZExtValue() != 1)
        return SDValue();
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, VT));
    }
  }

  if (N0.getOpcode() == ISD::TRUNCATE &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, VT));
    }
  }

  if (VT.is256BitVector()) {
    SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
    if (R.getNode())
      return R;
  }

  // (i8,i32 zext (udivrem (i8 x, i8 y))) ->
  //   (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)))
  // This exposes the zext to the udivrem lowering, so that it directly extends
  // from AH (which we otherwise need to do contortions to access).
  if (N0.getOpcode() == ISD::UDIVREM &&
      N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
      (VT == MVT::i32 || VT == MVT::i64)) {
    SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
    SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
                            N0.getOperand(0), N0.getOperand(1));
    DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
    return R.getValue(1);
  }

  return SDValue();
}
// Optimize  x == -y --> x+y == 0
//           x != -y --> x+y != 0
static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget* Subtarget) {
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
      if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
        SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
                                   LHS.getValueType(), RHS, LHS.getOperand(1));
        return DAG.getSetCC(SDLoc(N), N->getValueType(0),
                            addV, DAG.getConstant(0, addV.getValueType()), CC);
      }
  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
      if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
        SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
                                   RHS.getValueType(), LHS, RHS.getOperand(1));
        return DAG.getSetCC(SDLoc(N), N->getValueType(0),
                            addV, DAG.getConstant(0, addV.getValueType()), CC);
      }

  if (VT.getScalarType() == MVT::i1) {
    bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
      (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
    bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
    if (!IsSEXT0 && !IsVZero0)
      return SDValue();
    bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
      (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
    bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());

    if (!IsSEXT1 && !IsVZero1)
      return SDValue();

    if (IsSEXT0 && IsVZero1) {
      assert(VT == LHS.getOperand(0).getValueType() &&
             "Unexpected operand type");
      if (CC == ISD::SETEQ)
        return DAG.getNOT(DL, LHS.getOperand(0), VT);
      return LHS.getOperand(0);
    }
    if (IsSEXT1 && IsVZero0) {
      assert(VT == RHS.getOperand(0).getValueType() &&
             "Unexpected operand type");
      if (CC == ISD::SETEQ)
        return DAG.getNOT(DL, RHS.getOperand(0), VT);
      return RHS.getOperand(0);
    }
  }

  return SDValue();
}
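// Worked example (illustrative only): for "x == -y" the combine rewrites
// setcc(x, sub(0, y)) as setcc(add(x, y), 0), so instead of materializing
// the negation the backend can emit something like
//   addl %esi, %edi
//   sete %al
// letting the add itself set ZF.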
static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  MVT VT = N->getOperand(1)->getSimpleValueType(0);
  assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
         "X86insertps is only defined for v4x32");

  SDValue Ld = N->getOperand(1);
  if (MayFoldLoad(Ld)) {
    // Extract the countS bits from the immediate so we can get the proper
    // address when narrowing the vector load to a specific element.
    // When the second source op is a memory address, insertps doesn't use
    // countS and just gets an f32 from that address.
    unsigned DestIndex =
        cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
    Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
  } else
    return SDValue();

  // Create this as a scalar to vector to match the instruction pattern.
  SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
  // countS bits are ignored when loading from memory on insertps, which
  // means we don't need to explicitly set them to 0.
  return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
                     LoadScalarToVector, N->getOperand(2));
}
// Helper function of PerformSETCCCombine. It is to materialize "setb reg"
// as "sbb reg,reg", since it can be extended without a zext and produces
// an all-ones bit which is more useful than 0/1 in some cases.
static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
                               MVT VT) {
  if (VT == MVT::i8)
    return DAG.getNode(ISD::AND, DL, VT,
                       DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                   DAG.getConstant(X86::COND_B, MVT::i8),
                                   EFLAGS),
                       DAG.getConstant(1, VT));
  assert (VT == MVT::i1 && "Unexpected type for SETCC node");
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
                     DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                 DAG.getConstant(X86::COND_B, MVT::i8),
                                 EFLAGS));
}
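// Illustrative note (not from the original comments): after a compare that
// sets CF, "sbb %eax, %eax" computes eax - eax - CF, i.e. 0 or 0xffffffff,
// so the borrow becomes an all-ones mask directly; a plain setb would give
// 0/1 and usually needs a following zext or neg to produce the same mask.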
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
  SDValue EFLAGS = N->getOperand(1);

  if (CC == X86::COND_A) {
    // Try to convert COND_A into COND_B in an attempt to facilitate
    // materializing "setb reg".
    //
    // Do not flip "e > c", where "c" is a constant, because Cmp instruction
    // cannot take an immediate as its first operand.
    //
    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
        EFLAGS.getValueType().isInteger() &&
        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
                                   EFLAGS.getNode()->getVTList(),
                                   EFLAGS.getOperand(1), EFLAGS.getOperand(0));
      SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
      return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
    }
  }

  // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
  // a zext and produces an all-ones bit which is more useful than 0/1 in some
  // cases.
  if (CC == X86::COND_B)
    return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));

  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
  if (Flags.getNode()) {
    SDValue Cond = DAG.getConstant(CC, MVT::i8);
    return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
  }

  return SDValue();
}
// Optimize branch condition evaluation.
//
static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  SDValue Chain = N->getOperand(0);
  SDValue Dest = N->getOperand(1);
  SDValue EFLAGS = N->getOperand(3);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));

  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
  if (Flags.getNode()) {
    SDValue Cond = DAG.getConstant(CC, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
                       Flags);
  }

  return SDValue();
}
static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
                                                         SelectionDAG &DAG) {
  // Take advantage of vector comparisons producing 0 or -1 in each lane to
  // optimize away operation when it's from a constant.
  //
  // The general transformation is:
  //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
  //    AND(VECTOR_CMP(x,y), constant2)
  //    constant2 = UNARYOP(constant)
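  //
  // Hedged example (hypothetical values, not from the original source): for
  // UNARYOP == sint_to_fp and constant == <i32 1, i32 2>, each lane of the
  // AND is either the constant lane or all-zero bits, so
  //   sint_to_fp(and(cmp, <1, 2>))  ==  and(cmp, bitcast(<1.0f, 2.0f>))
  // which holds because sint_to_fp(0) is +0.0, whose bit pattern is zero.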
  //
  // Early exit if this isn't a vector operation, the operand of the
  // unary operation isn't a bitwise AND, or if the sizes of the operations
  // aren't the same.
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
      VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
    return SDValue();

  // Now check that the other operand of the AND is a constant. We could
  // make the transformation for non-constant splats as well, but it's unclear
  // that would be a benefit as it would not eliminate any operations, just
  // perform one more step in scalar code before moving to the vector unit.
  if (BuildVectorSDNode *BV =
          dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
    // Bail out if the vector isn't a constant.
    if (!BV->isConstant())
      return SDValue();

    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
    // Create a new constant of the appropriate type for the transformed
    // DAG.
    SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
    // The AND node needs bitcasts to/from an integer vector type around it.
    SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
                                 N->getOperand(0)->getOperand(0), MaskConst);
    SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
    return Res;
  }

  return SDValue();
}
static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
                                        const X86TargetLowering *XTLI) {
  // First try to optimize away the conversion entirely when it's
  // conditionally from a constant. Vectors only.
  SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
  if (Res != SDValue())
    return Res;

  // Now move on to more general possibilities.
  SDValue Op0 = N->getOperand(0);
  EVT InVT = Op0->getValueType(0);

  // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
  if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
    SDLoc dl(N);
    MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
  }

  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
  // a 32-bit target where SSE doesn't support i64->FP operations.
  if (Op0.getOpcode() == ISD::LOAD) {
    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
    EVT VT = Ld->getValueType(0);
    if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
        !XTLI->getSubtarget()->is64Bit() &&
        VT == MVT::i64) {
      SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
                                          Ld->getChain(), Op0, DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
      return FILDChain;
    }
  }
  return SDValue();
}
// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
                                 X86TargetLowering::DAGCombinerInfo &DCI) {
  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
  // the result is either zero or one (depending on the input carry bit).
  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
  if (X86::isZeroNode(N->getOperand(0)) &&
      X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this when
      // it is dead.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
    SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
                               DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                           DAG.getConstant(X86::COND_B,
                                                           MVT::i8),
                                           N->getOperand(2)),
                               DAG.getConstant(1, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  return SDValue();
}
// fold (add Y, (sete  X, 0)) -> adc  0, Y
//      (add Y, (setne X, 0)) -> sbb -1, Y
//      (sub (sete  X, 0), Y) -> sbb  0, Y
//      (sub (setne X, 0), Y) -> adc -1, Y
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);

  // Look through ZExts.
  SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
  if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
    return SDValue();

  SDValue SetCC = Ext.getOperand(0);
  if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
    return SDValue();

  X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  SDValue Cmp = SetCC.getOperand(1);
  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
      !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue CmpOp0 = Cmp.getOperand(0);
  SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
                               DAG.getConstant(1, CmpOp0.getValueType()));

  SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
  if (CC == X86::COND_NE)
    return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
                       DL, OtherVal.getValueType(), OtherVal,
                       DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
  return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
                     DL, OtherVal.getValueType(), OtherVal,
                     DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
}
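// Worked example (illustrative): "Y + (X == 0)" becomes
//   cmpl $1, %edi      // sets CF iff X == 0 (i.e. unsigned X < 1)
//   adcl $0, %esi      // adds the carry into Y
// which is exactly the compare-against-1 plus ADC/SBB pattern built above.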
/// PerformADDCombine - Do target-specific dag combines on integer adds.
static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Try to synthesize horizontal adds from adds of shuffles.
  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
       (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
      isHorizontalBinOp(Op0, Op1, true))
    return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);

  return OptimizeConditionalInDecrement(N, DAG);
}
static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // X86 can't encode an immediate LHS of a sub. See if we can push the
  // negation into a preceding instruction.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
    // If the RHS of the sub is a XOR with one use and a constant, invert the
    // immediate. Then add one to the LHS of the sub so we can turn
    // X-Y -> X+~Y+1, saving one register.
    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
        isa<ConstantSDNode>(Op1.getOperand(1))) {
      APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
      EVT VT = Op0.getValueType();
      SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
                                   Op1.getOperand(0),
                                   DAG.getConstant(~XorC, VT));
      return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
                         DAG.getConstant(C->getAPIntValue()+1, VT));
    }
  }
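  // Worked example (illustrative, hypothetical constants): with C == 5 and
  // K == 3,   5 - (x ^ 3)  ==  (x ^ ~3) + 6,   because -(v) == ~v + 1 and
  // ~(x ^ K) == x ^ ~K; the xor constant is inverted in place and only the
  // add immediate changes, so no extra register is needed for the negation.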

  // Try to synthesize horizontal subs from subs of shuffles.
  EVT VT = N->getValueType(0);
  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
       (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
      isHorizontalBinOp(Op0, Op1, true))
    return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);

  return OptimizeConditionalInDecrement(N, DAG);
}
/// performVZEXTCombine - Performs combines on X86ISD::VZEXT nodes.
static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  MVT VT = N->getSimpleValueType(0);
  SDValue Op = N->getOperand(0);
  MVT OpVT = Op.getSimpleValueType();
  MVT OpEltVT = OpVT.getVectorElementType();
  unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();

  // (vzext (bitcast (vzext x))) -> (vzext x)
  SDValue V = Op;
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
    MVT InnerVT = V.getSimpleValueType();
    MVT InnerEltVT = InnerVT.getVectorElementType();

    // If the element sizes match exactly, we can just do one larger vzext.
    // This is always an exact type match as vzext operates on integer types.
    if (OpEltVT == InnerEltVT) {
      assert(OpVT == InnerVT && "Types must match for vzext!");
      return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
    }

    // The only other way we can combine them is if only a single element of
    // the inner vzext is used in the input to the outer vzext.
    if (InnerEltVT.getSizeInBits() < InputBits)
      return SDValue();

    // In this case, the inner vzext is completely dead because we're going to
    // only look at bits inside of the low element. Just do the outer vzext on
    // a bitcast of the input to the inner.
    return DAG.getNode(X86ISD::VZEXT, DL, VT,
                       DAG.getNode(ISD::BITCAST, DL, OpVT, V));
  }

  // Check if we can bypass extracting and re-inserting an element of an input
  // vector. Essentially:
  // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
      V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
    SDValue ExtractedV = V.getOperand(0);
    SDValue OrigV = ExtractedV.getOperand(0);
    if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
      if (ExtractIdx->getZExtValue() == 0) {
        MVT OrigVT = OrigV.getSimpleValueType();
        // Extract a subvector if necessary...
        if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
          int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
          OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
                                    OrigVT.getVectorNumElements() / Ratio);
          OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
                              DAG.getIntPtrConstant(0));
        }
        Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
        return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
      }
  }

  return SDValue();
}
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::EXTRACT_VECTOR_ELT:
    return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
  case ISD::VSELECT:
  case ISD::SELECT:
  case X86ISD::SHRUNKBLEND:
    return PerformSELECTCombine(N, DAG, DCI, Subtarget);
  case X86ISD::CMOV:        return PerformCMOVCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD:            return PerformAddCombine(N, DAG, Subtarget);
  case ISD::SUB:            return PerformSubCombine(N, DAG, Subtarget);
  case X86ISD::ADC:         return PerformADCCombine(N, DAG, DCI);
  case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:            return PerformShiftCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:            return PerformAndCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:             return PerformOrCombine(N, DAG, DCI, Subtarget);
  case ISD::XOR:            return PerformXorCombine(N, DAG, DCI, Subtarget);
  case ISD::LOAD:           return PerformLOADCombine(N, DAG, DCI, Subtarget);
  case ISD::MLOAD:          return PerformMLOADCombine(N, DAG, DCI, Subtarget);
  case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
  case ISD::MSTORE:         return PerformMSTORECombine(N, DAG, Subtarget);
  case ISD::SINT_TO_FP:     return PerformSINT_TO_FPCombine(N, DAG, this);
  case ISD::FADD:           return PerformFADDCombine(N, DAG, Subtarget);
  case ISD::FSUB:           return PerformFSUBCombine(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FMIN:
  case X86ISD::FMAX:        return PerformFMinFMaxCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  case X86ISD::FANDN:       return PerformFANDNCombine(N, DAG);
  case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
  case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND:    return PerformSExtCombine(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND_INREG:
    return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
  case ISD::TRUNCATE:       return PerformTruncateCombine(N, DAG, DCI, Subtarget);
  case ISD::SETCC:          return PerformISDSETCCCombine(N, DAG, Subtarget);
  case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG, DCI, Subtarget);
  case X86ISD::BRCOND:      return PerformBrCondCombine(N, DAG, DCI, Subtarget);
  case X86ISD::VZEXT:       return performVZEXTCombine(N, DAG, DCI, Subtarget);
  case X86ISD::SHUFP:       // Handle all target specific shuffles
  case X86ISD::PALIGNR:
  case X86ISD::UNPCKH:
  case X86ISD::UNPCKL:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFB:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::VPERMILPI:
  case X86ISD::VPERM2X128:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget);
  case ISD::FMA:            return PerformFMACombine(N, DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN:
    return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
  case X86ISD::INSERTPS:
    return PerformINSERTPSCombine(N, DAG, Subtarget);
  case ISD::BUILD_VECTOR:   return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
  }

  return SDValue();
}
/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}
/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then it
    // might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than it being
        // promoted as an operand) is when its only use is liveout.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    // fallthrough
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    if (!Commute && MayFoldLoad(N1))
      return false;
    // Avoid disabling potential load folding opportunities.
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Promote = true;
  }
  }

  PVT = MVT::i32;
  return Promote;
}
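// Illustrative encoding note (an editorial example, not original text):
// promoting i16 arithmetic to i32 drops the 0x66 operand-size prefix, e.g.
//   addw $1, %ax    -> 66 83 C0 01
//   addl $1, %eax   ->    83 C0 01
// one byte shorter, and free of the prefix-decoding penalty on some cores.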
//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

namespace {
  // Helper to match a string separated by whitespace.
  bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
    s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.

    for (unsigned i = 0, e = args.size(); i != e; ++i) {
      StringRef piece(*args[i]);
      if (!s.startswith(piece)) // Check if the piece matches.
        return false;

      s = s.substr(piece.size());
      StringRef::size_type pos = s.find_first_not_of(" \t");
      if (pos == 0) // We matched a prefix.
        return false;

      s = s.substr(pos);
    }

    return s.empty();
  }

  const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
}
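// Usage sketch (illustrative): matchAsm(Piece, "bswap", "$0") accepts
// "bswap $0" and "  bswap   $0" alike, since matchAsmImpl skips runs of
// spaces/tabs between the pieces; it requires whitespace between pieces
// (a piece that merely prefixes a longer token is rejected) and no
// trailing non-whitespace text.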
static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {

  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
    if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {

      if (AsmPieces.size() == 3)
        return true;
      else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
        return true;
    }
  }
  return false;
}
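// For illustration (a typical case, not from the original source): a
// constraint tail such as "~{cc},~{dirflag},~{fpsr},~{flags}" splits into
// four pieces and passes the checks above; the three-piece form without
// "~{dirflag}" is accepted as well.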
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());

  std::string AsmStr = IA->getAsmString();

  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    // FIXME: this should verify that we are targeting a 486 or better.  If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm.  For now, we don't support 486 or
    // lower so don't worry about this.
    // bswap $0
    if (matchAsm(AsmPieces[0], "bswap", "$0") ||
        matchAsm(AsmPieces[0], "bswapl", "$0") ||
        matchAsm(AsmPieces[0], "bswapq", "$0") ||
        matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
        matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
        matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      return IntrinsicLowering::LowerToByteSwap(CI);
    }

    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
         matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
        matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
        matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
        if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
            matchAsm(AsmPieces[1], "bswap", "%edx") &&
            matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R':
    case 'q':
    case 'Q':
    case 'f':
    case 't':
    case 'u':
    case 'y':
    case 'x':
    case 'Y':
    case 'l':
      return C_RegisterClass;
    case 'a':
    case 'b':
    case 'c':
    case 'd':
    case 'S':
    case 'D':
    case 'A':
      return C_Register;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'G':
    case 'C':
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
  case 'R':
  case 'q':
  case 'Q':
  case 'a':
  case 'b':
  case 'c':
  case 'd':
  case 'S':
  case 'D':
  case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f':
  case 't':
  case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x':
  case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup.  These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = nullptr;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(
            Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      case MVT::v8f64:
      case MVT::v16f32:
      case MVT::v16i32:
      case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {

      Res.first = X86::FP0+Constraint[4]-'0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }
  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}
int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
}