//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86ShuffleDecodeConstantPool.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<int> ExperimentalPrefLoopAlignment(
    "x86-experimental-pref-loop-alignment", cl::init(4),
    cl::desc("Sets the preferable loop alignment for experiments "
             "(the last x86-experimental-pref-loop-alignment bits"
             " of the loop header PC will be 0)."),
    cl::Hidden);

/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(*MF.getFunction(), Msg, dl.getDebugLoc()));
}
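// Illustrative (hypothetical) call site: report the problem and keep lowering:
//   if (!Subtarget.hasSSE2())
//     errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");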

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  X86ScalarSSEf64 = Subtarget.hasSSE2();
  X86ScalarSSEf32 = Subtarget.hasSSE1();
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
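  // For example, a scalar SETCC produces 0 or 1 in an i8 register, while an
  // SSE compare like CMPPS produces an all-zeros or all-ones lane mask.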

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides and use cheaper ones.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }
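  // Each bypassed divide is guarded by a runtime check: if both operands
  // actually fit in the narrower type, the much cheaper narrow divide is used.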

  if (Subtarget.isTargetKnownWindowsMSVC() ||
      Subtarget.isTargetWindowsItanium()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
  }
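  // For example, a 64-bit signed divide on these 32-bit Windows targets is
  // emitted as a call to _alldiv using the X86_StdCall convention set above.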

  if (Subtarget.isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget.isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget.is64Bit()) {
    if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
      // f32/f64 are legal, f80 is custom.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!Subtarget.useSoftFloat()) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!Subtarget.useSoftFloat()) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (!Subtarget.useSoftFloat()) {
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    if (X86ScalarSSEf32) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget.is64Bit()) {
    if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
      // FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
    } else {
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    }
  } else if (!Subtarget.useSoftFloat()) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With AVX512 we can use vcvts[ds]2usi for f32/f64->i32, f80 is custom.
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }
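  // For example, IR that computes both a / b and a % b on i32 ends up sharing
  // a single DIV32r, which leaves the quotient in EAX and the remainder in EDX.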

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Legal);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }

  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (Subtarget.useSoftFloat() ||
      (!Subtarget.hasF16C() && !Subtarget.hasAVX512())) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
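  // When expanded, these f16 conversions become runtime library calls
  // (__gnu_h2f_ieee / __gnu_f2h_ieee by default) rather than inline
  // VCVTPH2PS/VCVTPS2PH sequences.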

  if (Subtarget.hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol, VT, Custom);
    setOperationAction(ISD::BlockAddress, VT, Custom);
  }

  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }

  if (Subtarget.hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget.hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);

  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (UseX87 && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF, VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      if (!TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FSIN, VT, Expand);
        setOperationAction(ISD::FCOS, VT, Expand);
        setOperationAction(ISD::FSINCOS, VT, Expand);
      }
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87, except f128 in MMX.
  if (UseX87) {
    if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
      addRegisterClass(MVT::f128, &X86::FR128RegClass);
      ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
    }

    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
  }

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // it alone.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);

    setOperationAction(ISD::SMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v16i8, Legal);

    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Custom);
    }

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);

      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }
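    // Promotion of these ops just bitcasts the operands to v2i64, performs the
    // operation there, and bitcasts the result back, so e.g. a v4i32 AND still
    // selects to a single PAND.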

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);

    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    // In the customized shift lowering, the legal v4i32/v2i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::FRINT, RoundedTy, Legal);
      setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
    }

    setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
    setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v4i32, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
    }

    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
    }

    // i8 vectors are custom because the source register and source
    // memory operand types are not the same width.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::ROTL, VT, Custom);

    // XOP can efficiently perform BITREVERSE with VPPERM.
    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasFp256()) {
    bool HasInt256 = Subtarget.hasInt256();

    addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);

    for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
      setOperationAction(ISD::FFLOOR, VT, Legal);
      setOperationAction(ISD::FCEIL, VT, Legal);
      setOperationAction(ISD::FTRUNC, VT, Legal);
      setOperationAction(ISD::FRINT, VT, Legal);
      setOperationAction(ISD::FNEARBYINT, VT, Legal);
      setOperationAction(ISD::FNEG, VT, Custom);
      setOperationAction(ISD::FABS, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
    }

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    // In the customized shift lowering, the legal v8i32/v4i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
    }

    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Custom);
      setOperationAction(ISD::CTLZ, VT, Custom);
    }

    if (Subtarget.hasAnyFMA()) {
      for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
                       MVT::v2f64, MVT::v4f64 })
        setOperationAction(ISD::FMA, VT, Legal);
    }

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
    }

    setOperationAction(ISD::MUL, MVT::v4i64, Custom);
    setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MUL, MVT::v32i8, Custom);

    setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);

    setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v32i8, Custom);

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
      setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
    }

    if (HasInt256) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i32, Custom);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v16i16, Custom);

      // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
      // when we have a 256bit-wide blend with immediate.
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);

      // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
      for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
        setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
        setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
        setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
        setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
      }
    }

    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
      setOperationAction(ISD::MLOAD, VT, Legal);
      setOperationAction(ISD::MSTORE, VT, Legal);
    }

    // Extract subvector is special because the value type
    // (result) is 128-bit but the source is 256-bit wide.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    }

    // Custom lower several nodes for 256-bit types.
    for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
                    MVT::v8f32, MVT::v4f64 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    }

    if (HasInt256)
      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
      setOperationPromotedToType(ISD::AND, VT, MVT::v4i64);
      setOperationPromotedToType(ISD::OR, VT, MVT::v4i64);
      setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64);
      setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
      setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64);
    }
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
    addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
    addRegisterClass(MVT::v8f64, &X86::VR512RegClass);

    addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
    addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
    addRegisterClass(MVT::v16i1, &X86::VK16RegClass);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);

    for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
      setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
      setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
      setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
      setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
      setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
      setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
    }

    for (MVT VT : {MVT::v2i64, MVT::v4i32, MVT::v8i32, MVT::v4i64, MVT::v8i16,
                   MVT::v16i8, MVT::v16i16, MVT::v32i8, MVT::v16i32,
                   MVT::v8i64, MVT::v32i16, MVT::v64i8}) {
      MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
      setLoadExtAction(ISD::SEXTLOAD, VT, MaskVT, Custom);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MaskVT, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MaskVT, Custom);
      setTruncStoreAction(VT, MaskVT, Custom);
    }

    for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
      setOperationAction(ISD::FNEG, VT, Custom);
      setOperationAction(ISD::FABS, VT, Custom);
      setOperationAction(ISD::FMA, VT, Legal);
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v16i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i1, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);

    setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
    setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
    setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
    setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
    setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
    if (Subtarget.hasVLX()) {
      setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
      setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
      setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
      setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
      setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);

      setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
      setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
      setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
      setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
      setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
    } else {
      for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
      }
    }

    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);

    if (Subtarget.hasDQI()) {
      for (auto VT : { MVT::v2i64, MVT::v4i64, MVT::v8i64 }) {
        setOperationAction(ISD::SINT_TO_FP, VT, Legal);
        setOperationAction(ISD::UINT_TO_FP, VT, Legal);
        setOperationAction(ISD::FP_TO_SINT, VT, Legal);
        setOperationAction(ISD::FP_TO_UINT, VT, Legal);
      }
      if (Subtarget.hasVLX()) {
        // Fast v2f32 SINT_TO_FP( v2i32 ) custom conversion.
        setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
        setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
        setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
      }
    }

    if (Subtarget.hasVLX()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
      setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Custom);
      setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Custom);
      setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Custom);
      setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Custom);

      // FIXME: These commands are also available on SSE/AVX2; add the relevant
      // patterns.
      setLoadExtAction(ISD::EXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
    }

    setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);

    for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
      setOperationAction(ISD::FFLOOR, VT, Legal);
      setOperationAction(ISD::FCEIL, VT, Legal);
      setOperationAction(ISD::FTRUNC, VT, Legal);
      setOperationAction(ISD::FRINT, VT, Legal);
      setOperationAction(ISD::FNEARBYINT, VT, Legal);
    }

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v16i32, Custom);

    // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v64i8, Custom);
    setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v64i8, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom);

    setOperationAction(ISD::MUL, MVT::v8i64, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v16f32, Custom);

    setOperationAction(ISD::MUL, MVT::v16i32, Legal);

    // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
    setOperationAction(ISD::ABS, MVT::v4i64, Legal);
    setOperationAction(ISD::ABS, MVT::v2i64, Legal);

    for (auto VT : { MVT::v8i1, MVT::v16i1 }) {
      setOperationAction(ISD::ADD, VT, Custom);
      setOperationAction(ISD::SUB, VT, Custom);
      setOperationAction(ISD::MUL, VT, Custom);
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::TRUNCATE, VT, Custom);

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Expand);
    }
1320 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1321 setOperationAction(ISD::SMAX, VT, Legal);
1322 setOperationAction(ISD::UMAX, VT, Legal);
1323 setOperationAction(ISD::SMIN, VT, Legal);
1324 setOperationAction(ISD::UMIN, VT, Legal);
1325 setOperationAction(ISD::ABS, VT, Legal);
1326 setOperationAction(ISD::SRL, VT, Custom);
1327 setOperationAction(ISD::SHL, VT, Custom);
1328 setOperationAction(ISD::SRA, VT, Custom);
1329 setOperationAction(ISD::CTPOP, VT, Custom);
1330 setOperationAction(ISD::CTTZ, VT, Custom);
1333 // Need to promote to 64-bit even though we have 32-bit masked instructions
1334 // because the IR optimizers rearrange bitcasts around logic ops leaving
1335 // too many variations to handle if we don't promote them.
1336 setOperationPromotedToType(ISD::AND, MVT::v16i32, MVT::v8i64);
1337 setOperationPromotedToType(ISD::OR, MVT::v16i32, MVT::v8i64);
1338 setOperationPromotedToType(ISD::XOR, MVT::v16i32, MVT::v8i64);
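// Illustrative sketch (not part of the original source): with these promotions,
// a 32-bit vector logic op such as
//   %r = and <16 x i32> %a, %b
// is rewritten to operate on v8i64 (the operands are bitcast), so only the
// q-word forms of the logic instructions need to be matched; the exact
// instruction chosen still depends on the subtarget.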
1340 if (Subtarget.hasCDI()) {
1341 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1342 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v16i32, MVT::v2i64,
1343 MVT::v4i64, MVT::v8i64}) {
1344 setOperationAction(ISD::CTLZ, VT, Legal);
1345 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
1347 } // Subtarget.hasCDI()
1349 if (Subtarget.hasDQI()) {
1350 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1351 setOperationAction(ISD::MUL, MVT::v2i64, Legal);
1352 setOperationAction(ISD::MUL, MVT::v4i64, Legal);
1353 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1356 if (Subtarget.hasVPOPCNTDQ()) {
1357 // VPOPCNTDQ sub-targets extend 128/256 vectors to use the avx512
1358 // version of popcntd/q.
1359 for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v8i32, MVT::v4i64,
1360 MVT::v4i32, MVT::v2i64})
1361 setOperationAction(ISD::CTPOP, VT, Legal);
1364 // Custom lower several nodes.
1365 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1366 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1367 setOperationAction(ISD::MGATHER, VT, Custom);
1368 setOperationAction(ISD::MSCATTER, VT, Custom);
1370 // Extract subvector is special because the value type
1371 // (result) is 256-bit but the source is 512-bit wide.
1372 // 128-bit was made Custom under AVX1.
1373 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1374 MVT::v8f32, MVT::v4f64 })
1375 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1376 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1,
1377 MVT::v16i1, MVT::v32i1, MVT::v64i1 })
1378 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1380 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1381 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1382 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1383 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1384 setOperationAction(ISD::VSELECT, VT, Custom);
1385 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1386 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1387 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1388 setOperationAction(ISD::MLOAD, VT, Legal);
1389 setOperationAction(ISD::MSTORE, VT, Legal);
1390 setOperationAction(ISD::MGATHER, VT, Legal);
1391 setOperationAction(ISD::MSCATTER, VT, Custom);
1393 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
1394 setOperationPromotedToType(ISD::LOAD, VT, MVT::v8i64);
1395 setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64);
1399 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1400 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1404 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1406 setOperationAction(ISD::ADD, MVT::v32i1, Custom);
1407 setOperationAction(ISD::ADD, MVT::v64i1, Custom);
1408 setOperationAction(ISD::SUB, MVT::v32i1, Custom);
1409 setOperationAction(ISD::SUB, MVT::v64i1, Custom);
1410 setOperationAction(ISD::MUL, MVT::v32i1, Custom);
1411 setOperationAction(ISD::MUL, MVT::v64i1, Custom);
1413 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1414 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1415 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1416 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1417 setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
1418 setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
1419 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
1420 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
1421 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
1422 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
1423 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
1424 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
1425 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal);
1426 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal);
1427 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1428 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1429 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i1, Custom);
1430 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i1, Custom);
1431 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
1432 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
1433 setOperationAction(ISD::SELECT, MVT::v32i1, Custom);
1434 setOperationAction(ISD::SELECT, MVT::v64i1, Custom);
1435 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1436 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1437 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1438 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1439 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1440 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
1441 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
1442 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1443 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1444 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i1, Custom);
1445 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i1, Custom);
1446 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
1447 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
1448 setOperationAction(ISD::TRUNCATE, MVT::v32i1, Custom);
1449 setOperationAction(ISD::TRUNCATE, MVT::v64i1, Custom);
1450 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1451 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i1, Custom);
1452 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i1, Custom);
1453 setOperationAction(ISD::BUILD_VECTOR, MVT::v32i1, Custom);
1454 setOperationAction(ISD::BUILD_VECTOR, MVT::v64i1, Custom);
1455 setOperationAction(ISD::VSELECT, MVT::v32i1, Expand);
1456 setOperationAction(ISD::VSELECT, MVT::v64i1, Expand);
1457 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1459 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1461 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1462 if (Subtarget.hasVLX()) {
1463 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1464 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1467 LegalizeAction Action = Subtarget.hasVLX() ? Legal : Custom;
1468 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1469 setOperationAction(ISD::MLOAD, VT, Action);
1470 setOperationAction(ISD::MSTORE, VT, Action);
1473 if (Subtarget.hasCDI()) {
1474 setOperationAction(ISD::CTLZ, MVT::v32i16, Custom);
1475 setOperationAction(ISD::CTLZ, MVT::v64i8, Custom);
1478 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1479 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1480 setOperationAction(ISD::VSELECT, VT, Custom);
1481 setOperationAction(ISD::ABS, VT, Legal);
1482 setOperationAction(ISD::SRL, VT, Custom);
1483 setOperationAction(ISD::SHL, VT, Custom);
1484 setOperationAction(ISD::SRA, VT, Custom);
1485 setOperationAction(ISD::MLOAD, VT, Legal);
1486 setOperationAction(ISD::MSTORE, VT, Legal);
1487 setOperationAction(ISD::CTPOP, VT, Custom);
1488 setOperationAction(ISD::CTTZ, VT, Custom);
1489 setOperationAction(ISD::SMAX, VT, Legal);
1490 setOperationAction(ISD::UMAX, VT, Legal);
1491 setOperationAction(ISD::SMIN, VT, Legal);
1492 setOperationAction(ISD::UMIN, VT, Legal);
1494 setOperationPromotedToType(ISD::AND, VT, MVT::v8i64);
1495 setOperationPromotedToType(ISD::OR, VT, MVT::v8i64);
1496 setOperationPromotedToType(ISD::XOR, VT, MVT::v8i64);
1499 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
1500 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1501 if (Subtarget.hasVLX()) {
1502         // FIXME: These instructions are also available on SSE/AVX2; add the relevant patterns.
1503 setLoadExtAction(ExtType, MVT::v16i16, MVT::v16i8, Legal);
1504 setLoadExtAction(ExtType, MVT::v8i16, MVT::v8i8, Legal);
1509 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1510 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1511 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1513 for (auto VT : { MVT::v2i1, MVT::v4i1 }) {
1514 setOperationAction(ISD::ADD, VT, Custom);
1515 setOperationAction(ISD::SUB, VT, Custom);
1516 setOperationAction(ISD::MUL, VT, Custom);
1517 setOperationAction(ISD::VSELECT, VT, Expand);
1519 setOperationAction(ISD::TRUNCATE, VT, Custom);
1520 setOperationAction(ISD::SETCC, VT, Custom);
1521 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1522 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1523 setOperationAction(ISD::SELECT, VT, Custom);
1524 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1525 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1528 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1529 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Custom);
1530 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Custom);
1531 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i1, Custom);
1533 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1534 setOperationAction(ISD::SMAX, VT, Legal);
1535 setOperationAction(ISD::UMAX, VT, Legal);
1536 setOperationAction(ISD::SMIN, VT, Legal);
1537 setOperationAction(ISD::UMIN, VT, Legal);
1541 // We want to custom lower some of our intrinsics.
1542 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1543 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1544 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1545 if (!Subtarget.is64Bit()) {
1546 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1547 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
1550 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1551 // handle type legalization for these operations here.
1553 // FIXME: We really should do custom legalization for addition and
1554 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1555 // than generic legalization for 64-bit multiplication-with-overflow, though.
1556 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1557 if (VT == MVT::i64 && !Subtarget.is64Bit())
1559 // Add/Sub/Mul with overflow operations are custom lowered.
1560 setOperationAction(ISD::SADDO, VT, Custom);
1561 setOperationAction(ISD::UADDO, VT, Custom);
1562 setOperationAction(ISD::SSUBO, VT, Custom);
1563 setOperationAction(ISD::USUBO, VT, Custom);
1564 setOperationAction(ISD::SMULO, VT, Custom);
1565 setOperationAction(ISD::UMULO, VT, Custom);
1567     // Support carry-in as a value rather than as glue.
1568 setOperationAction(ISD::ADDCARRY, VT, Custom);
1569 setOperationAction(ISD::SUBCARRY, VT, Custom);
1570 setOperationAction(ISD::SETCCCARRY, VT, Custom);
1573 if (!Subtarget.is64Bit()) {
1574 // These libcalls are not available in 32-bit.
1575 setLibcallName(RTLIB::SHL_I128, nullptr);
1576 setLibcallName(RTLIB::SRL_I128, nullptr);
1577 setLibcallName(RTLIB::SRA_I128, nullptr);
1580 // Combine sin / cos into one node or libcall if possible.
1581 if (Subtarget.hasSinCos()) {
1582 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1583 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1584 if (Subtarget.isTargetDarwin()) {
1585 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1586 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1587 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1588 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1592 if (Subtarget.isTargetWin64()) {
1593 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1594 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1595 setOperationAction(ISD::SREM, MVT::i128, Custom);
1596 setOperationAction(ISD::UREM, MVT::i128, Custom);
1597 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1598 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1601 // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1602 // is. We should promote the value to 64-bits to solve this.
1603 // This is what the CRT headers do - `fmodf` is an inline header
1604 // function casting to f64 and calling `fmod`.
1605 if (Subtarget.is32Bit() && (Subtarget.isTargetKnownWindowsMSVC() ||
1606 Subtarget.isTargetWindowsItanium()))
1607 for (ISD::NodeType Op :
1608 {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
1609 ISD::FLOG10, ISD::FPOW, ISD::FSIN})
1610 if (isOperationExpand(Op, MVT::f32))
1611 setOperationAction(Op, MVT::f32, Promote);
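// Illustrative sketch (assumption, not from the original source): under this
// promotion, a call such as
//   float r = fmodf(a, b);
// on a 32-bit MSVC target is lowered roughly as (float)fmod((double)a, (double)b),
// mirroring what the CRT headers do for fmodf.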
1613 // We have target-specific dag combine patterns for the following nodes:
1614 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1615 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1616 setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1617 setTargetDAGCombine(ISD::BITCAST);
1618 setTargetDAGCombine(ISD::VSELECT);
1619 setTargetDAGCombine(ISD::SELECT);
1620 setTargetDAGCombine(ISD::SHL);
1621 setTargetDAGCombine(ISD::SRA);
1622 setTargetDAGCombine(ISD::SRL);
1623 setTargetDAGCombine(ISD::OR);
1624 setTargetDAGCombine(ISD::AND);
1625 setTargetDAGCombine(ISD::ADD);
1626 setTargetDAGCombine(ISD::FADD);
1627 setTargetDAGCombine(ISD::FSUB);
1628 setTargetDAGCombine(ISD::FNEG);
1629 setTargetDAGCombine(ISD::FMA);
1630 setTargetDAGCombine(ISD::FMINNUM);
1631 setTargetDAGCombine(ISD::FMAXNUM);
1632 setTargetDAGCombine(ISD::SUB);
1633 setTargetDAGCombine(ISD::LOAD);
1634 setTargetDAGCombine(ISD::MLOAD);
1635 setTargetDAGCombine(ISD::STORE);
1636 setTargetDAGCombine(ISD::MSTORE);
1637 setTargetDAGCombine(ISD::TRUNCATE);
1638 setTargetDAGCombine(ISD::ZERO_EXTEND);
1639 setTargetDAGCombine(ISD::ANY_EXTEND);
1640 setTargetDAGCombine(ISD::SIGN_EXTEND);
1641 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1642 setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
1643 setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
1644 setTargetDAGCombine(ISD::SINT_TO_FP);
1645 setTargetDAGCombine(ISD::UINT_TO_FP);
1646 setTargetDAGCombine(ISD::SETCC);
1647 setTargetDAGCombine(ISD::MUL);
1648 setTargetDAGCombine(ISD::XOR);
1649 setTargetDAGCombine(ISD::MSCATTER);
1650 setTargetDAGCombine(ISD::MGATHER);
1652 computeRegisterProperties(Subtarget.getRegisterInfo());
1654 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1655 MaxStoresPerMemsetOptSize = 8;
1656 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1657 MaxStoresPerMemcpyOptSize = 4;
1658 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1659 MaxStoresPerMemmoveOptSize = 4;
1660 // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
1661 setPrefLoopAlignment(ExperimentalPrefLoopAlignment);
1663 // An out-of-order CPU can speculatively execute past a predictable branch,
1664 // but a conditional move could be stalled by an expensive earlier operation.
1665 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
1666 EnableExtLdPromotion = true;
1667 setPrefFunctionAlignment(4); // 2^4 bytes.
1669 verifyIntrinsicTables();
1672 // This has so far only been implemented for 64-bit MachO.
1673 bool X86TargetLowering::useLoadStackGuardNode() const {
1674 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
1677 TargetLoweringBase::LegalizeTypeAction
1678 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1679 if (ExperimentalVectorWideningLegalization &&
1680 VT.getVectorNumElements() != 1 &&
1681 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1682 return TypeWidenVector;
1684 return TargetLoweringBase::getPreferredVectorAction(VT);
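// Illustrative sketch (not part of the original source): with widening
// legalization enabled, an illegal type such as v3i32 is widened to v4i32
// instead of being promoted or split; single-element vectors and i1 vectors
// still take the default path above.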
1687 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
1688 LLVMContext& Context,
1693 if (VT.isSimple()) {
1694 MVT VVT = VT.getSimpleVT();
1695 const unsigned NumElts = VVT.getVectorNumElements();
1696 MVT EltVT = VVT.getVectorElementType();
1697 if (VVT.is512BitVector()) {
1698 if (Subtarget.hasAVX512())
1699 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1700 EltVT == MVT::f32 || EltVT == MVT::f64)
1702 case 8: return MVT::v8i1;
1703 case 16: return MVT::v16i1;
1705 if (Subtarget.hasBWI())
1706 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1708 case 32: return MVT::v32i1;
1709 case 64: return MVT::v64i1;
1713 if (Subtarget.hasBWI() && Subtarget.hasVLX())
1714 return MVT::getVectorVT(MVT::i1, NumElts);
1716 if (!isTypeLegal(VT) && getTypeAction(Context, VT) == TypePromoteInteger) {
1717 EVT LegalVT = getTypeToTransformTo(Context, VT);
1718 EltVT = LegalVT.getVectorElementType().getSimpleVT();
1721 if (Subtarget.hasVLX() && EltVT.getSizeInBits() >= 32)
1723 case 2: return MVT::v2i1;
1724 case 4: return MVT::v4i1;
1725 case 8: return MVT::v8i1;
1729 return VT.changeVectorElementTypeToInteger();
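// Illustrative examples (assumptions, not from the original source): with
// AVX512, a setcc on v16f32 yields a v16i1 mask, and with BWI a v32i16 compare
// yields v32i1. Without the corresponding mask support, the fallback above
// produces a vector of element-sized integers, e.g. v8i32 for a v8f32 compare.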
1732 /// Helper for getByValTypeAlignment to determine
1733 /// the desired ByVal argument alignment.
1734 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1737 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1738 if (VTy->getBitWidth() == 128)
1740 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1741 unsigned EltAlign = 0;
1742 getMaxByValAlign(ATy->getElementType(), EltAlign);
1743 if (EltAlign > MaxAlign)
1744 MaxAlign = EltAlign;
1745 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1746 for (auto *EltTy : STy->elements()) {
1747 unsigned EltAlign = 0;
1748 getMaxByValAlign(EltTy, EltAlign);
1749 if (EltAlign > MaxAlign)
1750 MaxAlign = EltAlign;
1757 /// Return the desired alignment for ByVal aggregate
1758 /// function arguments in the caller parameter area. For X86, aggregates
1759 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1760 /// are at 4-byte boundaries.
1761 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
1762 const DataLayout &DL) const {
1763 if (Subtarget.is64Bit()) {
1764 // Max of 8 and alignment of type.
1765 unsigned TyAlign = DL.getABITypeAlignment(Ty);
1772 if (Subtarget.hasSSE1())
1773 getMaxByValAlign(Ty, Align);
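// Illustrative sketch (not from the original source): on a 32-bit target with
// SSE1, a byval struct containing a <4 x float> member is placed at a 16-byte
// boundary, while a plain integer aggregate stays at the default 4-byte
// alignment; on 64-bit targets the minimum is 8 bytes.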
1777 /// Returns the target specific optimal type for load
1778 /// and store operations as a result of memset, memcpy, and memmove
1779 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
1780 /// constraint. Similarly, if SrcAlign is zero there is no need to check it
1781 /// against an alignment requirement,
1782 /// probably because the source does not need to be loaded. If 'IsMemset' is
1783 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1784 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1785 /// source is constant so it does not need to be loaded.
1786 /// It returns EVT::Other if the type should be determined using generic
1787 /// target-independent logic.
1789 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1790 unsigned DstAlign, unsigned SrcAlign,
1791 bool IsMemset, bool ZeroMemset,
1793 MachineFunction &MF) const {
1794 const Function *F = MF.getFunction();
1795 if (!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1797 (!Subtarget.isUnalignedMem16Slow() ||
1798 ((DstAlign == 0 || DstAlign >= 16) &&
1799 (SrcAlign == 0 || SrcAlign >= 16)))) {
1800 // FIXME: Check if unaligned 32-byte accesses are slow.
1801 if (Size >= 32 && Subtarget.hasAVX()) {
1802 // Although this isn't a well-supported type for AVX1, we'll let
1803 // legalization and shuffle lowering produce the optimal codegen. If we
1804 // choose an optimal type with a vector element larger than a byte,
1805 // getMemsetStores() may create an intermediate splat (using an integer
1806 // multiply) before we splat as a vector.
1809 if (Subtarget.hasSSE2())
1811 // TODO: Can SSE1 handle a byte vector?
1812 if (Subtarget.hasSSE1())
1814 } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
1815 !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
1816     // Do not use f64 to lower memcpy if the source is a string constant. It's
1817     // better to use i32 to avoid the loads.
1818 // Also, do not use f64 to lower memset unless this is a memset of zeros.
1819 // The gymnastics of splatting a byte value into an XMM register and then
1820 // only using 8-byte stores (because this is a CPU with slow unaligned
1821 // 16-byte accesses) makes that a loser.
1825 // This is a compromise. If we reach here, unaligned accesses may be slow on
1826 // this target. However, creating smaller, aligned accesses could be even
1827 // slower and would certainly be a lot more code.
1828 if (Subtarget.is64Bit() && Size >= 8)
1833 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1835 return X86ScalarSSEf32;
1836 else if (VT == MVT::f64)
1837 return X86ScalarSSEf64;
1842 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1847 switch (VT.getSizeInBits()) {
1849 // 8-byte and under are always assumed to be fast.
1853 *Fast = !Subtarget.isUnalignedMem16Slow();
1856 *Fast = !Subtarget.isUnalignedMem32Slow();
1858 // TODO: What about AVX-512 (512-bit) accesses?
1861 // Misaligned accesses of any size are always allowed.
1865 /// Return the entry encoding for a jump table in the
1866 /// current function. The returned value is a member of the
1867 /// MachineJumpTableInfo::JTEntryKind enum.
1868 unsigned X86TargetLowering::getJumpTableEncoding() const {
1869 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1871 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
1872 return MachineJumpTableInfo::EK_Custom32;
1874 // Otherwise, use the normal jump table encoding heuristics.
1875 return TargetLowering::getJumpTableEncoding();
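// Illustrative sketch (assumption, not from the original source): when
// compiling position-independent 32-bit ELF code with GOT-style PIC, each
// jump table entry is emitted as "label@GOTOFF" (see LowerCustomJumpTableEntry
// below); other configurations use the default encoding.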
1878 bool X86TargetLowering::useSoftFloat() const {
1879 return Subtarget.useSoftFloat();
1882 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
1883 ArgListTy &Args) const {
1885 // Only relabel X86-32 for C / Stdcall CCs.
1886 if (Subtarget.is64Bit())
1888 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
1890 unsigned ParamRegs = 0;
1891 if (auto *M = MF->getFunction()->getParent())
1892 ParamRegs = M->getNumberRegisterParameters();
1894   // Mark the first N integer arguments as being passed in registers.
1895 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
1896 Type *T = Args[Idx].Ty;
1897 if (T->isPointerTy() || T->isIntegerTy())
1898 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
1899 unsigned numRegs = 1;
1900 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
1902 if (ParamRegs < numRegs)
1904 ParamRegs -= numRegs;
1905 Args[Idx].IsInReg = true;
1911 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1912 const MachineBasicBlock *MBB,
1913 unsigned uid,MCContext &Ctx) const{
1914 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
1915 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1917 return MCSymbolRefExpr::create(MBB->getSymbol(),
1918 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1921 /// Returns relocation base for the given PIC jumptable.
1922 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1923 SelectionDAG &DAG) const {
1924 if (!Subtarget.is64Bit())
1925 // This doesn't have SDLoc associated with it, but is not really the
1926 // same as a Register.
1927 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
1928 getPointerTy(DAG.getDataLayout()));
1932 /// This returns the relocation base for the given PIC jumptable,
1933 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1934 const MCExpr *X86TargetLowering::
1935 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1936 MCContext &Ctx) const {
1937 // X86-64 uses RIP relative addressing based on the jump table label.
1938 if (Subtarget.isPICStyleRIPRel())
1939 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1941 // Otherwise, the reference is relative to the PIC base.
1942 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
1945 std::pair<const TargetRegisterClass *, uint8_t>
1946 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1948 const TargetRegisterClass *RRC = nullptr;
1950 switch (VT.SimpleTy) {
1952 return TargetLowering::findRepresentativeClass(TRI, VT);
1953 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1954 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1957 RRC = &X86::VR64RegClass;
1959 case MVT::f32: case MVT::f64:
1960 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1961 case MVT::v4f32: case MVT::v2f64:
1962 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
1963 case MVT::v8f32: case MVT::v4f64:
1964 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
1965 case MVT::v16f32: case MVT::v8f64:
1966 RRC = &X86::VR128XRegClass;
1969 return std::make_pair(RRC, Cost);
1972 unsigned X86TargetLowering::getAddressSpace() const {
1973 if (Subtarget.is64Bit())
1974 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
1978 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
1979 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
1980 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
1983 static Constant* SegmentOffset(IRBuilder<> &IRB,
1984 unsigned Offset, unsigned AddressSpace) {
1985 return ConstantExpr::getIntToPtr(
1986 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
1987 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
1990 Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
1991 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
1992 // tcbhead_t; use it instead of the usual global variable (see
1993 // sysdeps/{i386,x86_64}/nptl/tls.h)
1994 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
1995 if (Subtarget.isTargetFuchsia()) {
1996 // <magenta/tls.h> defines MX_TLS_STACK_GUARD_OFFSET with this value.
1997 return SegmentOffset(IRB, 0x10, getAddressSpace());
1999 // %fs:0x28, unless we're using a Kernel code model, in which case
2000 // it's %gs:0x28. gs:0x14 on i386.
2001 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2002 return SegmentOffset(IRB, Offset, getAddressSpace());
2006 return TargetLowering::getIRStackGuard(IRB);
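// Illustrative sketch (assumption, not from the original source): on x86-64
// Linux with glibc the stack-guard load produced from the address above is
// typically "mov %fs:0x28, %reg", and on i386 it is "mov %gs:0x14, %reg";
// targets without a TLS guard slot fall back to the default stack guard.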
2009 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2010   // The MSVC CRT provides functionality for stack protection.
2011 if (Subtarget.getTargetTriple().isOSMSVCRT()) {
2012 // MSVC CRT has a global variable holding security cookie.
2013 M.getOrInsertGlobal("__security_cookie",
2014 Type::getInt8PtrTy(M.getContext()));
2016 // MSVC CRT has a function to validate security cookie.
2017 auto *SecurityCheckCookie = cast<Function>(
2018 M.getOrInsertFunction("__security_check_cookie",
2019 Type::getVoidTy(M.getContext()),
2020 Type::getInt8PtrTy(M.getContext())));
2021 SecurityCheckCookie->setCallingConv(CallingConv::X86_FastCall);
2022 SecurityCheckCookie->addAttribute(1, Attribute::AttrKind::InReg);
2025 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2026 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2028 TargetLowering::insertSSPDeclarations(M);
2031 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2032 // MSVC CRT has a global variable holding security cookie.
2033 if (Subtarget.getTargetTriple().isOSMSVCRT())
2034 return M.getGlobalVariable("__security_cookie");
2035 return TargetLowering::getSDagStackGuard(M);
2038 Value *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2039 // MSVC CRT has a function to validate security cookie.
2040 if (Subtarget.getTargetTriple().isOSMSVCRT())
2041 return M.getFunction("__security_check_cookie");
2042 return TargetLowering::getSSPStackGuardCheck(M);
2045 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2046 if (Subtarget.getTargetTriple().isOSContiki())
2047 return getDefaultSafeStackPointerLocation(IRB, false);
2049 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2050 // definition of TLS_SLOT_SAFESTACK in
2051 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2052 if (Subtarget.isTargetAndroid()) {
2053 // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
2055 unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2056 return SegmentOffset(IRB, Offset, getAddressSpace());
2059 // Fuchsia is similar.
2060 if (Subtarget.isTargetFuchsia()) {
2061 // <magenta/tls.h> defines MX_TLS_UNSAFE_SP_OFFSET with this value.
2062 return SegmentOffset(IRB, 0x18, getAddressSpace());
2065 return TargetLowering::getSafeStackPointerLocation(IRB);
2068 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2069 unsigned DestAS) const {
2070 assert(SrcAS != DestAS && "Expected different address spaces!");
2072 return SrcAS < 256 && DestAS < 256;
2075 //===----------------------------------------------------------------------===//
2076 // Return Value Calling Convention Implementation
2077 //===----------------------------------------------------------------------===//
2079 #include "X86GenCallingConv.inc"
2081 bool X86TargetLowering::CanLowerReturn(
2082 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2083 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2084 SmallVector<CCValAssign, 16> RVLocs;
2085 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2086 return CCInfo.CheckReturn(Outs, RetCC_X86);
2089 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2090 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2094 /// Lowers mask values (v*i1) to the local register values
2095 /// \returns DAG node after lowering to register type
2096 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2097 const SDLoc &Dl, SelectionDAG &DAG) {
2098 EVT ValVT = ValArg.getValueType();
2100 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2101 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2102 // Two stage lowering might be required
2103 // bitcast: v8i1 -> i8 / v16i1 -> i16
2104 // anyextend: i8 -> i32 / i16 -> i32
2105 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2106 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2107 if (ValLoc == MVT::i32)
2108 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2110 } else if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2111 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2112 // One stage lowering is required
2113 // bitcast: v32i1 -> i32 / v64i1 -> i64
2114 return DAG.getBitcast(ValLoc, ValArg);
2116 return DAG.getNode(ISD::SIGN_EXTEND, Dl, ValLoc, ValArg);
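// Illustrative sketch (not from the original source): a v16i1 mask returned in
// an i32 location takes the two-stage path above, i.e. bitcast v16i1 -> i16
// followed by any_extend i16 -> i32, while a v32i1 mask returned in an i32
// location needs only a single bitcast.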
2119 /// Breaks a v64i1 value into two registers and adds the new node to the DAG.
2120 static void Passv64i1ArgInRegs(
2121 const SDLoc &Dl, SelectionDAG &DAG, SDValue Chain, SDValue &Arg,
2122 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, CCValAssign &VA,
2123 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2124 assert((Subtarget.hasBWI() || Subtarget.hasBMI()) &&
2125 "Expected AVX512BW or AVX512BMI target!");
2126 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2127 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2128 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2129 "The value should reside in two registers");
2131 // Before splitting the value we cast it to i64
2132 Arg = DAG.getBitcast(MVT::i64, Arg);
2134 // Splitting the value into two i32 types
2136 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2137 DAG.getConstant(0, Dl, MVT::i32));
2138 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2139 DAG.getConstant(1, Dl, MVT::i32));
2141   // Attach the two i32 values to their corresponding registers
2142 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2143 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
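// Illustrative sketch (not from the original source): on a 32-bit target a
// v64i1 mask argument is bitcast to i64 and split into two i32 halves that are
// passed in the two consecutive registers described by VA and NextVA.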
2147 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2149 const SmallVectorImpl<ISD::OutputArg> &Outs,
2150 const SmallVectorImpl<SDValue> &OutVals,
2151 const SDLoc &dl, SelectionDAG &DAG) const {
2152 MachineFunction &MF = DAG.getMachineFunction();
2153 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2155 // In some cases we need to disable registers from the default CSR list.
2156 // For example, when they are used for argument passing.
2157 bool ShouldDisableCalleeSavedRegister =
2158 CallConv == CallingConv::X86_RegCall ||
2159 MF.getFunction()->hasFnAttribute("no_caller_saved_registers");
2161 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2162 report_fatal_error("X86 interrupts may not return any value");
2164 SmallVector<CCValAssign, 16> RVLocs;
2165 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2166 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2169 SmallVector<SDValue, 6> RetOps;
2170 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2171 // Operand #1 = Bytes To Pop
2172 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2175 // Copy the result values into the output registers.
2176 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2178 CCValAssign &VA = RVLocs[I];
2179 assert(VA.isRegLoc() && "Can only return in registers!");
2181 // Add the register to the CalleeSaveDisableRegs list.
2182 if (ShouldDisableCalleeSavedRegister)
2183 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2185 SDValue ValToCopy = OutVals[OutsIndex];
2186 EVT ValVT = ValToCopy.getValueType();
2188 // Promote values to the appropriate types.
2189 if (VA.getLocInfo() == CCValAssign::SExt)
2190 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2191 else if (VA.getLocInfo() == CCValAssign::ZExt)
2192 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2193 else if (VA.getLocInfo() == CCValAssign::AExt) {
2194 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2195 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2197 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2199 else if (VA.getLocInfo() == CCValAssign::BCvt)
2200 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2202 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2203 "Unexpected FP-extend for return value.");
2205 // If this is x86-64, and we disabled SSE, we can't return FP values,
2206 // or SSE or MMX vectors.
2207 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2208 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2209 (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
2210 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2211 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2212 } else if (ValVT == MVT::f64 &&
2213 (Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
2214 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2215 // llvm-gcc has never done it right and no one has noticed, so this
2216 // should be OK for now.
2217 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2218 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2221 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2222 // the RET instruction and handled by the FP Stackifier.
2223 if (VA.getLocReg() == X86::FP0 ||
2224 VA.getLocReg() == X86::FP1) {
2225 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2226 // change the value to the FP stack register class.
2227 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2228 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2229 RetOps.push_back(ValToCopy);
2230 // Don't emit a copytoreg.
2234 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2235 // which is returned in RAX / RDX.
2236 if (Subtarget.is64Bit()) {
2237 if (ValVT == MVT::x86mmx) {
2238 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2239 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2240 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2242 // If we don't have SSE2 available, convert to v4f32 so the generated
2243 // register is legal.
2244 if (!Subtarget.hasSSE2())
2245 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2250 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2252 if (VA.needsCustom()) {
2253 assert(VA.getValVT() == MVT::v64i1 &&
2254 "Currently the only custom case is when we split v64i1 to 2 regs");
2256 Passv64i1ArgInRegs(dl, DAG, Chain, ValToCopy, RegsToPass, VA, RVLocs[++I],
2259 assert(2 == RegsToPass.size() &&
2260 "Expecting two registers after Pass64BitArgInRegs");
2262 // Add the second register to the CalleeSaveDisableRegs list.
2263 if (ShouldDisableCalleeSavedRegister)
2264 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2266 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2269 // Add nodes to the DAG and add the values into the RetOps list
2270 for (auto &Reg : RegsToPass) {
2271 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
2272 Flag = Chain.getValue(1);
2273 RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2277 // Swift calling convention does not require we copy the sret argument
2278 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2280 // All x86 ABIs require that for returning structs by value we copy
2281 // the sret argument into %rax/%eax (depending on ABI) for the return.
2282 // We saved the argument into a virtual register in the entry block,
2283 // so now we copy the value out and into %rax/%eax.
2285 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2286 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2287 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2288 // either case FuncInfo->setSRetReturnReg() will have been called.
2289 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2290 // When we have both sret and another return value, we should use the
2291 // original Chain stored in RetOps[0], instead of the current Chain updated
2292 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
2294 // For the case of sret and another return value, we have
2295 // Chain_0 at the function entry
2296 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2297 // If we use Chain_1 in getCopyFromReg, we will have
2298 // Val = getCopyFromReg(Chain_1)
2299 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2301 // getCopyToReg(Chain_0) will be glued together with
2302 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2303 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2304 // Data dependency from Unit B to Unit A due to usage of Val in
2305 // getCopyToReg(Chain_1, Val)
2306 // Chain dependency from Unit A to Unit B
2308 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
2309 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2310 getPointerTy(MF.getDataLayout()));
2313 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2314 X86::RAX : X86::EAX;
2315 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2316 Flag = Chain.getValue(1);
2318 // RAX/EAX now acts like a return value.
2320 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2322 // Add the returned register to the CalleeSaveDisableRegs list.
2323 if (ShouldDisableCalleeSavedRegister)
2324 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2327 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2328 const MCPhysReg *I =
2329 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2332 if (X86::GR64RegClass.contains(*I))
2333 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2335 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2339 RetOps[0] = Chain; // Update chain.
2341 // Add the flag if we have it.
2343 RetOps.push_back(Flag);
2345 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2346 if (CallConv == CallingConv::X86_INTR)
2347 opcode = X86ISD::IRET;
2348 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2351 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2352 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2355 SDValue TCChain = Chain;
2356 SDNode *Copy = *N->use_begin();
2357 if (Copy->getOpcode() == ISD::CopyToReg) {
2358 // If the copy has a glue operand, we conservatively assume it isn't safe to
2359 // perform a tail call.
2360 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2362 TCChain = Copy->getOperand(0);
2363 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2366 bool HasRet = false;
2367 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2369 if (UI->getOpcode() != X86ISD::RET_FLAG)
2371 // If we are returning more than one value, we can definitely
2372     // not make a tail call; see PR19530.
2373 if (UI->getNumOperands() > 4)
2375 if (UI->getNumOperands() == 4 &&
2376 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2388 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2389 ISD::NodeType ExtendKind) const {
2390 MVT ReturnMVT = MVT::i32;
2392 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2393 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2394 // The ABI does not require i1, i8 or i16 to be extended.
2396 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2397 // always extending i8/i16 return values, so keep doing that for now.
2399 ReturnMVT = MVT::i8;
2402 EVT MinVT = getRegisterType(Context, ReturnMVT);
2403 return VT.bitsLT(MinVT) ? MinVT : VT;
2406 /// Reads two 32 bit registers and creates a 64 bit mask value.
2407 /// \param VA The current 32 bit value that needs to be assigned.
2408 /// \param NextVA The next 32 bit value that needs to be assigned.
2409 /// \param Root The parent DAG node.
2410 /// \param [in,out] InFlag Represents the SDValue in the parent DAG node for
2411 ///                        glue purposes. If the DAG is already using a
2412 ///                        physical register instead of a virtual one, we
2413 ///                        should glue our new SDValue to the InFlag SDValue.
2414 /// \return A new 64 bit wide SDValue.
2415 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2416 SDValue &Root, SelectionDAG &DAG,
2417 const SDLoc &Dl, const X86Subtarget &Subtarget,
2418 SDValue *InFlag = nullptr) {
2419 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2420 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2421 assert(VA.getValVT() == MVT::v64i1 &&
2422 "Expecting first location of 64 bit width type");
2423 assert(NextVA.getValVT() == VA.getValVT() &&
2424 "The locations should have the same type");
2425 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2426 "The values should reside in two registers");
2430 SDValue ArgValueLo, ArgValueHi;
2432 MachineFunction &MF = DAG.getMachineFunction();
2433 const TargetRegisterClass *RC = &X86::GR32RegClass;
2435 // Read a 32 bit value from the registers
2436 if (nullptr == InFlag) {
2437 // When no physical register is present,
2438 // create an intermediate virtual register
2439 Reg = MF.addLiveIn(VA.getLocReg(), RC);
2440 ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2441 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2442 ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2444 // When a physical register is available read the value from it and glue
2445 // the reads together.
2447 DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2448 *InFlag = ArgValueLo.getValue(2);
2450 DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2451 *InFlag = ArgValueHi.getValue(2);
2454 // Convert the i32 type into v32i1 type
2455 Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2457 // Convert the i32 type into v32i1 type
2458 Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2460 // Concatenate the two values together
2461 return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
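// Illustrative sketch (not from the original source): this is the inverse of
// Passv64i1ArgInRegs: the two incoming i32 halves are each bitcast to v32i1
// and concatenated back into the original v64i1 value.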
2464 /// The function will lower a register of various sizes (8/16/32/64)
2465 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
2466 /// \returns a DAG node containing the operand after lowering to the mask type.
2467 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2468 const EVT &ValLoc, const SDLoc &Dl,
2469 SelectionDAG &DAG) {
2470 SDValue ValReturned = ValArg;
2472 if (ValVT == MVT::v1i1)
2473 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2475 if (ValVT == MVT::v64i1) {
2476     // On 32 bit machines, this case is handled by getv64i1Argument.
2477 assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2478     // On 64 bit machines there is no need to truncate the value; a bitcast suffices.
2481 switch (ValVT.getSimpleVT().SimpleTy) {
2492 llvm_unreachable("Expecting a vector of i1 types");
2495 ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2497 return DAG.getBitcast(ValVT, ValReturned);
2500 /// Lower the result values of a call into the
2501 /// appropriate copies out of appropriate physical registers.
2503 SDValue X86TargetLowering::LowerCallResult(
2504 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2505 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2506 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2507 uint32_t *RegMask) const {
2509 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2510 // Assign locations to each value returned by this call.
2511 SmallVector<CCValAssign, 16> RVLocs;
2512 bool Is64Bit = Subtarget.is64Bit();
2513 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2515 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2517 // Copy all of the result registers out of their specified physreg.
2518 for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
2520 CCValAssign &VA = RVLocs[I];
2521 EVT CopyVT = VA.getLocVT();
2523 // In some calling conventions we need to remove the used registers
2524 // from the register mask.
2526 for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
2527 SubRegs.isValid(); ++SubRegs)
2528 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
2531 // If this is x86-64, and we disabled SSE, we can't return FP values
2532 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
2533 ((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
2534 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2535 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2538 // If we prefer to use the value in xmm registers, copy it out as f80 and
2539 // use a truncate to move it from fp stack reg to xmm reg.
2540 bool RoundAfterCopy = false;
2541 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2542 isScalarFPTypeInSSEReg(VA.getValVT())) {
2543 if (!Subtarget.hasX87())
2544 report_fatal_error("X87 register return with X87 disabled");
2546 RoundAfterCopy = (CopyVT != VA.getLocVT());
2550 if (VA.needsCustom()) {
2551 assert(VA.getValVT() == MVT::v64i1 &&
2552 "Currently the only custom case is when we split v64i1 to 2 regs");
2554 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
2556 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
2558 Val = Chain.getValue(0);
2559 InFlag = Chain.getValue(2);
2563 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2564 // This truncation won't change the value.
2565 DAG.getIntPtrConstant(1, dl));
2567 if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
2568 if (VA.getValVT().isVector() &&
2569 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
2570 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
2571 // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
2572 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
2574 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
2577 InVals.push_back(Val);
2583 //===----------------------------------------------------------------------===//
2584 // C & StdCall & Fast Calling Convention implementation
2585 //===----------------------------------------------------------------------===//
2586 //  The StdCall calling convention is the standard convention for many Windows
2587 //  API routines. It differs from the C calling convention only slightly: the
2588 //  callee cleans up the stack rather than the caller, and symbols are
2589 //  decorated in a particular way. It doesn't support any vector arguments.
2590 // For info on fast calling convention see Fast Calling Convention (tail call)
2591 // implementation LowerX86_32FastCCCallTo.
2593 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2595 enum StructReturnType {
2600 static StructReturnType
2601 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsMCU) {
2603 return NotStructReturn;
2605 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2606 if (!Flags.isSRet())
2607 return NotStructReturn;
2608 if (Flags.isInReg() || IsMCU)
2609 return RegStructReturn;
2610 return StackStructReturn;
2613 /// Determines whether a function uses struct return semantics.
2614 static StructReturnType
2615 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins, bool IsMCU) {
2617 return NotStructReturn;
2619 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2620 if (!Flags.isSRet())
2621 return NotStructReturn;
2622 if (Flags.isInReg() || IsMCU)
2623 return RegStructReturn;
2624 return StackStructReturn;
2627 /// Make a copy of an aggregate at address specified by "Src" to address
2628 /// "Dst" with size and alignment information specified by the specific
2629 /// parameter attribute. The copy will be passed as a byval function parameter.
2630 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
2631 SDValue Chain, ISD::ArgFlagsTy Flags,
2632 SelectionDAG &DAG, const SDLoc &dl) {
2633 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
2635 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2636 /*isVolatile*/false, /*AlwaysInline=*/true,
2637 /*isTailCall*/false,
2638 MachinePointerInfo(), MachinePointerInfo());
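// Illustrative sketch (not from the original source): for a byval argument
// declared as, say, a 24-byte struct, this emits an always-inlined memcpy of
// 24 bytes from the caller's copy (Src) to the outgoing argument slot (Dst),
// using the alignment recorded in the parameter's attributes.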
2641 /// Return true if the calling convention is one that we can guarantee TCO for.
2642 static bool canGuaranteeTCO(CallingConv::ID CC) {
2643 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2644 CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
2645 CC == CallingConv::HHVM);
2648 /// Return true if we might ever do TCO for calls with this calling convention.
2649 static bool mayTailCallThisCC(CallingConv::ID CC) {
2651 // C calling conventions:
2652 case CallingConv::C:
2653 case CallingConv::X86_64_Win64:
2654 case CallingConv::X86_64_SysV:
2655 // Callee pop conventions:
2656 case CallingConv::X86_ThisCall:
2657 case CallingConv::X86_StdCall:
2658 case CallingConv::X86_VectorCall:
2659 case CallingConv::X86_FastCall:
2662 return canGuaranteeTCO(CC);
2666 /// Return true if the function is being made into a tailcall target by
2667 /// changing its ABI.
2668 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
2669 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
2672 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2674 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2675 if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2678 ImmutableCallSite CS(CI);
2679 CallingConv::ID CalleeCC = CS.getCallingConv();
2680 if (!mayTailCallThisCC(CalleeCC))
2687 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
2688 const SmallVectorImpl<ISD::InputArg> &Ins,
2689 const SDLoc &dl, SelectionDAG &DAG,
2690 const CCValAssign &VA,
2691 MachineFrameInfo &MFI, unsigned i) const {
2692 // Create the nodes corresponding to a load from this parameter slot.
2693 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2694 bool AlwaysUseMutable = shouldGuaranteeTCO(
2695 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2696 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2698 MVT PtrVT = getPointerTy(DAG.getDataLayout());
2700   // If the value is passed by pointer, we have the address passed instead of
2701   // the value itself. No need to extend if the mask value and location share the same
2703 bool ExtendedInMem =
2704 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
2705 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
2707 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
2708 ValVT = VA.getLocVT();
2710 ValVT = VA.getValVT();
2712   // Calculate the SP offset of an interrupt parameter, re-arranging the slot
2713   // normally taken by a return address.
2715 if (CallConv == CallingConv::X86_INTR) {
2716 // X86 interrupts may take one or two arguments.
2717     // Unlike a regular call, there will be no return address on the stack.
2718     // The offset of the last argument needs to be set to -4/-8 bytes, while the
2719     // offset of the first of two arguments should be set to 0 bytes.
2720 Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1);
2721 if (Subtarget.is64Bit() && Ins.size() == 2) {
2722 // The stack pointer needs to be realigned for 64 bit handlers with error
2723 // code, so the argument offset changes by 8 bytes.
2728 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2729 // changed with more analysis.
2730   // In the case of tail call optimization, mark all arguments mutable, since
2731   // they could be overwritten by the lowering of arguments during a tail call.
2732 if (Flags.isByVal()) {
2733 unsigned Bytes = Flags.getByValSize();
2734 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2735 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2736 // Adjust SP offset of interrupt parameter.
2737 if (CallConv == CallingConv::X86_INTR) {
2738 MFI.setObjectOffset(FI, Offset);
2740 return DAG.getFrameIndex(FI, PtrVT);
2743 // This is an argument in memory. We might be able to perform copy elision.
2744 if (Flags.isCopyElisionCandidate()) {
2745 EVT ArgVT = Ins[i].ArgVT;
2747 if (Ins[i].PartOffset == 0) {
2748 // If this is a one-part value or the first part of a multi-part value,
2749 // create a stack object for the entire argument value type and return a
2750 // load from our portion of it. This assumes that if the first part of an
2751 // argument is in memory, the rest will also be in memory.
2752 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
2753 /*Immutable=*/false);
2754 PartAddr = DAG.getFrameIndex(FI, PtrVT);
2756 ValVT, dl, Chain, PartAddr,
2757 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
2759 // This is not the first piece of an argument in memory. See if there is
2760 // already a fixed stack object including this offset. If so, assume it
2761 // was created by the PartOffset == 0 branch above and create a load from
2762 // the appropriate offset into it.
2763 int64_t PartBegin = VA.getLocMemOffset();
2764 int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
2765 int FI = MFI.getObjectIndexBegin();
2766 for (; MFI.isFixedObjectIndex(FI); ++FI) {
2767 int64_t ObjBegin = MFI.getObjectOffset(FI);
2768 int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
2769 if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
2772 if (MFI.isFixedObjectIndex(FI)) {
2774 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
2775 DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
2777 ValVT, dl, Chain, Addr,
2778 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
2779 Ins[i].PartOffset));
2784 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
2785 VA.getLocMemOffset(), isImmutable);
2787 // Set SExt or ZExt flag.
2788 if (VA.getLocInfo() == CCValAssign::ZExt) {
2789 MFI.setObjectZExt(FI, true);
2790 } else if (VA.getLocInfo() == CCValAssign::SExt) {
2791 MFI.setObjectSExt(FI, true);
2794 // Adjust SP offset of interrupt parameter.
2795 if (CallConv == CallingConv::X86_INTR) {
2796 MFI.setObjectOffset(FI, Offset);
2799 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
2800 SDValue Val = DAG.getLoad(
2801 ValVT, dl, Chain, FIN,
2802 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
2803 return ExtendedInMem
2804 ? (VA.getValVT().isVector()
2805 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
2806 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
2810 // FIXME: Get this from tablegen.
2811 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2812 const X86Subtarget &Subtarget) {
2813 assert(Subtarget.is64Bit());
2815 if (Subtarget.isCallingConvWin64(CallConv)) {
2816 static const MCPhysReg GPR64ArgRegsWin64[] = {
2817 X86::RCX, X86::RDX, X86::R8, X86::R9
2819 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2822 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2823 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2825 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2828 // FIXME: Get this from tablegen.
2829 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2830 CallingConv::ID CallConv,
2831 const X86Subtarget &Subtarget) {
2832 assert(Subtarget.is64Bit());
2833 if (Subtarget.isCallingConvWin64(CallConv)) {
2834 // The XMM registers which might contain var arg parameters are shadowed
// in their paired GPR. So we only need to save the GPR to their home slots.
2837 // TODO: __vectorcall will change this.
2841 const Function *Fn = MF.getFunction();
2842 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2843 bool isSoftFloat = Subtarget.useSoftFloat();
2844 assert(!(isSoftFloat && NoImplicitFloatOps) &&
2845 "SSE register cannot be used when SSE is disabled!");
2846 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
// Kernel mode asks for SSE to be disabled, so there are no XMM argument
// registers.
2851 static const MCPhysReg XMMArgRegs64Bit[] = {
2852 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2853 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2855 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2859 static bool isSortedByValueNo(const SmallVectorImpl<CCValAssign> &ArgLocs) {
2860 return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
2861 [](const CCValAssign &A, const CCValAssign &B) -> bool {
2862 return A.getValNo() < B.getValNo();
2867 SDValue X86TargetLowering::LowerFormalArguments(
2868 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2869 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2870 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2871 MachineFunction &MF = DAG.getMachineFunction();
2872 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2873 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
2875 const Function *Fn = MF.getFunction();
2876 if (Fn->hasExternalLinkage() &&
2877 Subtarget.isTargetCygMing() &&
2878 Fn->getName() == "main")
2879 FuncInfo->setForceFramePointer(true);
2881 MachineFrameInfo &MFI = MF.getFrameInfo();
2882 bool Is64Bit = Subtarget.is64Bit();
2883 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
2886 !(isVarArg && canGuaranteeTCO(CallConv)) &&
2887 "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
2889 if (CallConv == CallingConv::X86_INTR) {
2890 bool isLegal = Ins.size() == 1 ||
2891 (Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) ||
2892 (!Is64Bit && Ins[1].VT == MVT::i32)));
2894 report_fatal_error("X86 interrupts may take one or two arguments");
2897 // Assign locations to all of the incoming arguments.
2898 SmallVector<CCValAssign, 16> ArgLocs;
2899 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2901 // Allocate shadow area for Win64.
2903 CCInfo.AllocateStack(32, 8);
2905 CCInfo.AnalyzeArguments(Ins, CC_X86);
// In vectorcall calling convention a second pass is required for the HVA
// registers.
2909 if (CallingConv::X86_VectorCall == CallConv) {
2910 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
// The next loop assumes that the locations are in the same order as the
// Ins array.
2915 assert(isSortedByValueNo(ArgLocs) &&
2916 "Argument Location list must be sorted before lowering");
2919 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
2921 assert(InsIndex < Ins.size() && "Invalid Ins index");
2922 CCValAssign &VA = ArgLocs[I];
2924 if (VA.isRegLoc()) {
2925 EVT RegVT = VA.getLocVT();
2926 if (VA.needsCustom()) {
2928 VA.getValVT() == MVT::v64i1 &&
2929 "Currently the only custom case is when we split v64i1 to 2 regs");
// In the regcall calling convention, v64i1 values that are compiled for a
// 32-bit target are split up into two registers.
2934 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
2936 const TargetRegisterClass *RC;
2937 if (RegVT == MVT::i32)
2938 RC = &X86::GR32RegClass;
2939 else if (Is64Bit && RegVT == MVT::i64)
2940 RC = &X86::GR64RegClass;
2941 else if (RegVT == MVT::f32)
2942 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
2943 else if (RegVT == MVT::f64)
2944 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
2945 else if (RegVT == MVT::f80)
2946 RC = &X86::RFP80RegClass;
2947 else if (RegVT == MVT::f128)
2948 RC = &X86::FR128RegClass;
2949 else if (RegVT.is512BitVector())
2950 RC = &X86::VR512RegClass;
2951 else if (RegVT.is256BitVector())
2952 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
2953 else if (RegVT.is128BitVector())
2954 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
2955 else if (RegVT == MVT::x86mmx)
2956 RC = &X86::VR64RegClass;
2957 else if (RegVT == MVT::v1i1)
2958 RC = &X86::VK1RegClass;
2959 else if (RegVT == MVT::v8i1)
2960 RC = &X86::VK8RegClass;
2961 else if (RegVT == MVT::v16i1)
2962 RC = &X86::VK16RegClass;
2963 else if (RegVT == MVT::v32i1)
2964 RC = &X86::VK32RegClass;
2965 else if (RegVT == MVT::v64i1)
2966 RC = &X86::VK64RegClass;
2968 llvm_unreachable("Unknown argument type!");
2970 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2971 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2974 // If this is an 8 or 16-bit value, it is really passed promoted to 32
// bits. Insert an assert[sz]ext to capture this, then truncate to the right
// size.
2977 if (VA.getLocInfo() == CCValAssign::SExt)
2978 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2979 DAG.getValueType(VA.getValVT()));
2980 else if (VA.getLocInfo() == CCValAssign::ZExt)
2981 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2982 DAG.getValueType(VA.getValVT()));
2983 else if (VA.getLocInfo() == CCValAssign::BCvt)
2984 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
2986 if (VA.isExtInLoc()) {
2987 // Handle MMX values passed in XMM regs.
2988 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
2989 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2990 else if (VA.getValVT().isVector() &&
2991 VA.getValVT().getScalarType() == MVT::i1 &&
2992 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
2993 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
2994 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
2995 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
2997 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3000 assert(VA.isMemLoc());
3002 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
// If the value is passed via a pointer, do a load.
3006 if (VA.getLocInfo() == CCValAssign::Indirect)
3008 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3010 InVals.push_back(ArgValue);
3013 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
// The Swift calling convention does not require us to copy the sret argument
// into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3016 if (CallConv == CallingConv::Swift)
3019 // All x86 ABIs require that for returning structs by value we copy the
3020 // sret argument into %rax/%eax (depending on ABI) for the return. Save
// the argument into a virtual register so that we can access it from the
// return points.
3023 if (Ins[I].Flags.isSRet()) {
3024 unsigned Reg = FuncInfo->getSRetReturnReg();
3026 MVT PtrTy = getPointerTy(DAG.getDataLayout());
3027 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3028 FuncInfo->setSRetReturnReg(Reg);
3030 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3031 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3036 unsigned StackSize = CCInfo.getNextStackOffset();
3037 // Align stack specially for tail calls.
3038 if (shouldGuaranteeTCO(CallConv,
3039 MF.getTarget().Options.GuaranteedTailCallOpt))
3040 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3042 // If the function takes variable number of arguments, make a frame index for
3043 // the start of the first vararg value... for expansion of llvm.va_start. We
3044 // can skip this if there are no va_start calls.
3045 if (MFI.hasVAStart() &&
3046 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
3047 CallConv != CallingConv::X86_ThisCall))) {
3048 FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
3051 // Figure out if XMM registers are in use.
3052 assert(!(Subtarget.useSoftFloat() &&
3053 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
3054 "SSE register cannot be used when SSE is disabled!");
3056 // 64-bit calling conventions support varargs and register parameters, so we
3057 // have to do extra work to spill them in the prologue.
3058 if (Is64Bit && isVarArg && MFI.hasVAStart()) {
3059 // Find the first unallocated argument registers.
3060 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3061 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
3062 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3063 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3064 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3065 "SSE register cannot be used when SSE is disabled!");
3067 // Gather all the live in physical registers.
3068 SmallVector<SDValue, 6> LiveGPRs;
3069 SmallVector<SDValue, 8> LiveXMMRegs;
3071 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3072 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
3074 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
3076 if (!ArgXMMs.empty()) {
3077 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3078 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
3079 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
3080 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
3081 LiveXMMRegs.push_back(
3082 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
3087 // Get to the caller-allocated home save location. Add 8 to account
3088 // for the return address.
3089 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
3090 FuncInfo->setRegSaveFrameIndex(
3091 MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3092 // Fixup to set vararg frame on shadow area (4 x i64).
3094 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3096 // For X86-64, if there are vararg parameters that are passed via
3097 // registers, then we must store them to their spots on the stack so
3098 // they may be loaded by dereferencing the result of va_next.
3099 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3100 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3101 FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
3102 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
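// For reference, on SysV x86-64 this save area is 6 GPRs * 8 bytes = 48 bytes
// followed by 8 XMM registers * 16 bytes = 128 bytes (176 bytes total), which
// matches the reg_save_area layout va_arg expects together with the
// gp_offset/fp_offset values set above.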
3105 // Store the integer parameter registers.
3106 SmallVector<SDValue, 8> MemOps;
3107 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3108 getPointerTy(DAG.getDataLayout()));
3109 unsigned Offset = FuncInfo->getVarArgsGPOffset();
3110 for (SDValue Val : LiveGPRs) {
3111 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3112 RSFIN, DAG.getIntPtrConstant(Offset, dl));
3114 DAG.getStore(Val.getValue(1), dl, Val, FIN,
3115 MachinePointerInfo::getFixedStack(
3116 DAG.getMachineFunction(),
3117 FuncInfo->getRegSaveFrameIndex(), Offset));
3118 MemOps.push_back(Store);
3122 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
3123 // Now store the XMM (fp + vector) parameter registers.
3124 SmallVector<SDValue, 12> SaveXMMOps;
3125 SaveXMMOps.push_back(Chain);
3126 SaveXMMOps.push_back(ALVal);
3127 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3128 FuncInfo->getRegSaveFrameIndex(), dl));
3129 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3130 FuncInfo->getVarArgsFPOffset(), dl));
3131 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3133 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
3134 MVT::Other, SaveXMMOps));
3137 if (!MemOps.empty())
3138 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3141 if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
3142 // Find the largest legal vector type.
3143 MVT VecVT = MVT::Other;
3144 // FIXME: Only some x86_32 calling conventions support AVX512.
3145 if (Subtarget.hasAVX512() &&
3146 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
3147 CallConv == CallingConv::Intel_OCL_BI)))
3148 VecVT = MVT::v16f32;
3149 else if (Subtarget.hasAVX())
3151 else if (Subtarget.hasSSE2())
3154 // We forward some GPRs and some vector types.
3155 SmallVector<MVT, 2> RegParmTypes;
3156 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
3157 RegParmTypes.push_back(IntVT);
3158 if (VecVT != MVT::Other)
3159 RegParmTypes.push_back(VecVT);
3161 // Compute the set of forwarded registers. The rest are scratch.
3162 SmallVectorImpl<ForwardedRegister> &Forwards =
3163 FuncInfo->getForwardedMustTailRegParms();
3164 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3166 // Conservatively forward AL on x86_64, since it might be used for varargs.
3167 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
3168 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3169 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3172 // Copy all forwards from physical to virtual registers.
3173 for (ForwardedRegister &F : Forwards) {
3174 // FIXME: Can we use a less constrained schedule?
3175 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3176 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
3177 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
3181 // Some CCs need callee pop.
3182 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3183 MF.getTarget().Options.GuaranteedTailCallOpt)) {
3184 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3185 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
// X86 interrupts must pop the error code (and the alignment padding) if
// present.
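// (On 64-bit that is the 8-byte error code plus 8 bytes of realignment
// padding, hence 16 bytes; on 32-bit it is just the 4-byte error code.)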
3188 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3190 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3191 // If this is an sret function, the return should pop the hidden pointer.
3192 if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3193 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3194 argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3195 FuncInfo->setBytesToPopOnReturn(4);
3199 // RegSaveFrameIndex is X86-64 only.
3200 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3201 if (CallConv == CallingConv::X86_FastCall ||
3202 CallConv == CallingConv::X86_ThisCall)
// fastcall and thiscall functions can't have varargs.
3204 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3207 FuncInfo->setArgumentStackSize(StackSize);
3209 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3210 EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
3211 if (Personality == EHPersonality::CoreCLR) {
3213 // TODO: Add a mechanism to frame lowering that will allow us to indicate
3214 // that we'd prefer this slot be allocated towards the bottom of the frame
3215 // (i.e. near the stack pointer after allocating the frame). Every
3216 // funclet needs a copy of this slot in its (mostly empty) frame, and the
3217 // offset from the bottom of this and each funclet's frame must be the
3218 // same, so the size of funclets' (mostly empty) frames is dictated by
3219 // how far this slot is from the bottom (since they allocate just enough
3220 // space to accommodate holding this slot at the correct offset).
3221 int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
3222 EHInfo->PSPSymFrameIdx = PSPSymFI;
3226 if (CallConv == CallingConv::X86_RegCall ||
3227 Fn->hasFnAttribute("no_caller_saved_registers")) {
3228 const MachineRegisterInfo &MRI = MF.getRegInfo();
3229 for (const auto &Pair : make_range(MRI.livein_begin(), MRI.livein_end()))
3230 MF.getRegInfo().disableCalleeSavedRegister(Pair.first);
3236 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3237 SDValue Arg, const SDLoc &dl,
3239 const CCValAssign &VA,
3240 ISD::ArgFlagsTy Flags) const {
3241 unsigned LocMemOffset = VA.getLocMemOffset();
3242 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3243 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3245 if (Flags.isByVal())
3246 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3248 return DAG.getStore(
3249 Chain, dl, Arg, PtrOff,
3250 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
3253 /// Emit a load of return address if tail call
3254 /// optimization is performed and it is required.
3255 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3256 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3257 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3258 // Adjust the Return address stack slot.
3259 EVT VT = getPointerTy(DAG.getDataLayout());
3260 OutRetAddr = getReturnAddressFrameIndex(DAG);
3262 // Load the "old" Return address.
3263 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3264 return SDValue(OutRetAddr.getNode(), 1);
3267 /// Emit a store of the return address if tail call
3268 /// optimization is performed and it is required (FPDiff!=0).
3269 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3270 SDValue Chain, SDValue RetAddrFrIdx,
3271 EVT PtrVT, unsigned SlotSize,
3272 int FPDiff, const SDLoc &dl) {
3273 // Store the return address to the appropriate stack slot.
3274 if (!FPDiff) return Chain;
3275 // Calculate the new stack slot for the return address.
3276 int NewReturnAddrFI =
3277 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3279 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3280 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3281 MachinePointerInfo::getFixedStack(
3282 DAG.getMachineFunction(), NewReturnAddrFI));
/// Returns a vector_shuffle mask for a movs{s|d} or movd
/// operation of the specified width.
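/// For example, with a 4-element type the mask is <4,1,2,3>: element 0 is
/// taken from V2 and elements 1-3 from V1, matching MOVSS semantics.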
3288 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3290 unsigned NumElems = VT.getVectorNumElements();
3291 SmallVector<int, 8> Mask;
3292 Mask.push_back(NumElems);
3293 for (unsigned i = 1; i != NumElems; ++i)
3295 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
3299 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3300 SmallVectorImpl<SDValue> &InVals) const {
3301 SelectionDAG &DAG = CLI.DAG;
3303 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3304 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3305 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3306 SDValue Chain = CLI.Chain;
3307 SDValue Callee = CLI.Callee;
3308 CallingConv::ID CallConv = CLI.CallConv;
3309 bool &isTailCall = CLI.IsTailCall;
3310 bool isVarArg = CLI.IsVarArg;
3312 MachineFunction &MF = DAG.getMachineFunction();
3313 bool Is64Bit = Subtarget.is64Bit();
3314 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3315 StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3316 bool IsSibcall = false;
3317 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3318 auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
3319 const CallInst *CI =
3320 CLI.CS ? dyn_cast<CallInst>(CLI.CS->getInstruction()) : nullptr;
3321 const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3322 bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3323 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3325 if (CallConv == CallingConv::X86_INTR)
3326 report_fatal_error("X86 interrupts may not be called directly");
3328 if (Attr.getValueAsString() == "true")
3331 if (Subtarget.isPICStyleGOT() &&
3332 !MF.getTarget().Options.GuaranteedTailCallOpt) {
3333 // If we are using a GOT, disable tail calls to external symbols with
3334 // default visibility. Tail calling such a symbol requires using a GOT
3335 // relocation, which forces early binding of the symbol. This breaks code
// that requires lazy function symbol resolution. Using musttail or
3337 // GuaranteedTailCallOpt will override this.
3338 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3339 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3340 G->getGlobal()->hasDefaultVisibility()))
3344 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
3346 // Force this to be a tail call. The verifier rules are enough to ensure
// that we can lower this successfully without moving the return address
// around.
3350 } else if (isTailCall) {
3351 // Check if it's really possible to do a tail call.
3352 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3353 isVarArg, SR != NotStructReturn,
3354 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
3355 Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
3359 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
3366 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3367 "Var args not supported with calling convention fastcc, ghc or hipe");
3369 // Analyze operands of the call, assigning locations to each operand.
3370 SmallVector<CCValAssign, 16> ArgLocs;
3371 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3373 // Allocate shadow area for Win64.
3375 CCInfo.AllocateStack(32, 8);
3377 CCInfo.AnalyzeArguments(Outs, CC_X86);
// In vectorcall calling convention a second pass is required for the HVA
// registers.
3381 if (CallingConv::X86_VectorCall == CallConv) {
3382 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3385 // Get a count of how many bytes are to be pushed on the stack.
3386 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
// This is a sibcall. The memory operands are already available in the
// caller's own incoming argument space.
3391 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
3392 canGuaranteeTCO(CallConv))
3393 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3396 if (isTailCall && !IsSibcall && !IsMustTail) {
3397 // Lower arguments at fp - stackoffset + fpdiff.
3398 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3400 FPDiff = NumBytesCallerPushed - NumBytes;
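// For example, if the caller's incoming arguments occupy 8 bytes but this
// call needs 24 bytes of arguments, FPDiff = 8 - 24 = -16, and the return
// address slot must be moved accordingly to make room.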
3402 // Set the delta of movement of the returnaddr stackslot.
3403 // But only set if delta is greater than previous delta.
3404 if (FPDiff < X86Info->getTCReturnAddrDelta())
3405 X86Info->setTCReturnAddrDelta(FPDiff);
3408 unsigned NumBytesToPush = NumBytes;
3409 unsigned NumBytesToPop = NumBytes;
// If we have an inalloca argument, all stack space has already been allocated
// for us and is right at the top of the stack. We don't support multiple
3413 // arguments passed in memory when using inalloca.
3414 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3416 if (!ArgLocs.back().isMemLoc())
3417 report_fatal_error("cannot use inalloca attribute on a register "
3419 if (ArgLocs.back().getLocMemOffset() != 0)
3420 report_fatal_error("any parameter with the inalloca attribute must be "
3421 "the only memory argument");
3425 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3426 NumBytes - NumBytesToPush, dl);
3428 SDValue RetAddrFrIdx;
3429 // Load return address for tail calls.
3430 if (isTailCall && FPDiff)
3431 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3432 Is64Bit, FPDiff, dl);
3434 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3435 SmallVector<SDValue, 8> MemOpChains;
// The next loop assumes that the locations are in the same order as the
// Outs array.
3440 assert(isSortedByValueNo(ArgLocs) &&
3441 "Argument Location list must be sorted before lowering");
3443 // Walk the register/memloc assignments, inserting copies/loads. In the case
// of tail call optimization, arguments are handled later.
3445 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3446 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
3448 assert(OutIndex < Outs.size() && "Invalid Out index");
3449 // Skip inalloca arguments, they have already been written.
3450 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
3451 if (Flags.isInAlloca())
3454 CCValAssign &VA = ArgLocs[I];
3455 EVT RegVT = VA.getLocVT();
3456 SDValue Arg = OutVals[OutIndex];
3457 bool isByVal = Flags.isByVal();
3459 // Promote the value if needed.
3460 switch (VA.getLocInfo()) {
3461 default: llvm_unreachable("Unknown loc info!");
3462 case CCValAssign::Full: break;
3463 case CCValAssign::SExt:
3464 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3466 case CCValAssign::ZExt:
3467 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
3469 case CCValAssign::AExt:
3470 if (Arg.getValueType().isVector() &&
3471 Arg.getValueType().getVectorElementType() == MVT::i1)
3472 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
3473 else if (RegVT.is128BitVector()) {
3474 // Special case: passing MMX values in XMM registers.
3475 Arg = DAG.getBitcast(MVT::i64, Arg);
3476 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
3477 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
3479 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
3481 case CCValAssign::BCvt:
3482 Arg = DAG.getBitcast(RegVT, Arg);
3484 case CCValAssign::Indirect: {
3485 // Store the argument.
3486 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
3487 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3488 Chain = DAG.getStore(
3489 Chain, dl, Arg, SpillSlot,
3490 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3496 if (VA.needsCustom()) {
3497 assert(VA.getValVT() == MVT::v64i1 &&
3498 "Currently the only custom case is when we split v64i1 to 2 regs");
3499 // Split v64i1 value into two registers
3500 Passv64i1ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++I],
3502 } else if (VA.isRegLoc()) {
3503 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3504 if (isVarArg && IsWin64) {
// The Win64 ABI requires an argument in an XMM register to be copied to its
// corresponding shadow GPR if the callee is a varargs function.
3507 unsigned ShadowReg = 0;
3508 switch (VA.getLocReg()) {
3509 case X86::XMM0: ShadowReg = X86::RCX; break;
3510 case X86::XMM1: ShadowReg = X86::RDX; break;
3511 case X86::XMM2: ShadowReg = X86::R8; break;
3512 case X86::XMM3: ShadowReg = X86::R9; break;
3515 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
3517 } else if (!IsSibcall && (!isTailCall || isByVal)) {
3518 assert(VA.isMemLoc());
3519 if (!StackPtr.getNode())
3520 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3521 getPointerTy(DAG.getDataLayout()));
3522 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
3523 dl, DAG, VA, Flags));
3527 if (!MemOpChains.empty())
3528 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
3530 if (Subtarget.isPICStyleGOT()) {
// ELF / PIC requires the GOT pointer to be in the EBX register before
// function calls via the PLT.
3534 RegsToPass.push_back(std::make_pair(
3535 unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
3536 getPointerTy(DAG.getDataLayout()))));
// If we are tail calling and generating PIC/GOT style code, load the
// address of the callee into ECX. The value in ECX is used as the target of
// the tail jump. This is done to circumvent the ebx/callee-saved problem
// for tail calls on PIC/GOT architectures. Normally we would just put the
// address of the GOT into ebx and then call target@PLT. But for tail calls
// ebx would be restored (since ebx is callee saved) before jumping to the
// target.
3546 // Note: The actual moving to ECX is done further down.
3547 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3548 if (G && !G->getGlobal()->hasLocalLinkage() &&
3549 G->getGlobal()->hasDefaultVisibility())
3550 Callee = LowerGlobalAddress(Callee, DAG);
3551 else if (isa<ExternalSymbolSDNode>(Callee))
3552 Callee = LowerExternalSymbol(Callee, DAG);
3556 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3557 // From AMD64 ABI document:
3558 // For calls that may call functions that use varargs or stdargs
3559 // (prototype-less calls or calls to functions containing ellipsis (...) in
// the declaration) %al is used as a hidden argument to specify the number
// of SSE registers used. The contents of %al do not need to match exactly
// the number of registers, but must be an upper bound on the number of SSE
// registers used and must be in the range 0 - 8 inclusive.
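// For example, a variadic call that passes a single double in XMM0 may set
// %al to 1; any upper bound up to 8 would be equally valid.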
3565 // Count the number of XMM registers allocated.
3566 static const MCPhysReg XMMArgRegs[] = {
3567 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3568 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3570 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3571 assert((Subtarget.hasSSE1() || !NumXMMRegs)
3572 && "SSE registers cannot be used when SSE is disabled");
3574 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3575 DAG.getConstant(NumXMMRegs, dl,
3579 if (isVarArg && IsMustTail) {
3580 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3581 for (const auto &F : Forwards) {
3582 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3583 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3587 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3588 // don't need this because the eligibility check rejects calls that require
3589 // shuffling arguments passed in memory.
3590 if (!IsSibcall && isTailCall) {
3591 // Force all the incoming stack arguments to be loaded from the stack
3592 // before any new outgoing arguments are stored to the stack, because the
3593 // outgoing stack slots may alias the incoming argument stack slots, and
3594 // the alias isn't otherwise explicit. This is slightly more conservative
3595 // than necessary, because it means that each store effectively depends
3596 // on every argument instead of just those arguments it would clobber.
3597 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3599 SmallVector<SDValue, 8> MemOpChains2;
3602 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
3604 CCValAssign &VA = ArgLocs[I];
3606 if (VA.isRegLoc()) {
3607 if (VA.needsCustom()) {
3608 assert((CallConv == CallingConv::X86_RegCall) &&
3609 "Expecting custom case only in regcall calling convention");
// This means that we are in the special case where one argument was
// passed through two register locations, so skip the next location.
3618 assert(VA.isMemLoc());
3619 SDValue Arg = OutVals[OutsIndex];
3620 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
3621 // Skip inalloca arguments. They don't require any work.
3622 if (Flags.isInAlloca())
3624 // Create frame index.
3625 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3626 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3627 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
3628 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3630 if (Flags.isByVal()) {
3631 // Copy relative to framepointer.
3632 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
3633 if (!StackPtr.getNode())
3634 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3635 getPointerTy(DAG.getDataLayout()));
3636 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3639 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3643 // Store relative to framepointer.
3644 MemOpChains2.push_back(DAG.getStore(
3645 ArgChain, dl, Arg, FIN,
3646 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
3650 if (!MemOpChains2.empty())
3651 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3653 // Store the return address to the appropriate stack slot.
3654 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3655 getPointerTy(DAG.getDataLayout()),
3656 RegInfo->getSlotSize(), FPDiff, dl);
3659 // Build a sequence of copy-to-reg nodes chained together with token chain
3660 // and flag operands which copy the outgoing args into registers.
3662 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3663 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3664 RegsToPass[i].second, InFlag);
3665 InFlag = Chain.getValue(1);
3668 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3669 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3670 // In the 64-bit large code model, we have to make all calls
3671 // through a register, since the call instruction's 32-bit
// pc-relative offset may not be large enough to hold the whole address.
3674 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
// If the callee is a GlobalAddress node (quite common, every direct call
// is), turn it into a TargetGlobalAddress node so that legalize doesn't
// hack it.
3678 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
// We should use an extra load for direct calls to dllimported functions in
// non-JIT mode.
3682 const GlobalValue *GV = G->getGlobal();
3683 if (!GV->hasDLLImportStorageClass()) {
3684 unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
3686 Callee = DAG.getTargetGlobalAddress(
3687 GV, dl, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
3689 if (OpFlags == X86II::MO_GOTPCREL) {
3691 Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
3692 getPointerTy(DAG.getDataLayout()), Callee);
3693 // Add extra indirection
3694 Callee = DAG.getLoad(
3695 getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
3696 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3699 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3700 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
3701 unsigned char OpFlags =
3702 Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
3704 Callee = DAG.getTargetExternalSymbol(
3705 S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
3706 } else if (Subtarget.isTarget64BitILP32() &&
3707 Callee->getValueType(0) == MVT::i32) {
// Zero-extend the 32-bit Callee address into a 64-bit one according to the
// x32 ABI.
3709 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3712 // Returns a chain & a flag for retval copy to use.
3713 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3714 SmallVector<SDValue, 8> Ops;
3716 if (!IsSibcall && isTailCall) {
3717 Chain = DAG.getCALLSEQ_END(Chain,
3718 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
3719 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
3720 InFlag = Chain.getValue(1);
3723 Ops.push_back(Chain);
3724 Ops.push_back(Callee);
3727 Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
// Add argument registers to the end of the list so that they are known live
// into the call.
3731 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3732 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3733 RegsToPass[i].second.getValueType()));
3735 // Add a register mask operand representing the call-preserved registers.
// If HasNCSR is asserted (attribute NoCallerSavedRegisters exists), then we
// use the X86_INTR calling convention because it has the same CSR mask
3738 // (same preserved registers).
3739 const uint32_t *Mask = RegInfo->getCallPreservedMask(
3740 MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
3741 assert(Mask && "Missing call preserved mask for calling convention");
3743 // If this is an invoke in a 32-bit function using a funclet-based
3744 // personality, assume the function clobbers all registers. If an exception
3745 // is thrown, the runtime will not restore CSRs.
3746 // FIXME: Model this more precisely so that we can register allocate across
3747 // the normal edge and spill and fill across the exceptional edge.
3748 if (!Is64Bit && CLI.CS && CLI.CS->isInvoke()) {
3749 const Function *CallerFn = MF.getFunction();
3750 EHPersonality Pers =
3751 CallerFn->hasPersonalityFn()
3752 ? classifyEHPersonality(CallerFn->getPersonalityFn())
3753 : EHPersonality::Unknown;
3754 if (isFuncletEHPersonality(Pers))
3755 Mask = RegInfo->getNoPreservedMask();
3758 // Define a new register mask from the existing mask.
3759 uint32_t *RegMask = nullptr;
3761 // In some calling conventions we need to remove the used physical registers
3762 // from the reg mask.
3763 if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
3764 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3766 // Allocate a new Reg Mask and copy Mask.
3767 RegMask = MF.allocateRegisterMask(TRI->getNumRegs());
3768 unsigned RegMaskSize = (TRI->getNumRegs() + 31) / 32;
3769 memcpy(RegMask, Mask, sizeof(uint32_t) * RegMaskSize);
// Make sure all sub-registers of the argument registers are reset
// in the RegMask.
3773 for (auto const &RegPair : RegsToPass)
3774 for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
3775 SubRegs.isValid(); ++SubRegs)
3776 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
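// A set bit in the mask means the register is preserved across the call;
// clearing the bits for the argument registers (and their sub-registers)
// marks them as clobbered instead.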
3778 // Create the RegMask Operand according to our updated mask.
3779 Ops.push_back(DAG.getRegisterMask(RegMask));
3781 // Create the RegMask Operand according to the static mask.
3782 Ops.push_back(DAG.getRegisterMask(Mask));
3785 if (InFlag.getNode())
3786 Ops.push_back(InFlag);
3790 //// If this is the first return lowered for this function, add the regs
3791 //// to the liveout set for the function.
3792 // This isn't right, although it's probably harmless on x86; liveouts
3793 // should be computed from returns not tail calls. Consider a void
3794 // function making a tail call to a function returning int.
3795 MF.getFrameInfo().setHasTailCall();
3796 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3799 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3800 InFlag = Chain.getValue(1);
3802 // Create the CALLSEQ_END node.
3803 unsigned NumBytesForCalleeToPop;
3804 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3805 DAG.getTarget().Options.GuaranteedTailCallOpt))
3806 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3807 else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3808 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3809 SR == StackStructReturn)
3810 // If this is a call to a struct-return function, the callee
3811 // pops the hidden struct pointer, so we have to push it back.
3812 // This is common for Darwin/X86, Linux & Mingw32 targets.
3813 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3814 NumBytesForCalleeToPop = 4;
3816 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3818 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
// No need to reset the stack after the call if the call doesn't return. To
// keep the MI verifier happy, we'll pretend the callee does it for us.
3821 NumBytesForCalleeToPop = NumBytes;
3824 // Returns a flag for retval copy to use.
3826 Chain = DAG.getCALLSEQ_END(Chain,
3827 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
3828 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
3831 InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
// return.
3836 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
3840 //===----------------------------------------------------------------------===//
3841 // Fast Calling Convention (tail call) implementation
3842 //===----------------------------------------------------------------------===//
// Like the stdcall convention, the callee cleans up the arguments, except
// that ECX is reserved for storing the tail-called function's address. Only
// two registers are free for argument passing (inreg). Tail call
// optimization is performed provided:
3848 // * tailcallopt is enabled
3849 // * caller/callee are fastcc
3850 // On X86_64 architecture with GOT-style position independent code only local
3851 // (within module) calls are supported at the moment.
// To keep the stack aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's
// dyld, for example.)
// If a tail-called function (the callee) has more arguments than the caller,
// the caller needs to make sure that there is room to move the RETADDR to.
// This is achieved by reserving an area the size of the argument delta right
// after the original RETADDR, but before the saved frame pointer or the
// spilled registers,
3859 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
/// Make the stack size aligned, e.g. to 16n + 12 for a 16-byte alignment
/// requirement.
3874 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3875 SelectionDAG& DAG) const {
3876 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3877 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3878 unsigned StackAlignment = TFI.getStackAlignment();
3879 uint64_t AlignMask = StackAlignment - 1;
3880 int64_t Offset = StackSize;
3881 unsigned SlotSize = RegInfo->getSlotSize();
3882 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
// The misalignment is no larger than (StackAlignment - SlotSize), so just
// add the difference.
3884 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
// Mask out the lower bits, then add the stack alignment once plus
// (StackAlignment - SlotSize) bytes.
3887 Offset = ((~AlignMask) & Offset) + StackAlignment +
3888 (StackAlignment-SlotSize);
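// Worked example with StackAlignment = 16 and SlotSize = 4: StackSize = 22
// has (22 & 15) = 6 <= 12, giving 22 + (12 - 6) = 28 = 16 + 12, while
// StackSize = 30 has (30 & 15) = 14 > 12, giving (30 & ~15) + 16 + 12 = 44.
// Either way the result plus a SlotSize-byte return address is a multiple
// of the stack alignment.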
3893 /// Return true if the given stack call argument is already available in the
/// same (relative) position in the caller's incoming argument stack.
3896 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3897 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
3898 const X86InstrInfo *TII, const CCValAssign &VA) {
3899 unsigned Bytes = Arg.getValueSizeInBits() / 8;
3902 // Look through nodes that don't alter the bits of the incoming value.
3903 unsigned Op = Arg.getOpcode();
3904 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
3905 Arg = Arg.getOperand(0);
3908 if (Op == ISD::TRUNCATE) {
3909 const SDValue &TruncInput = Arg.getOperand(0);
3910 if (TruncInput.getOpcode() == ISD::AssertZext &&
3911 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
3912 Arg.getValueType()) {
3913 Arg = TruncInput.getOperand(0);
3921 if (Arg.getOpcode() == ISD::CopyFromReg) {
3922 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3923 if (!TargetRegisterInfo::isVirtualRegister(VR))
3925 MachineInstr *Def = MRI->getVRegDef(VR);
3928 if (!Flags.isByVal()) {
3929 if (!TII->isLoadFromStackSlot(*Def, FI))
3932 unsigned Opcode = Def->getOpcode();
3933 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3934 Opcode == X86::LEA64_32r) &&
3935 Def->getOperand(1).isFI()) {
3936 FI = Def->getOperand(1).getIndex();
3937 Bytes = Flags.getByValSize();
3941 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3942 if (Flags.isByVal())
3943 // ByVal argument is passed in as a pointer but it's now being
3944 // dereferenced. e.g.
3945 // define @foo(%struct.X* %A) {
3946 // tail call @bar(%struct.X* byval %A)
3949 SDValue Ptr = Ld->getBasePtr();
3950 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3953 FI = FINode->getIndex();
3954 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3955 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3956 FI = FINode->getIndex();
3957 Bytes = Flags.getByValSize();
3961 assert(FI != INT_MAX);
3962 if (!MFI.isFixedObjectIndex(FI))
3965 if (Offset != MFI.getObjectOffset(FI))
3968 if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
3969 // If the argument location is wider than the argument type, check that any
3970 // extension flags match.
3971 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
3972 Flags.isSExt() != MFI.isObjectSExt(FI)) {
3977 return Bytes == MFI.getObjectSize(FI);
3980 /// Check whether the call is eligible for tail call optimization. Targets
3981 /// that want to do tail call optimization should implement this function.
3982 bool X86TargetLowering::IsEligibleForTailCallOptimization(
3983 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
3984 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
3985 const SmallVectorImpl<ISD::OutputArg> &Outs,
3986 const SmallVectorImpl<SDValue> &OutVals,
3987 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
3988 if (!mayTailCallThisCC(CalleeCC))
3991 // If -tailcallopt is specified, make fastcc functions tail-callable.
3992 MachineFunction &MF = DAG.getMachineFunction();
3993 const Function *CallerF = MF.getFunction();
3995 // If the function return type is x86_fp80 and the callee return type is not,
3996 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3997 // perform a tailcall optimization here.
3998 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4001 CallingConv::ID CallerCC = CallerF->getCallingConv();
4002 bool CCMatch = CallerCC == CalleeCC;
4003 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4004 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4006 // Win64 functions have extra shadow space for argument homing. Don't do the
// sibcall if the caller and callee have mismatched expectations for this
// space.
4009 if (IsCalleeWin64 != IsCallerWin64)
4012 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
4013 if (canGuaranteeTCO(CalleeCC) && CCMatch)
4018 // Look for obvious safe cases to perform tail call optimization that do not
4019 // require ABI changes. This is what gcc calls sibcall.
4021 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4022 // emit a special epilogue.
4023 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4024 if (RegInfo->needsStackRealignment(MF))
4027 // Also avoid sibcall optimization if either caller or callee uses struct
4028 // return semantics.
4029 if (isCalleeStructRet || isCallerStructRet)
// Do not sibcall optimize vararg calls unless all arguments are passed via
// registers.
4034 LLVMContext &C = *DAG.getContext();
4035 if (isVarArg && !Outs.empty()) {
4036 // Optimizing for varargs on Win64 is unlikely to be safe without
4037 // additional testing.
4038 if (IsCalleeWin64 || IsCallerWin64)
4041 SmallVector<CCValAssign, 16> ArgLocs;
4042 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4044 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4045 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4046 if (!ArgLocs[i].isRegLoc())
4050 // If the call result is in ST0 / ST1, it needs to be popped off the x87
4051 // stack. Therefore, if it's not used by the call it is not safe to optimize
4052 // this into a sibcall.
4053 bool Unused = false;
4054 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4061 SmallVector<CCValAssign, 16> RVLocs;
4062 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4063 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4064 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4065 CCValAssign &VA = RVLocs[i];
4066 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4071 // Check that the call results are passed in the same way.
4072 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4073 RetCC_X86, RetCC_X86))
4075 // The callee has to preserve all registers the caller needs to preserve.
4076 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4077 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4079 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4080 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4084 unsigned StackArgsSize = 0;
// If the callee takes no arguments then go on to check the results of the
// call.
4088 if (!Outs.empty()) {
4089 // Check if stack adjustment is needed. For now, do not do this if any
4090 // argument is passed on the stack.
4091 SmallVector<CCValAssign, 16> ArgLocs;
4092 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4094 // Allocate shadow area for Win64
4096 CCInfo.AllocateStack(32, 8);
4098 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4099 StackArgsSize = CCInfo.getNextStackOffset();
4101 if (CCInfo.getNextStackOffset()) {
4102 // Check if the arguments are already laid out in the right way as
4103 // the caller's fixed stack objects.
4104 MachineFrameInfo &MFI = MF.getFrameInfo();
4105 const MachineRegisterInfo *MRI = &MF.getRegInfo();
4106 const X86InstrInfo *TII = Subtarget.getInstrInfo();
4107 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4108 CCValAssign &VA = ArgLocs[i];
4109 SDValue Arg = OutVals[i];
4110 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4111 if (VA.getLocInfo() == CCValAssign::Indirect)
4113 if (!VA.isRegLoc()) {
4114 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4121 bool PositionIndependent = isPositionIndependent();
4122 // If the tailcall address may be in a register, then make sure it's
4123 // possible to register allocate for it. In 32-bit, the call address can
4124 // only target EAX, EDX, or ECX since the tail call must be scheduled after
4125 // callee-saved registers are restored. These happen to be the same
4126 // registers used to pass 'inreg' arguments so watch out for those.
4127 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4128 !isa<ExternalSymbolSDNode>(Callee)) ||
4129 PositionIndependent)) {
4130 unsigned NumInRegs = 0;
// In PIC we need an extra register to formulate the address computation
// for the callee.
4133 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4135 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4136 CCValAssign &VA = ArgLocs[i];
4139 unsigned Reg = VA.getLocReg();
4142 case X86::EAX: case X86::EDX: case X86::ECX:
4143 if (++NumInRegs == MaxInRegs)
4150 const MachineRegisterInfo &MRI = MF.getRegInfo();
4151 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4155 bool CalleeWillPop =
4156 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4157 MF.getTarget().Options.GuaranteedTailCallOpt);
4159 if (unsigned BytesToPop =
4160 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4161 // If we have bytes to pop, the callee must pop them.
4162 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4163 if (!CalleePopMatches)
4165 } else if (CalleeWillPop && StackArgsSize > 0) {
4166 // If we don't have bytes to pop, make sure the callee doesn't pop any.
4174 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4175 const TargetLibraryInfo *libInfo) const {
4176 return X86::createFastISel(funcInfo, libInfo);
4179 //===----------------------------------------------------------------------===//
4180 // Other Lowering Hooks
4181 //===----------------------------------------------------------------------===//
4183 static bool MayFoldLoad(SDValue Op) {
4184 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4187 static bool MayFoldIntoStore(SDValue Op) {
4188 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4191 static bool MayFoldIntoZeroExtend(SDValue Op) {
4192 if (Op.hasOneUse()) {
4193 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4194 return (ISD::ZERO_EXTEND == Opcode);
4199 static bool isTargetShuffle(unsigned Opcode) {
4201 default: return false;
4202 case X86ISD::BLENDI:
4203 case X86ISD::PSHUFB:
4204 case X86ISD::PSHUFD:
4205 case X86ISD::PSHUFHW:
4206 case X86ISD::PSHUFLW:
4208 case X86ISD::INSERTPS:
4209 case X86ISD::PALIGNR:
4210 case X86ISD::VSHLDQ:
4211 case X86ISD::VSRLDQ:
4212 case X86ISD::MOVLHPS:
4213 case X86ISD::MOVLHPD:
4214 case X86ISD::MOVHLPS:
4215 case X86ISD::MOVLPS:
4216 case X86ISD::MOVLPD:
4217 case X86ISD::MOVSHDUP:
4218 case X86ISD::MOVSLDUP:
4219 case X86ISD::MOVDDUP:
4222 case X86ISD::UNPCKL:
4223 case X86ISD::UNPCKH:
4224 case X86ISD::VBROADCAST:
4225 case X86ISD::VPERMILPI:
4226 case X86ISD::VPERMILPV:
4227 case X86ISD::VPERM2X128:
4228 case X86ISD::VPERMIL2:
4229 case X86ISD::VPERMI:
4230 case X86ISD::VPPERM:
4231 case X86ISD::VPERMV:
4232 case X86ISD::VPERMV3:
4233 case X86ISD::VPERMIV3:
4234 case X86ISD::VZEXT_MOVL:
4239 static bool isTargetShuffleVariableMask(unsigned Opcode) {
4241 default: return false;
4243 case X86ISD::PSHUFB:
4244 case X86ISD::VPERMILPV:
4245 case X86ISD::VPERMIL2:
4246 case X86ISD::VPPERM:
4247 case X86ISD::VPERMV:
4248 case X86ISD::VPERMV3:
4249 case X86ISD::VPERMIV3:
4251 // 'Faux' Target Shuffles.
4258 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4259 MachineFunction &MF = DAG.getMachineFunction();
4260 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4261 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4262 int ReturnAddrIndex = FuncInfo->getRAIndex();
4264 if (ReturnAddrIndex == 0) {
4265 // Set up a frame object for the return address.
4266 unsigned SlotSize = RegInfo->getSlotSize();
4267 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4270 FuncInfo->setRAIndex(ReturnAddrIndex);
4273 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4276 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4277 bool hasSymbolicDisplacement) {
4278 // Offset should fit into 32 bit immediate field.
4279 if (!isInt<32>(Offset))
// If we don't have a symbolic displacement - we don't have any extra
// restrictions.
4284 if (!hasSymbolicDisplacement)
4287 // FIXME: Some tweaks might be needed for medium code model.
4288 if (M != CodeModel::Small && M != CodeModel::Kernel)
// For the small code model we assume that the highest-addressed object ends
// at least 16MB before the 2^31 boundary, so offsets smaller than 16MB are
// safe. We may also accept pretty large negative constants knowing that all
// objects are in the positive half of the address space.
4294 if (M == CodeModel::Small && Offset < 16*1024*1024)
// For the kernel code model we know that all objects reside in the negative
// half of the 32-bit address space. We must not accept negative offsets,
// since they may take the address just outside that range, but we may accept
// pretty large positive ones.
4300 if (M == CodeModel::Kernel && Offset >= 0)
4306 /// Determines whether the callee is required to pop its own arguments.
4307 /// Callee pop is necessary to support tail calls.
4308 bool X86::isCalleePop(CallingConv::ID CallingConv,
4309 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4310 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4311 // can guarantee TCO.
4312 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4315 switch (CallingConv) {
4318 case CallingConv::X86_StdCall:
4319 case CallingConv::X86_FastCall:
4320 case CallingConv::X86_ThisCall:
4321 case CallingConv::X86_VectorCall:
4326 /// \brief Return true if the condition is an unsigned comparison operation.
4327 static bool isX86CCUnsigned(unsigned X86CC) {
4330 llvm_unreachable("Invalid integer condition!");
4346 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4347 switch (SetCCOpcode) {
4348 default: llvm_unreachable("Invalid integer condition!");
4349 case ISD::SETEQ: return X86::COND_E;
4350 case ISD::SETGT: return X86::COND_G;
4351 case ISD::SETGE: return X86::COND_GE;
4352 case ISD::SETLT: return X86::COND_L;
4353 case ISD::SETLE: return X86::COND_LE;
4354 case ISD::SETNE: return X86::COND_NE;
4355 case ISD::SETULT: return X86::COND_B;
4356 case ISD::SETUGT: return X86::COND_A;
4357 case ISD::SETULE: return X86::COND_BE;
4358 case ISD::SETUGE: return X86::COND_AE;
/// Do a one-to-one translation of an ISD::CondCode to the X86-specific
4363 /// condition code, returning the condition code and the LHS/RHS of the
4364 /// comparison to make.
4365 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4366 bool isFP, SDValue &LHS, SDValue &RHS,
4367 SelectionDAG &DAG) {
4369 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4370 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4371 // X > -1 -> X == 0, jump !sign.
4372 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4373 return X86::COND_NS;
4375 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4376 // X < 0 -> X == 0, jump on sign.
4379 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
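// X < 1 -> X <= 0, so compare against zero and use the signed
// less-than-or-equal condition.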
4381 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4382 return X86::COND_LE;
4386 return TranslateIntegerX86CC(SetCCOpcode);
4389 // First determine if it is required or is profitable to flip the operands.
4391 // If LHS is a foldable load, but RHS is not, flip the condition.
4392 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4393 !ISD::isNON_EXTLoad(RHS.getNode())) {
4394 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4395 std::swap(LHS, RHS);
4398 switch (SetCCOpcode) {
4404 std::swap(LHS, RHS);
4408 // On a floating-point condition, the flags are set as follows:
//  ZF | PF | CF | op
4410 //   0 |  0 |  0 | X > Y
4411 //   0 |  0 |  1 | X < Y
4412 //   1 |  0 |  0 | X == Y
4413 //   1 |  1 |  1 | unordered
4414 switch (SetCCOpcode) {
4415 default: llvm_unreachable("Condcode should be pre-legalized away");
4417 case ISD::SETEQ: return X86::COND_E;
4418 case ISD::SETOLT: // flipped
4420 case ISD::SETGT: return X86::COND_A;
4421 case ISD::SETOLE: // flipped
4423 case ISD::SETGE: return X86::COND_AE;
4424 case ISD::SETUGT: // flipped
4426 case ISD::SETLT: return X86::COND_B;
4427 case ISD::SETUGE: // flipped
4429 case ISD::SETLE: return X86::COND_BE;
4431 case ISD::SETNE: return X86::COND_NE;
4432 case ISD::SETUO: return X86::COND_P;
4433 case ISD::SETO: return X86::COND_NP;
4435 case ISD::SETUNE: return X86::COND_INVALID;
4439 /// Is there a floating point cmov for the specific X86 condition code?
4440 /// The current x86 ISA includes the following FP cmov instructions:
4441 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4442 static bool hasFPCMov(unsigned X86CC) {
4459 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4461 unsigned Intrinsic) const {
4463 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4467 Info.opc = ISD::INTRINSIC_W_CHAIN;
4468 Info.readMem = false;
4469 Info.writeMem = false;
4473 switch (IntrData->Type) {
4474 case EXPAND_FROM_MEM: {
4475 Info.ptrVal = I.getArgOperand(0);
4476 Info.memVT = MVT::getVT(I.getType());
4478 Info.readMem = true;
4481 case COMPRESS_TO_MEM: {
4482 Info.ptrVal = I.getArgOperand(0);
4483 Info.memVT = MVT::getVT(I.getArgOperand(1)->getType());
4485 Info.writeMem = true;
4488 case TRUNCATE_TO_MEM_VI8:
4489 case TRUNCATE_TO_MEM_VI16:
4490 case TRUNCATE_TO_MEM_VI32: {
4491 Info.ptrVal = I.getArgOperand(0);
4492 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
4493 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4494 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4496 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4497 ScalarVT = MVT::i16;
4498 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4499 ScalarVT = MVT::i32;
4501 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4503 Info.writeMem = true;
4513 /// Returns true if the target can instruction select the
4514 /// specified FP immediate natively. If false, the legalizer will
4515 /// materialize the FP immediate as a load from a constant pool.
4516 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
4517 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
4518 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
4524 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
4525 ISD::LoadExtType ExtTy,
4527 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
4528 // relocation target a movq or addq instruction: don't let the load shrink.
4529 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
4530 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
4531 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
4532 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
4536 /// \brief Returns true if it is beneficial to convert a load of a constant
4537 /// to just the constant itself.
4538 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
4540 assert(Ty->isIntegerTy());
4542 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4543 if (BitSize == 0 || BitSize > 64)
4548 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
4549 unsigned Index) const {
4550 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
4553 return (Index == 0 || Index == ResVT.getVectorNumElements());
4556 bool X86TargetLowering::isCheapToSpeculateCttz() const {
4557 // Speculate cttz only if we can directly use TZCNT.
4558 return Subtarget.hasBMI();
4561 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
4562 // Speculate ctlz only if we can directly use LZCNT.
4563 return Subtarget.hasLZCNT();
4566 bool X86TargetLowering::isCtlzFast() const {
4567 return Subtarget.hasFastLZCNT();
4570 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
4571 const Instruction &AndI) const {
4575 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
4576 if (!Subtarget.hasBMI())
4579 // There are only 32-bit and 64-bit forms for 'andn'.
4580 EVT VT = Y.getValueType();
4581 if (VT != MVT::i32 && VT != MVT::i64)
4587 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
4588 MVT VT = MVT::getIntegerVT(NumBits);
4589 if (isTypeLegal(VT))
4592 // PMOVMSKB can handle this.
4593 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
4596 // VPMOVMSKB can handle this.
4597 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
4600 // TODO: Allow 64-bit type for 32-bit target.
4601 // TODO: 512-bit types should be allowed, but make sure that those
4602 // cases are handled in combineVectorSizedSetCCEquality().
4604 return MVT::INVALID_SIMPLE_VALUE_TYPE;
4607 /// Val is the undef sentinel value or equal to the specified value.
4608 static bool isUndefOrEqual(int Val, int CmpVal) {
4609 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
4612 /// Val is either the undef or zero sentinel value.
4613 static bool isUndefOrZero(int Val) {
4614 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
4617 /// Return true if every element in Mask, beginning
4618 /// at position Pos and ending at Pos+Size, is the undef sentinel value.
4619 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
4620 for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
4621 if (Mask[i] != SM_SentinelUndef)
4626 /// Return true if Val is undef or if its value falls within the
4627 /// specified range [Low, Hi).
4628 static bool isUndefOrInRange(int Val, int Low, int Hi) {
4629 return (Val == SM_SentinelUndef) || (Val >= Low && Val < Hi);
4632 /// Return true if every element in Mask is undef or if its value
4633 /// falls within the specified range [Low, Hi).
4634 static bool isUndefOrInRange(ArrayRef<int> Mask,
4637 if (!isUndefOrInRange(M, Low, Hi))
4642 /// Return true if Val is undef, zero or if its value falls within the
4643 /// specified range [Low, Hi).
4644 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
4645 return isUndefOrZero(Val) || (Val >= Low && Val < Hi);
4648 /// Return true if every element in Mask is undef, zero or if its value
4649 /// falls within the specified range [Low, Hi).
4650 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
4652 if (!isUndefOrZeroOrInRange(M, Low, Hi))
4657 /// Return true if every element in Mask, beginning
4658 /// at position Pos and ending at Pos+Size, falls within the specified
4659 /// sequential range [Low, Low+Size), or is undef.
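/// For example, Mask = <4, -1, 6, 7> with Pos = 0, Size = 4 and Low = 4 returns true.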
4660 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
4661 unsigned Pos, unsigned Size, int Low) {
4662 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
4663 if (!isUndefOrEqual(Mask[i], Low))
4668 /// Return true if every element in Mask, beginning
4669 /// at position Pos and ending at Pos+Size, falls within the specified
4670 /// sequential range [Low, Low+Size), or is undef or zero.
4671 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
4672 unsigned Size, int Low) {
4673 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
4674 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
4679 /// Return true if every element in Mask, beginning
4680 /// at position Pos and ending at Pos+Size, is undef or zero.
4681 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
4683 for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
4684 if (!isUndefOrZero(Mask[i]))
4689 /// \brief Helper function to test whether a shuffle mask could be
4690 /// simplified by widening the elements being shuffled.
4692 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
4693 /// leaves it in an unspecified state.
4695 /// NOTE: This must handle normal vector shuffle masks and *target* vector
4696 /// shuffle masks. The latter have the special property of a '-2' representing
4697 /// a zeroed lane of a vector.
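///
/// For example, <0, 1, 6, 7> widens to <0, 3> and <-1, 1, 2, 3> widens to
/// <0, 1>, but <0, 2, 4, 6> cannot be widened.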
4698 static bool canWidenShuffleElements(ArrayRef<int> Mask,
4699 SmallVectorImpl<int> &WidenedMask) {
4700 WidenedMask.assign(Mask.size() / 2, 0);
4701 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
4703 int M1 = Mask[i + 1];
4705 // If both elements are undef, it's trivial.
4706 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
4707 WidenedMask[i / 2] = SM_SentinelUndef;
4711 // Check for an undef mask and a mask value properly aligned to fit with
4712 // a pair of values. If we find such a case, use the non-undef mask's value.
4713 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
4714 WidenedMask[i / 2] = M1 / 2;
4717 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
4718 WidenedMask[i / 2] = M0 / 2;
4722 // When zeroing, we need to spread the zeroing across both lanes to widen.
4723 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
4724 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
4725 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
4726 WidenedMask[i / 2] = SM_SentinelZero;
4732 // Finally, check if the two mask values are adjacent and aligned so they form a whole widened element.
4734 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
4735 WidenedMask[i / 2] = M0 / 2;
4739 // Otherwise we can't safely widen the elements used in this shuffle.
4742 assert(WidenedMask.size() == Mask.size() / 2 &&
4743 "Incorrect size of mask after widening the elements!");
4748 /// Helper function to scale a shuffle or target shuffle mask, replacing each
4749 /// mask index with the scaled sequential indices for an equivalent narrowed
4750 /// mask. This is the reverse process to canWidenShuffleElements, but can always succeed.
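/// For example, with Scale = 2 the mask <0, 2, -1> scales to <0, 1, 4, 5, -1, -1>.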
4752 static void scaleShuffleMask(int Scale, ArrayRef<int> Mask,
4753 SmallVectorImpl<int> &ScaledMask) {
4754 assert(0 < Scale && "Unexpected scaling factor");
4755 int NumElts = Mask.size();
4756 ScaledMask.assign(static_cast<size_t>(NumElts * Scale), -1);
4758 for (int i = 0; i != NumElts; ++i) {
4761 // Repeat sentinel values in every mask element.
4763 for (int s = 0; s != Scale; ++s)
4764 ScaledMask[(Scale * i) + s] = M;
4768 // Scale mask element and increment across each mask element.
4769 for (int s = 0; s != Scale; ++s)
4770 ScaledMask[(Scale * i) + s] = (Scale * M) + s;
4774 /// Return true if the specified EXTRACT_SUBVECTOR operand specifies a vector
4775 /// extract that is suitable for instructions that extract 128- or 256-bit vectors.
4776 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4777 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4778 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4781 // The index should be aligned on a vecWidth-bit boundary.
4782 uint64_t Index = N->getConstantOperandVal(1);
4783 MVT VT = N->getSimpleValueType(0);
4784 unsigned ElSize = VT.getScalarSizeInBits();
4785 return (Index * ElSize) % vecWidth == 0;
4788 /// Return true if the specified INSERT_SUBVECTOR
4789 /// operand specifies a subvector insert that is suitable for input to
4790 /// insertion of 128- or 256-bit subvectors.
4791 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4792 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4793 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4796 // The index should be aligned on a vecWidth-bit boundary.
4797 uint64_t Index = N->getConstantOperandVal(2);
4798 MVT VT = N->getSimpleValueType(0);
4799 unsigned ElSize = VT.getScalarSizeInBits();
4800 return (Index * ElSize) % vecWidth == 0;
4803 bool X86::isVINSERT128Index(SDNode *N) {
4804 return isVINSERTIndex(N, 128);
4807 bool X86::isVINSERT256Index(SDNode *N) {
4808 return isVINSERTIndex(N, 256);
4811 bool X86::isVEXTRACT128Index(SDNode *N) {
4812 return isVEXTRACTIndex(N, 128);
4815 bool X86::isVEXTRACT256Index(SDNode *N) {
4816 return isVEXTRACTIndex(N, 256);
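// For example, extracting the upper 128-bit half of a v8i32 (first element
// index 4) with vecWidth = 128 yields the immediate 4 / (128 / 32) = 1.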
4819 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4820 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4821 assert(isa<ConstantSDNode>(N->getOperand(1).getNode()) &&
4822 "Illegal extract subvector for VEXTRACT");
4824 uint64_t Index = N->getConstantOperandVal(1);
4825 MVT VecVT = N->getOperand(0).getSimpleValueType();
4826 unsigned NumElemsPerChunk = vecWidth / VecVT.getScalarSizeInBits();
4827 return Index / NumElemsPerChunk;
4830 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4831 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4832 assert(isa<ConstantSDNode>(N->getOperand(2).getNode()) &&
4833 "Illegal insert subvector for VINSERT");
4835 uint64_t Index = N->getConstantOperandVal(2);
4836 MVT VecVT = N->getSimpleValueType(0);
4837 unsigned NumElemsPerChunk = vecWidth / VecVT.getScalarSizeInBits();
4838 return Index / NumElemsPerChunk;
4841 /// Return the appropriate immediate to extract the specified
4842 /// EXTRACT_SUBVECTOR index with VEXTRACTF128 and VEXTRACTI128 instructions.
4843 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4844 return getExtractVEXTRACTImmediate(N, 128);
4847 /// Return the appropriate immediate to extract the specified
4848 /// EXTRACT_SUBVECTOR index with VEXTRACTF64x4 and VEXTRACTI64x4 instructions.
4849 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4850 return getExtractVEXTRACTImmediate(N, 256);
4853 /// Return the appropriate immediate to insert at the specified
4854 /// INSERT_SUBVECTOR index with VINSERTF128 and VINSERTI128 instructions.
4855 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
4856 return getInsertVINSERTImmediate(N, 128);
4859 /// Return the appropriate immediate to insert at the specified
4860 /// INSERT_SUBVECTOR index with VINSERTF64x4 and VINSERTI64x4 instructions.
4861 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
4862 return getInsertVINSERTImmediate(N, 256);
4865 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
4866 bool X86::isZeroNode(SDValue Elt) {
4867 return isNullConstant(Elt) || isNullFPConstant(Elt);
4870 // Build a vector of constants.
4871 // Use an UNDEF node if MaskElt == -1.
4872 // Split 64-bit constants into 32-bit halves in 32-bit mode.
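// For example, in 32-bit mode an IsMask v2i64 request <0, -1> is built as the
// v4i32 vector <0, 0, undef, undef> and then bitcast back to v2i64.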
4873 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
4874 const SDLoc &dl, bool IsMask = false) {
4876 SmallVector<SDValue, 32> Ops;
4879 MVT ConstVecVT = VT;
4880 unsigned NumElts = VT.getVectorNumElements();
4881 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
4882 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
4883 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
4887 MVT EltVT = ConstVecVT.getVectorElementType();
4888 for (unsigned i = 0; i < NumElts; ++i) {
4889 bool IsUndef = Values[i] < 0 && IsMask;
4890 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
4891 DAG.getConstant(Values[i], dl, EltVT);
4892 Ops.push_back(OpNode);
4894 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
4895 DAG.getConstant(0, dl, EltVT));
4897 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
4899 ConstsNode = DAG.getBitcast(VT, ConstsNode);
4903 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
4904 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
4905 assert(Bits.size() == Undefs.getBitWidth() &&
4906 "Unequal constant and undef arrays");
4907 SmallVector<SDValue, 32> Ops;
4910 MVT ConstVecVT = VT;
4911 unsigned NumElts = VT.getVectorNumElements();
4912 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
4913 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
4914 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
4918 MVT EltVT = ConstVecVT.getVectorElementType();
4919 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
4921 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
4924 const APInt &V = Bits[i];
4925 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
4927 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
4928 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
4929 } else if (EltVT == MVT::f32) {
4930 APFloat FV(APFloat::IEEEsingle(), V);
4931 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
4932 } else if (EltVT == MVT::f64) {
4933 APFloat FV(APFloat::IEEEdouble(), V);
4934 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
4936 Ops.push_back(DAG.getConstant(V, dl, EltVT));
4940 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
4941 return DAG.getBitcast(VT, ConstsNode);
4944 /// Returns a vector of specified type with all zero elements.
4945 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
4946 SelectionDAG &DAG, const SDLoc &dl) {
4947 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
4948 VT.getVectorElementType() == MVT::i1) &&
4949 "Unexpected vector type");
4951 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
4952 // type. This ensures they get CSE'd. But if the integer type is not
4953 // available, use a floating-point +0.0 instead.
4955 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
4956 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
4957 } else if (VT.getVectorElementType() == MVT::i1) {
4958 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
4959 "Unexpected vector type");
4960 assert((Subtarget.hasVLX() || VT.getVectorNumElements() >= 8) &&
4961 "Unexpected vector type");
4962 Vec = DAG.getConstant(0, dl, VT);
4964 unsigned Num32BitElts = VT.getSizeInBits() / 32;
4965 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
4967 return DAG.getBitcast(VT, Vec);
4970 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
4971 const SDLoc &dl, unsigned vectorWidth) {
4972 EVT VT = Vec.getValueType();
4973 EVT ElVT = VT.getVectorElementType();
4974 unsigned Factor = VT.getSizeInBits()/vectorWidth;
4975 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
4976 VT.getVectorNumElements()/Factor);
4978 // Extract the relevant vectorWidth bits by generating an EXTRACT_SUBVECTOR.
4979 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
4980 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
4982 // This is the index of the first element of the vectorWidth-bit chunk
4983 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
4984 IdxVal &= ~(ElemsPerChunk - 1);
4986 // If the input is a buildvector just emit a smaller one.
4987 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
4988 return DAG.getBuildVector(
4989 ResultVT, dl, makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
4991 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
4992 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
4995 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
4996 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
4997 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
4998 /// instructions or a simple subregister reference. Idx is an index in the
4999 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
5000 /// lowering EXTRACT_VECTOR_ELT operations easier.
5001 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5002 SelectionDAG &DAG, const SDLoc &dl) {
5003 assert((Vec.getValueType().is256BitVector() ||
5004 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5005 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5008 /// Generate a DAG to grab 256-bits from a 512-bit vector.
5009 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5010 SelectionDAG &DAG, const SDLoc &dl) {
5011 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5012 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5015 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5016 SelectionDAG &DAG, const SDLoc &dl,
5017 unsigned vectorWidth) {
5018 assert((vectorWidth == 128 || vectorWidth == 256) &&
5019 "Unsupported vector width");
5020 // Inserting an UNDEF subvector simply yields Result.
5023 EVT VT = Vec.getValueType();
5024 EVT ElVT = VT.getVectorElementType();
5025 EVT ResultVT = Result.getValueType();
5027 // Insert the relevant vectorWidth bits.
5028 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5029 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5031 // This is the index of the first element of the vectorWidth-bit chunk
5032 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
5033 IdxVal &= ~(ElemsPerChunk - 1);
5035 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5036 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5039 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
5040 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5041 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5042 /// simple superregister reference. Idx is an index in the 128 bits
5043 /// we want. It need not be aligned to a 128-bit boundary. That makes
5044 /// lowering INSERT_VECTOR_ELT operations easier.
5045 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5046 SelectionDAG &DAG, const SDLoc &dl) {
5047 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5048 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5051 static SDValue insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5052 SelectionDAG &DAG, const SDLoc &dl) {
5053 assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
5054 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
5057 /// Insert an i1 subvector into an i1 vector.
5058 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5059 const X86Subtarget &Subtarget) {
5062 SDValue Vec = Op.getOperand(0);
5063 SDValue SubVec = Op.getOperand(1);
5064 SDValue Idx = Op.getOperand(2);
5066 if (!isa<ConstantSDNode>(Idx))
5069 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5070 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
5073 MVT OpVT = Op.getSimpleValueType();
5074 MVT SubVecVT = SubVec.getSimpleValueType();
5075 unsigned NumElems = OpVT.getVectorNumElements();
5076 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
5078 assert(IdxVal + SubVecNumElems <= NumElems &&
5079 IdxVal % SubVecVT.getSizeInBits() == 0 &&
5080 "Unexpected index value in INSERT_SUBVECTOR");
5082 // There are 3 possible cases:
5083 // 1. Subvector should be inserted in the lower part (IdxVal == 0)
5084 // 2. Subvector should be inserted in the upper part
5085 // (IdxVal + SubVecNumElems == NumElems)
5086 // 3. Subvector should be inserted in the middle (for example v2i1
5087 // to v16i1, index 2)
5089 // Extend to a natively supported kshift width.
5090 MVT MinVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
5091 MVT WideOpVT = OpVT;
5092 if (OpVT.getSizeInBits() < MinVT.getStoreSizeInBits())
5095 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
5096 SDValue Undef = DAG.getUNDEF(WideOpVT);
5097 SDValue WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5098 Undef, SubVec, ZeroIdx);
5100 // Extract the sub-vector if required.
5101 auto ExtractSubVec = [&](SDValue V) {
5102 return (WideOpVT == OpVT) ? V : DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
5106 if (Vec.isUndef()) {
5108 SDValue ShiftBits = DAG.getConstant(IdxVal, dl, MVT::i8);
5109 WideSubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, WideSubVec,
5112 return ExtractSubVec(WideSubVec);
5115 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
5116 NumElems = WideOpVT.getVectorNumElements();
5117 unsigned ShiftLeft = NumElems - SubVecNumElems;
5118 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5119 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, WideSubVec,
5120 DAG.getConstant(ShiftLeft, dl, MVT::i8));
5121 Vec = ShiftRight ? DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
5122 DAG.getConstant(ShiftRight, dl, MVT::i8)) : Vec;
5123 return ExtractSubVec(Vec);
5127 // Zero lower bits of the Vec
5128 SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
5129 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5130 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5131 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5132 // Merge them together, SubVec should be zero extended.
5133 WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5134 getZeroVector(WideOpVT, Subtarget, DAG, dl),
5136 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
5137 return ExtractSubVec(Vec);
5140 // Simple case when we put the subvector in the upper part
5141 if (IdxVal + SubVecNumElems == NumElems) {
5142 // Zero upper bits of the Vec
5143 WideSubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, WideSubVec,
5144 DAG.getConstant(IdxVal, dl, MVT::i8));
5145 SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
5146 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5147 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5148 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5149 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
5150 return ExtractSubVec(Vec);
5152 // Subvector should be inserted in the middle - use shuffle
5153 WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef,
5155 SmallVector<int, 64> Mask;
5156 for (unsigned i = 0; i < NumElems; ++i)
5157 Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
5159 return DAG.getVectorShuffle(OpVT, dl, WideSubVec, Vec, Mask);
5162 /// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
5163 /// instructions. This is used because creating CONCAT_VECTORS nodes of
5164 /// BUILD_VECTORs returns a larger BUILD_VECTOR while we're trying to lower
5165 /// large BUILD_VECTORs.
5166 static SDValue concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
5167 unsigned NumElems, SelectionDAG &DAG,
5169 SDValue V = insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
5170 return insert128BitVector(V, V2, NumElems / 2, DAG, dl);
5173 static SDValue concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
5174 unsigned NumElems, SelectionDAG &DAG,
5176 SDValue V = insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
5177 return insert256BitVector(V, V2, NumElems / 2, DAG, dl);
5180 /// Returns a vector of specified type with all bits set.
5181 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
5182 /// Then bitcast to their original type, ensuring they get CSE'd.
5183 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5184 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5185 "Expected a 128/256/512-bit vector type");
5187 APInt Ones = APInt::getAllOnesValue(32);
5188 unsigned NumElts = VT.getSizeInBits() / 32;
5189 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
5190 return DAG.getBitcast(VT, Vec);
5193 static SDValue getExtendInVec(unsigned Opc, const SDLoc &DL, EVT VT, SDValue In,
5194 SelectionDAG &DAG) {
5195 EVT InVT = In.getValueType();
5196 assert((X86ISD::VSEXT == Opc || X86ISD::VZEXT == Opc) && "Unexpected opcode");
5198 if (VT.is128BitVector() && InVT.is128BitVector())
5199 return X86ISD::VSEXT == Opc ? DAG.getSignExtendVectorInReg(In, DL, VT)
5200 : DAG.getZeroExtendVectorInReg(In, DL, VT);
5202 // For 256-bit vectors, we only need the lower (128-bit) input half.
5203 // For 512-bit vectors, we only need the lower input half or quarter.
5204 if (VT.getSizeInBits() > 128 && InVT.getSizeInBits() > 128) {
5205 int Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
5206 In = extractSubVector(In, 0, DAG, DL,
5207 std::max(128, (int)VT.getSizeInBits() / Scale));
5210 return DAG.getNode(Opc, DL, VT, In);
5213 /// Generate unpacklo/unpackhi shuffle mask.
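/// For example, for v4f32 with Unary = false, the Lo mask is <0, 4, 1, 5> and
/// the Hi mask is <2, 6, 3, 7>.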
5214 static void createUnpackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask, bool Lo,
5216 assert(Mask.empty() && "Expected an empty shuffle mask vector");
5217 int NumElts = VT.getVectorNumElements();
5218 int NumEltsInLane = 128 / VT.getScalarSizeInBits();
5220 for (int i = 0; i < NumElts; ++i) {
5221 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
5222 int Pos = (i % NumEltsInLane) / 2 + LaneStart;
5223 Pos += (Unary ? 0 : NumElts * (i % 2));
5224 Pos += (Lo ? 0 : NumEltsInLane / 2);
5225 Mask.push_back(Pos);
5229 /// Returns a vector_shuffle node for an unpackl operation.
5230 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5231 SDValue V1, SDValue V2) {
5232 SmallVector<int, 8> Mask;
5233 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
5234 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5237 /// Returns a vector_shuffle node for an unpackh operation.
5238 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5239 SDValue V1, SDValue V2) {
5240 SmallVector<int, 8> Mask;
5241 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
5242 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5245 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
5246 /// This produces a shuffle where the low element of V2 is swizzled into the
5247 /// zero/undef vector, landing at element Idx.
5248 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5249 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
5251 const X86Subtarget &Subtarget,
5252 SelectionDAG &DAG) {
5253 MVT VT = V2.getSimpleValueType();
5255 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5256 int NumElems = VT.getVectorNumElements();
5257 SmallVector<int, 16> MaskVec(NumElems);
5258 for (int i = 0; i != NumElems; ++i)
5259 // If this is the insertion idx, put the low elt of V2 here.
5260 MaskVec[i] = (i == Idx) ? NumElems : i;
5261 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
5264 static SDValue peekThroughBitcasts(SDValue V) {
5265 while (V.getNode() && V.getOpcode() == ISD::BITCAST)
5266 V = V.getOperand(0);
5270 static SDValue peekThroughOneUseBitcasts(SDValue V) {
5271 while (V.getNode() && V.getOpcode() == ISD::BITCAST &&
5272 V.getOperand(0).hasOneUse())
5273 V = V.getOperand(0);
5277 static const Constant *getTargetConstantFromNode(SDValue Op) {
5278 Op = peekThroughBitcasts(Op);
5280 auto *Load = dyn_cast<LoadSDNode>(Op);
5284 SDValue Ptr = Load->getBasePtr();
5285 if (Ptr->getOpcode() == X86ISD::Wrapper ||
5286 Ptr->getOpcode() == X86ISD::WrapperRIP)
5287 Ptr = Ptr->getOperand(0);
5289 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
5290 if (!CNode || CNode->isMachineConstantPoolEntry())
5293 return dyn_cast<Constant>(CNode->getConstVal());
5296 // Extract raw constant bits from constant pools.
5297 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
5299 SmallVectorImpl<APInt> &EltBits,
5300 bool AllowWholeUndefs = true,
5301 bool AllowPartialUndefs = true) {
5302 assert(EltBits.empty() && "Expected an empty EltBits vector");
5304 Op = peekThroughBitcasts(Op);
5306 EVT VT = Op.getValueType();
5307 unsigned SizeInBits = VT.getSizeInBits();
5308 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
5309 unsigned NumElts = SizeInBits / EltSizeInBits;
5311 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
5312 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
5314 // Extract all the undef/constant element data and pack into single bitsets.
5315 APInt UndefBits(SizeInBits, 0);
5316 APInt MaskBits(SizeInBits, 0);
5318 // Split the undef/constant single bitset data into the target elements.
5319 auto SplitBitData = [&]() {
5320 // Don't split if we don't allow undef bits.
5321 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
5322 if (UndefBits.getBoolValue() && !AllowUndefs)
5325 UndefElts = APInt(NumElts, 0);
5326 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
5328 for (unsigned i = 0; i != NumElts; ++i) {
5329 unsigned BitOffset = i * EltSizeInBits;
5330 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
5332 // Only treat an element as UNDEF if all bits are UNDEF.
5333 if (UndefEltBits.isAllOnesValue()) {
5334 if (!AllowWholeUndefs)
5336 UndefElts.setBit(i);
5340 // If only some bits are UNDEF then treat them as zero (or bail if partial undefs are not allowed).
5342 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
5345 APInt Bits = MaskBits.extractBits(EltSizeInBits, BitOffset);
5346 EltBits[i] = Bits.getZExtValue();
5351 // Collect constant bits and insert into mask/undef bit masks.
5352 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
5353 unsigned BitOffset) {
5356 if (isa<UndefValue>(Cst)) {
5357 unsigned CstSizeInBits = Cst->getType()->getPrimitiveSizeInBits();
5358 Undefs.setBits(BitOffset, BitOffset + CstSizeInBits);
5361 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
5362 Mask.insertBits(CInt->getValue(), BitOffset);
5365 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
5366 Mask.insertBits(CFP->getValueAPF().bitcastToAPInt(), BitOffset);
5372 // Extract constant bits from build vector.
5373 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
5374 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
5375 const SDValue &Src = Op.getOperand(i);
5376 unsigned BitOffset = i * SrcEltSizeInBits;
5377 if (Src.isUndef()) {
5378 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
5381 auto *Cst = cast<ConstantSDNode>(Src);
5382 APInt Bits = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
5383 MaskBits.insertBits(Bits, BitOffset);
5385 return SplitBitData();
5388 // Extract constant bits from constant pool vector.
5389 if (auto *Cst = getTargetConstantFromNode(Op)) {
5390 Type *CstTy = Cst->getType();
5391 if (!CstTy->isVectorTy() || (SizeInBits != CstTy->getPrimitiveSizeInBits()))
5394 unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
5395 for (unsigned i = 0, e = CstTy->getVectorNumElements(); i != e; ++i)
5396 if (!CollectConstantBits(Cst->getAggregateElement(i), MaskBits, UndefBits,
5397 i * CstEltSizeInBits))
5400 return SplitBitData();
5403 // Extract constant bits from a broadcasted constant pool scalar.
5404 if (Op.getOpcode() == X86ISD::VBROADCAST &&
5405 EltSizeInBits <= SrcEltSizeInBits) {
5406 if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
5407 APInt Bits(SizeInBits, 0);
5408 APInt Undefs(SizeInBits, 0);
5409 if (CollectConstantBits(Broadcast, Bits, Undefs, 0)) {
5410 for (unsigned i = 0; i != NumSrcElts; ++i) {
5411 MaskBits |= Bits.shl(i * SrcEltSizeInBits);
5412 UndefBits |= Undefs.shl(i * SrcEltSizeInBits);
5414 return SplitBitData();
5419 // Extract a rematerialized scalar constant insertion.
5420 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
5421 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
5422 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
5423 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
5424 MaskBits = CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
5425 MaskBits = MaskBits.zext(SizeInBits);
5426 return SplitBitData();
5432 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
5433 unsigned MaskEltSizeInBits,
5434 SmallVectorImpl<uint64_t> &RawMask) {
5436 SmallVector<APInt, 64> EltBits;
5438 // Extract the raw target constant bits.
5439 // FIXME: We currently don't support UNDEF bits or mask entries.
5440 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
5441 EltBits, /* AllowWholeUndefs */ false,
5442 /* AllowPartialUndefs */ false))
5445 // Insert the extracted elements into the mask.
5446 for (APInt Elt : EltBits)
5447 RawMask.push_back(Elt.getZExtValue());
5452 /// Calculates the shuffle mask corresponding to the target-specific opcode.
5453 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
5454 /// operands in \p Ops, and returns true.
5455 /// Sets \p IsUnary to true if only one source is used. Note that this will set
5456 /// IsUnary for shuffles which use a single input multiple times, and in those
5457 /// cases it will adjust the mask to only have indices within that single input.
5458 /// It is an error to call this with non-empty Mask/Ops vectors.
5459 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
5460 SmallVectorImpl<SDValue> &Ops,
5461 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5462 unsigned NumElems = VT.getVectorNumElements();
5465 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
5466 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
5469 bool IsFakeUnary = false;
5470 switch(N->getOpcode()) {
5471 case X86ISD::BLENDI:
5472 ImmN = N->getOperand(N->getNumOperands()-1);
5473 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5474 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5477 ImmN = N->getOperand(N->getNumOperands()-1);
5478 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5479 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5481 case X86ISD::INSERTPS:
5482 ImmN = N->getOperand(N->getNumOperands()-1);
5483 DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5484 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5486 case X86ISD::UNPCKH:
5487 DecodeUNPCKHMask(VT, Mask);
5488 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5490 case X86ISD::UNPCKL:
5491 DecodeUNPCKLMask(VT, Mask);
5492 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5494 case X86ISD::MOVHLPS:
5495 DecodeMOVHLPSMask(NumElems, Mask);
5496 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5498 case X86ISD::MOVLHPS:
5499 DecodeMOVLHPSMask(NumElems, Mask);
5500 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5502 case X86ISD::PALIGNR:
5503 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5504 ImmN = N->getOperand(N->getNumOperands()-1);
5505 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5506 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
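// The decoded PALIGNR mask uses the instruction's second source as its first
// input (the low result bytes come from it), so push the operands in reverse.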
5507 Ops.push_back(N->getOperand(1));
5508 Ops.push_back(N->getOperand(0));
5510 case X86ISD::VSHLDQ:
5511 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5512 ImmN = N->getOperand(N->getNumOperands() - 1);
5513 DecodePSLLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5516 case X86ISD::VSRLDQ:
5517 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5518 ImmN = N->getOperand(N->getNumOperands() - 1);
5519 DecodePSRLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5522 case X86ISD::PSHUFD:
5523 case X86ISD::VPERMILPI:
5524 ImmN = N->getOperand(N->getNumOperands()-1);
5525 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5528 case X86ISD::PSHUFHW:
5529 ImmN = N->getOperand(N->getNumOperands()-1);
5530 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5533 case X86ISD::PSHUFLW:
5534 ImmN = N->getOperand(N->getNumOperands()-1);
5535 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5538 case X86ISD::VZEXT_MOVL:
5539 DecodeZeroMoveLowMask(VT, Mask);
5542 case X86ISD::VBROADCAST: {
5543 SDValue N0 = N->getOperand(0);
5544 // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
5545 // add the pre-extracted value to the Ops vector.
5546 if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5547 N0.getOperand(0).getValueType() == VT &&
5548 N0.getConstantOperandVal(1) == 0)
5549 Ops.push_back(N0.getOperand(0));
5551 // We only decode broadcasts of same-sized vectors, unless the broadcast
5552 // came from an extract from the original width. If we found one, we
5553 // pushed it onto the Ops vector above.
5554 if (N0.getValueType() == VT || !Ops.empty()) {
5555 DecodeVectorBroadcast(VT, Mask);
5561 case X86ISD::VPERMILPV: {
5563 SDValue MaskNode = N->getOperand(1);
5564 unsigned MaskEltSize = VT.getScalarSizeInBits();
5565 SmallVector<uint64_t, 32> RawMask;
5566 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
5567 DecodeVPERMILPMask(VT, RawMask, Mask);
5570 if (auto *C = getTargetConstantFromNode(MaskNode)) {
5571 DecodeVPERMILPMask(C, MaskEltSize, Mask);
5576 case X86ISD::PSHUFB: {
5578 SDValue MaskNode = N->getOperand(1);
5579 SmallVector<uint64_t, 32> RawMask;
5580 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
5581 DecodePSHUFBMask(RawMask, Mask);
5584 if (auto *C = getTargetConstantFromNode(MaskNode)) {
5585 DecodePSHUFBMask(C, Mask);
5590 case X86ISD::VPERMI:
5591 ImmN = N->getOperand(N->getNumOperands()-1);
5592 DecodeVPERMMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5597 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5599 case X86ISD::VPERM2X128:
5600 ImmN = N->getOperand(N->getNumOperands()-1);
5601 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5602 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5604 case X86ISD::MOVSLDUP:
5605 DecodeMOVSLDUPMask(VT, Mask);
5608 case X86ISD::MOVSHDUP:
5609 DecodeMOVSHDUPMask(VT, Mask);
5612 case X86ISD::MOVDDUP:
5613 DecodeMOVDDUPMask(VT, Mask);
5616 case X86ISD::MOVLHPD:
5617 case X86ISD::MOVLPD:
5618 case X86ISD::MOVLPS:
5619 // Not yet implemented
5621 case X86ISD::VPERMIL2: {
5622 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5623 unsigned MaskEltSize = VT.getScalarSizeInBits();
5624 SDValue MaskNode = N->getOperand(2);
5625 SDValue CtrlNode = N->getOperand(3);
5626 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
5627 unsigned CtrlImm = CtrlOp->getZExtValue();
5628 SmallVector<uint64_t, 32> RawMask;
5629 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
5630 DecodeVPERMIL2PMask(VT, CtrlImm, RawMask, Mask);
5633 if (auto *C = getTargetConstantFromNode(MaskNode)) {
5634 DecodeVPERMIL2PMask(C, CtrlImm, MaskEltSize, Mask);
5640 case X86ISD::VPPERM: {
5641 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5642 SDValue MaskNode = N->getOperand(2);
5643 SmallVector<uint64_t, 32> RawMask;
5644 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
5645 DecodeVPPERMMask(RawMask, Mask);
5648 if (auto *C = getTargetConstantFromNode(MaskNode)) {
5649 DecodeVPPERMMask(C, Mask);
5654 case X86ISD::VPERMV: {
5656 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
5657 Ops.push_back(N->getOperand(1));
5658 SDValue MaskNode = N->getOperand(0);
5659 SmallVector<uint64_t, 32> RawMask;
5660 unsigned MaskEltSize = VT.getScalarSizeInBits();
5661 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
5662 DecodeVPERMVMask(RawMask, Mask);
5665 if (auto *C = getTargetConstantFromNode(MaskNode)) {
5666 DecodeVPERMVMask(C, MaskEltSize, Mask);
5671 case X86ISD::VPERMV3: {
5672 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
5673 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
5674 Ops.push_back(N->getOperand(0));
5675 Ops.push_back(N->getOperand(2));
5676 SDValue MaskNode = N->getOperand(1);
5677 unsigned MaskEltSize = VT.getScalarSizeInBits();
5678 if (auto *C = getTargetConstantFromNode(MaskNode)) {
5679 DecodeVPERMV3Mask(C, MaskEltSize, Mask);
5684 case X86ISD::VPERMIV3: {
5685 IsUnary = IsFakeUnary = N->getOperand(1) == N->getOperand(2);
5686 // Unlike most shuffle nodes, VPERMIV3's mask operand is the first one.
5687 Ops.push_back(N->getOperand(1));
5688 Ops.push_back(N->getOperand(2));
5689 SDValue MaskNode = N->getOperand(0);
5690 unsigned MaskEltSize = VT.getScalarSizeInBits();
5691 if (auto *C = getTargetConstantFromNode(MaskNode)) {
5692 DecodeVPERMV3Mask(C, MaskEltSize, Mask);
5697 default: llvm_unreachable("unknown target shuffle node");
5700 // Empty mask indicates the decode failed.
5704 // Check if we're getting a shuffle mask with zeroed elements.
5705 if (!AllowSentinelZero)
5706 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
5709 // If we have a fake unary shuffle, the shuffle mask is spread across two
5710 // inputs that are actually the same node. Re-map the mask to always point
5711 // into the first input.
5714 if (M >= (int)Mask.size())
5717 // If we didn't already add operands in the opcode-specific code, default to
5718 // adding 1 or 2 operands starting at 0.
5720 Ops.push_back(N->getOperand(0));
5721 if (!IsUnary || IsFakeUnary)
5722 Ops.push_back(N->getOperand(1));
5728 /// Check a target shuffle mask's inputs to see if we can set any values to
5729 /// SM_SentinelZero - this is for elements that are known to be zero
5730 /// (not just zeroable) from their inputs.
5731 /// Returns true if the target shuffle mask was decoded.
5732 static bool setTargetShuffleZeroElements(SDValue N,
5733 SmallVectorImpl<int> &Mask,
5734 SmallVectorImpl<SDValue> &Ops) {
5736 if (!isTargetShuffle(N.getOpcode()))
5739 MVT VT = N.getSimpleValueType();
5740 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
5743 SDValue V1 = Ops[0];
5744 SDValue V2 = IsUnary ? V1 : Ops[1];
5746 V1 = peekThroughBitcasts(V1);
5747 V2 = peekThroughBitcasts(V2);
5749 assert((VT.getSizeInBits() % Mask.size()) == 0 &&
5750 "Illegal split of shuffle value type");
5751 unsigned EltSizeInBits = VT.getSizeInBits() / Mask.size();
5753 // Extract known constant input data.
5754 APInt UndefSrcElts[2];
5755 SmallVector<APInt, 32> SrcEltBits[2];
5756 bool IsSrcConstant[2] = {
5757 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
5758 SrcEltBits[0], true, false),
5759 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
5760 SrcEltBits[1], true, false)};
5762 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
5765 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
5769 // Determine shuffle input and normalize the mask.
5770 unsigned SrcIdx = M / Size;
5771 SDValue V = M < Size ? V1 : V2;
5774 // We are referencing an UNDEF input.
5776 Mask[i] = SM_SentinelUndef;
5780 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
5781 // TODO: We currently only set UNDEF for integer types - floats use the same
5782 // registers as vectors and many of the scalar folded loads rely on the
5783 // SCALAR_TO_VECTOR pattern.
5784 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
5785 (Size % V.getValueType().getVectorNumElements()) == 0) {
5786 int Scale = Size / V.getValueType().getVectorNumElements();
5787 int Idx = M / Scale;
5788 if (Idx != 0 && !VT.isFloatingPoint())
5789 Mask[i] = SM_SentinelUndef;
5790 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
5791 Mask[i] = SM_SentinelZero;
5795 // Attempt to extract from the source's constant bits.
5796 if (IsSrcConstant[SrcIdx]) {
5797 if (UndefSrcElts[SrcIdx][M])
5798 Mask[i] = SM_SentinelUndef;
5799 else if (SrcEltBits[SrcIdx][M] == 0)
5800 Mask[i] = SM_SentinelZero;
5804 assert(VT.getVectorNumElements() == Mask.size() &&
5805 "Different mask size from vector size!");
5809 // Attempt to decode ops that could be represented as a shuffle mask.
5810 // The decoded shuffle mask may contain a different number of elements than the
5811 // destination value type.
5812 static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
5813 SmallVectorImpl<SDValue> &Ops) {
5817 MVT VT = N.getSimpleValueType();
5818 unsigned NumElts = VT.getVectorNumElements();
5819 unsigned NumSizeInBits = VT.getSizeInBits();
5820 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
5821 assert((NumBitsPerElt % 8) == 0 && (NumSizeInBits % 8) == 0 &&
5822 "Expected byte aligned value types");
5824 unsigned Opcode = N.getOpcode();
5827 case X86ISD::ANDNP: {
5828 // Attempt to decode as a per-byte mask.
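// An AND/ANDNP where the constant operand's bytes are each 0x00 or 0xFF acts as
// a per-byte select between zero and the other operand, so it can be modelled
// as a shuffle mask of SM_SentinelZero and identity indices.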
5830 SmallVector<APInt, 32> EltBits;
5831 SDValue N0 = N.getOperand(0);
5832 SDValue N1 = N.getOperand(1);
5833 bool IsAndN = (X86ISD::ANDNP == Opcode);
5834 uint64_t ZeroMask = IsAndN ? 255 : 0;
5835 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
5837 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
5839 Mask.push_back(SM_SentinelUndef);
5842 uint64_t ByteBits = EltBits[i].getZExtValue();
5843 if (ByteBits != 0 && ByteBits != 255)
5845 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
5847 Ops.push_back(IsAndN ? N1 : N0);
5850 case ISD::SCALAR_TO_VECTOR: {
5851 // Match against a scalar_to_vector of an extract from a vector;
5852 // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
5853 SDValue N0 = N.getOperand(0);
5856 if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5857 N0.getOperand(0).getValueType() == VT) {
5859 } else if (N0.getOpcode() == ISD::AssertZext &&
5860 N0.getOperand(0).getOpcode() == X86ISD::PEXTRW &&
5861 cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i16) {
5862 SrcExtract = N0.getOperand(0);
5863 assert(SrcExtract.getOperand(0).getValueType() == MVT::v8i16);
5864 } else if (N0.getOpcode() == ISD::AssertZext &&
5865 N0.getOperand(0).getOpcode() == X86ISD::PEXTRB &&
5866 cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i8) {
5867 SrcExtract = N0.getOperand(0);
5868 assert(SrcExtract.getOperand(0).getValueType() == MVT::v16i8);
5871 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)) ||
5872 NumElts <= SrcExtract.getConstantOperandVal(1))
5875 SDValue SrcVec = SrcExtract.getOperand(0);
5876 EVT SrcVT = SrcVec.getValueType();
5877 unsigned NumSrcElts = SrcVT.getVectorNumElements();
5878 unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
5880 Ops.push_back(SrcVec);
5881 Mask.push_back(SrcExtract.getConstantOperandVal(1));
5882 Mask.append(NumZeros, SM_SentinelZero);
5883 Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
5886 case X86ISD::PINSRB:
5887 case X86ISD::PINSRW: {
5888 SDValue InVec = N.getOperand(0);
5889 SDValue InScl = N.getOperand(1);
5890 uint64_t InIdx = N.getConstantOperandVal(2);
5891 assert(InIdx < NumElts && "Illegal insertion index");
5893 // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
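// For example, PINSRW(V, 0, 2) on v8i16 yields the mask
// <0, 1, SM_SentinelZero, 3, 4, 5, 6, 7>.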
5894 if (X86::isZeroNode(InScl)) {
5895 Ops.push_back(InVec);
5896 for (unsigned i = 0; i != NumElts; ++i)
5897 Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
5901 // Attempt to recognise a PINSR*(ASSERTZEXT(PEXTR*)) shuffle pattern.
5902 // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
5904 (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
5905 if (InScl.getOpcode() != ISD::AssertZext ||
5906 InScl.getOperand(0).getOpcode() != ExOp)
5909 SDValue ExVec = InScl.getOperand(0).getOperand(0);
5910 uint64_t ExIdx = InScl.getOperand(0).getConstantOperandVal(1);
5911 assert(ExIdx < NumElts && "Illegal extraction index");
5912 Ops.push_back(InVec);
5913 Ops.push_back(ExVec);
5914 for (unsigned i = 0; i != NumElts; ++i)
5915 Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
5919 case X86ISD::VSRLI: {
5920 uint64_t ShiftVal = N.getConstantOperandVal(1);
5921 // Out of range bit shifts are guaranteed to be zero.
5922 if (NumBitsPerElt <= ShiftVal) {
5923 Mask.append(NumElts, SM_SentinelZero);
5927 // We can only decode 'whole byte' bit shifts as shuffles.
5928 if ((ShiftVal % 8) != 0)
5931 uint64_t ByteShift = ShiftVal / 8;
5932 unsigned NumBytes = NumSizeInBits / 8;
5933 unsigned NumBytesPerElt = NumBitsPerElt / 8;
5934 Ops.push_back(N.getOperand(0));
5936 // Clear mask to all zeros and insert the shifted byte indices.
5937 Mask.append(NumBytes, SM_SentinelZero);
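// For example, a v2i64 VSRLI by 16 bits becomes the byte shuffle
// <2,3,4,5,6,7,Z,Z,10,11,12,13,14,15,Z,Z>, where Z is a zero byte.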
5939 if (X86ISD::VSHLI == Opcode) {
5940 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
5941 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
5942 Mask[i + j] = i + j - ByteShift;
5944 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
5945 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
5946 Mask[i + j - ByteShift] = i + j;
5950 case ISD::ZERO_EXTEND_VECTOR_INREG:
5951 case X86ISD::VZEXT: {
5952 // TODO - add support for VPMOVZX with smaller input vector types.
5953 SDValue Src = N.getOperand(0);
5954 MVT SrcVT = Src.getSimpleValueType();
5955 if (NumSizeInBits != SrcVT.getSizeInBits())
5957 DecodeZeroExtendMask(SrcVT.getScalarType(), VT, Mask);
5966 /// Removes unused shuffle source inputs and adjusts the shuffle mask accordingly.
5967 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
5968 SmallVectorImpl<int> &Mask) {
5969 int MaskWidth = Mask.size();
5970 SmallVector<SDValue, 16> UsedInputs;
5971 for (int i = 0, e = Inputs.size(); i < e; ++i) {
5972 int lo = UsedInputs.size() * MaskWidth;
5973 int hi = lo + MaskWidth;
5974 if (any_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
5975 UsedInputs.push_back(Inputs[i]);
5982 Inputs = UsedInputs;
5985 /// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's inputs
5986 /// and set the SM_SentinelUndef and SM_SentinelZero values. Then checks the
5987 /// remaining input indices in case we now have a unary shuffle, and adjusts the
5988 /// inputs accordingly.
5989 /// Returns true if the target shuffle mask was decoded.
5990 static bool resolveTargetShuffleInputs(SDValue Op,
5991 SmallVectorImpl<SDValue> &Inputs,
5992 SmallVectorImpl<int> &Mask) {
5993 if (!setTargetShuffleZeroElements(Op, Mask, Inputs))
5994 if (!getFauxShuffleMask(Op, Mask, Inputs))
5997 resolveTargetShuffleInputsAndMask(Inputs, Mask);
6001 /// Returns the scalar element that will make up the ith
6002 /// element of the result of the vector shuffle.
6003 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
6006 return SDValue(); // Limit search depth.
6008 SDValue V = SDValue(N, 0);
6009 EVT VT = V.getValueType();
6010 unsigned Opcode = V.getOpcode();
6012 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
6013 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
6014 int Elt = SV->getMaskElt(Index);
6017 return DAG.getUNDEF(VT.getVectorElementType());
6019 unsigned NumElems = VT.getVectorNumElements();
6020 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
6021 : SV->getOperand(1);
6022 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
6025 // Recurse into target specific vector shuffles to find scalars.
6026 if (isTargetShuffle(Opcode)) {
6027 MVT ShufVT = V.getSimpleValueType();
6028 MVT ShufSVT = ShufVT.getVectorElementType();
6029 int NumElems = (int)ShufVT.getVectorNumElements();
6030 SmallVector<int, 16> ShuffleMask;
6031 SmallVector<SDValue, 16> ShuffleOps;
6034 if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
6037 int Elt = ShuffleMask[Index];
6038 if (Elt == SM_SentinelZero)
6039 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
6040 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
6041 if (Elt == SM_SentinelUndef)
6042 return DAG.getUNDEF(ShufSVT);
6044 assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
6045 SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
6046 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
6050 // Actual nodes that may contain scalar elements
6051 if (Opcode == ISD::BITCAST) {
6052 V = V.getOperand(0);
6053 EVT SrcVT = V.getValueType();
6054 unsigned NumElems = VT.getVectorNumElements();
6056 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
6060 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
6061 return (Index == 0) ? V.getOperand(0)
6062 : DAG.getUNDEF(VT.getVectorElementType());
6064 if (V.getOpcode() == ISD::BUILD_VECTOR)
6065 return V.getOperand(Index);
6070 /// Custom lower build_vector of v16i8.
6071 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
6072 unsigned NumNonZero, unsigned NumZero,
6074 const X86Subtarget &Subtarget) {
6075 if (NumNonZero > 8 && !Subtarget.hasSSE41())
6082 // SSE4.1 - use PINSRB to insert each byte directly.
6083 if (Subtarget.hasSSE41()) {
6084 for (unsigned i = 0; i < 16; ++i) {
6085 bool IsNonZero = (NonZeros & (1 << i)) != 0;
6087 // If the build vector contains zeros, or our first insertion is not at
6088 // the first index, insert into a zero vector to break any register
6089 // dependency; otherwise use SCALAR_TO_VECTOR/VZEXT_MOVL.
6092 if (NumZero || 0 != i)
6093 V = getZeroVector(MVT::v16i8, Subtarget, DAG, dl);
6095 assert(0 == i && "Expected insertion into zero-index");
6096 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6097 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6098 V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
6099 V = DAG.getBitcast(MVT::v16i8, V);
6103 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v16i8, V,
6104 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
6111 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
6112 for (unsigned i = 0; i < 16; ++i) {
6113 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
6114 if (ThisIsNonZero && First) {
6116 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
6118 V = DAG.getUNDEF(MVT::v8i16);
6123 // FIXME: Investigate extending to i32 instead of just i16.
6124 // FIXME: Investigate combining the first 4 bytes as an i32 instead.
6125 SDValue ThisElt, LastElt;
6126 bool LastIsNonZero = (NonZeros & (1 << (i - 1))) != 0;
6127 if (LastIsNonZero) {
6129 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i - 1));
6131 if (ThisIsNonZero) {
6132 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
6133 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, ThisElt,
6134 DAG.getConstant(8, dl, MVT::i8));
6136 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
6142 V = NumZero ? DAG.getZExtOrTrunc(ThisElt, dl, MVT::i32)
6143 : DAG.getAnyExtOrTrunc(ThisElt, dl, MVT::i32);
6144 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6145 V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
6146 V = DAG.getBitcast(MVT::v8i16, V);
6148 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
6149 DAG.getIntPtrConstant(i / 2, dl));
6155 return DAG.getBitcast(MVT::v16i8, V);
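// For example, on a pre-SSE4.1 target the byte pair (Op[2], Op[3]) is merged
// into the 16-bit value (zext(Op[3]) << 8) | zext(Op[2]) and inserted into
// word 1 of the v8i16 with PINSRW; the final bitcast back to v16i8 restores
// the requested element type.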
6158 /// Custom lower build_vector of v8i16.
6159 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
6160 unsigned NumNonZero, unsigned NumZero,
6162 const X86Subtarget &Subtarget) {
6163 if (NumNonZero > 4 && !Subtarget.hasSSE41())
6169 for (unsigned i = 0; i < 8; ++i) {
6170 bool IsNonZero = (NonZeros & (1 << i)) != 0;
6172 // If the build vector contains zeros or our first insertion is not the
6173 // first index, then insert into a zero vector to break any register
6174 // dependency; else use SCALAR_TO_VECTOR/VZEXT_MOVL.
6177 if (NumZero || 0 != i)
6178 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
6180 assert(0 == i && "Expected insertion into zero-index");
6181 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6182 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6183 V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
6184 V = DAG.getBitcast(MVT::v8i16, V);
6188 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V,
6189 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
6196 /// Custom lower build_vector of v4i32 or v4f32.
6197 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
6198 const X86Subtarget &Subtarget) {
6199 // Find all zeroable elements.
6200 std::bitset<4> Zeroable;
6201 for (int i=0; i < 4; ++i) {
6202 SDValue Elt = Op->getOperand(i);
6203 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
6205 assert(Zeroable.size() - Zeroable.count() > 1 &&
6206 "We expect at least two non-zero elements!");
6208 // We only know how to deal with build_vector nodes where elements are either
6209 // zeroable or extract_vector_elt with constant index.
6210 SDValue FirstNonZero;
6211 unsigned FirstNonZeroIdx;
6212 for (unsigned i=0; i < 4; ++i) {
6215 SDValue Elt = Op->getOperand(i);
6216 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6217 !isa<ConstantSDNode>(Elt.getOperand(1)))
6219 // Make sure that this node is extracting from a 128-bit vector.
6220 MVT VT = Elt.getOperand(0).getSimpleValueType();
6221 if (!VT.is128BitVector())
6223 if (!FirstNonZero.getNode()) {
6225 FirstNonZeroIdx = i;
6229 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
6230 SDValue V1 = FirstNonZero.getOperand(0);
6231 MVT VT = V1.getSimpleValueType();
6233 // See if this build_vector can be lowered as a blend with zero.
6235 unsigned EltMaskIdx, EltIdx;
6237 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
6238 if (Zeroable[EltIdx]) {
6239 // The zero vector will be on the right hand side.
6240 Mask[EltIdx] = EltIdx+4;
6244 Elt = Op->getOperand(EltIdx);
6245 // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
6246 EltMaskIdx = Elt.getConstantOperandVal(1);
6247 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
6249 Mask[EltIdx] = EltIdx;
6253 // Let the shuffle legalizer deal with blend operations.
6254 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
6255 if (V1.getSimpleValueType() != VT)
6256 V1 = DAG.getBitcast(VT, V1);
6257 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, Mask);
6260 // See if we can lower this build_vector to a INSERTPS.
6261 if (!Subtarget.hasSSE41())
6264 SDValue V2 = Elt.getOperand(0);
6265 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
6268 bool CanFold = true;
6269 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
6273 SDValue Current = Op->getOperand(i);
6274 SDValue SrcVector = Current->getOperand(0);
6277 CanFold = (SrcVector == V1) && (Current.getConstantOperandVal(1) == i);
6283 assert(V1.getNode() && "Expected at least two non-zero elements!");
6284 if (V1.getSimpleValueType() != MVT::v4f32)
6285 V1 = DAG.getBitcast(MVT::v4f32, V1);
6286 if (V2.getSimpleValueType() != MVT::v4f32)
6287 V2 = DAG.getBitcast(MVT::v4f32, V2);
6289 // Ok, we can emit an INSERTPS instruction.
6290 unsigned ZMask = Zeroable.to_ulong();
6292 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
6293 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
6295 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
6296 DAG.getIntPtrConstant(InsertPSMask, DL));
6297 return DAG.getBitcast(VT, Result);
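// For reference, the INSERTPS immediate built above encodes: bits [7:6] the
// source element of V2 (EltMaskIdx), bits [5:4] the destination lane in V1
// (EltIdx), and bits [3:0] the zero mask. E.g. EltMaskIdx = 2, EltIdx = 1,
// Zeroable = {3} gives imm8 = (2 << 6) | (1 << 4) | 0b1000 = 0x98.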
6300 /// Return a vector logical shift node.
6301 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
6302 SelectionDAG &DAG, const TargetLowering &TLI,
6304 assert(VT.is128BitVector() && "Unknown type for VShift");
6305 MVT ShVT = MVT::v16i8;
6306 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
6307 SrcOp = DAG.getBitcast(ShVT, SrcOp);
6308 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(DAG.getDataLayout(), VT);
6309 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
6310 SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, ScalarShiftTy);
6311 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
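// The shift is performed on the full 128-bit register as a byte shift
// (PSLLDQ/PSRLDQ). E.g. getVShift(true, MVT::v2i64, X, 64, ...) emits a
// VSHLDQ by 8 bytes, moving element 0 of X into element 1 and zero-filling
// element 0.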
6314 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
6315 SelectionDAG &DAG) {
6317 // Check if the scalar load can be widened into a vector load, and if
6318 // the address is "base + cst", see if the cst can be "absorbed" into
6319 // the shuffle mask.
6320 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
6321 SDValue Ptr = LD->getBasePtr();
6322 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
6324 EVT PVT = LD->getValueType(0);
6325 if (PVT != MVT::i32 && PVT != MVT::f32)
6330 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
6331 FI = FINode->getIndex();
6333 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
6334 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6335 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6336 Offset = Ptr.getConstantOperandVal(1);
6337 Ptr = Ptr.getOperand(0);
6342 // FIXME: 256-bit vector instructions don't require a strict alignment,
6343 // improve this code to support it better.
6344 unsigned RequiredAlign = VT.getSizeInBits()/8;
6345 SDValue Chain = LD->getChain();
6346 // Make sure the stack object alignment is at least 16 or 32.
6347 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6348 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
6349 if (MFI.isFixedObjectIndex(FI)) {
6350 // Can't change the alignment. FIXME: It's possible to compute
6351 // the exact stack offset and reference FI + adjust offset instead.
6352 // If someone *really* cares about this, that's the way to implement it.
6355 MFI.setObjectAlignment(FI, RequiredAlign);
6359 // (Offset % 16 or 32) must be a multiple of 4. The address is then
6360 // Ptr + (Offset & ~(RequiredAlign - 1)).
6363 if ((Offset % RequiredAlign) & 3)
6365 int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
6368 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6369 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
6372 int EltNo = (Offset - StartOffset) >> 2;
6373 unsigned NumElems = VT.getVectorNumElements();
6375 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6376 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6377 LD->getPointerInfo().getWithOffset(StartOffset));
6379 SmallVector<int, 8> Mask(NumElems, EltNo);
6381 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
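// Sketch of the transformation above: a scalar f32 load at offset 8 from a
// 16-byte aligned stack slot becomes a v4f32 load of the whole slot followed
// by a splat of element (8 - 0) >> 2 == 2, i.e.
//   (f32 load FI+8)  ->  shuffle (v4f32 load FI), undef, <2,2,2,2>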
6387 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6388 /// elements can be replaced by a single large load which has the same value as
6389 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6391 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
6392 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6393 const SDLoc &DL, SelectionDAG &DAG,
6394 bool isAfterLegalize) {
6395 unsigned NumElems = Elts.size();
6397 int LastLoadedElt = -1;
6398 SmallBitVector LoadMask(NumElems, false);
6399 SmallBitVector ZeroMask(NumElems, false);
6400 SmallBitVector UndefMask(NumElems, false);
6402 // For each element in the initializer, see if we've found a load, zero or an undef.
6404 for (unsigned i = 0; i < NumElems; ++i) {
6405 SDValue Elt = peekThroughBitcasts(Elts[i]);
6410 UndefMask[i] = true;
6411 else if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode()))
6413 else if (ISD::isNON_EXTLoad(Elt.getNode())) {
6416 // Each loaded element must be the correct fractional portion of the
6417 // requested vector load.
6418 if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits())
6423 assert((ZeroMask | UndefMask | LoadMask).count() == NumElems &&
6424 "Incomplete element masks");
6426 // Handle Special Cases - all undef or undef/zero.
6427 if (UndefMask.count() == NumElems)
6428 return DAG.getUNDEF(VT);
6430 // FIXME: Should we return this as a BUILD_VECTOR instead?
6431 if ((ZeroMask | UndefMask).count() == NumElems)
6432 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
6433 : DAG.getConstantFP(0.0, DL, VT);
6435 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6436 int FirstLoadedElt = LoadMask.find_first();
6437 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
6438 LoadSDNode *LDBase = cast<LoadSDNode>(EltBase);
6439 EVT LDBaseVT = EltBase.getValueType();
6441 // Consecutive loads can contain UNDEFs but not ZERO elements.
6442 // Consecutive loads with UNDEF and ZERO elements require an
6443 // additional shuffle stage to clear the ZERO elements.
6444 bool IsConsecutiveLoad = true;
6445 bool IsConsecutiveLoadWithZeros = true;
6446 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
6448 SDValue Elt = peekThroughBitcasts(Elts[i]);
6449 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6450 if (!DAG.areNonVolatileConsecutiveLoads(
6451 LD, LDBase, Elt.getValueType().getStoreSizeInBits() / 8,
6452 i - FirstLoadedElt)) {
6453 IsConsecutiveLoad = false;
6454 IsConsecutiveLoadWithZeros = false;
6457 } else if (ZeroMask[i]) {
6458 IsConsecutiveLoad = false;
6462 auto CreateLoad = [&DAG, &DL](EVT VT, LoadSDNode *LDBase) {
6463 auto MMOFlags = LDBase->getMemOperand()->getFlags();
6464 assert(!(MMOFlags & MachineMemOperand::MOVolatile) &&
6465 "Cannot merge volatile loads.");
6467 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6468 LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
6470 if (LDBase->hasAnyUseOfValue(1)) {
6472 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, SDValue(LDBase, 1),
6473 SDValue(NewLd.getNode(), 1));
6474 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6475 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6476 SDValue(NewLd.getNode(), 1));
6482 // LOAD - all consecutive load/undefs (must start/end with a load).
6483 // If we have found an entire vector of loads and undefs, then return a large
6484 // load of the entire vector width starting at the base pointer.
6485 // If the vector contains zeros, then attempt to shuffle those elements.
6486 if (FirstLoadedElt == 0 && LastLoadedElt == (int)(NumElems - 1) &&
6487 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
6488 assert(LDBase && "Did not find base load for merging consecutive loads");
6489 EVT EltVT = LDBase->getValueType(0);
6490 // Ensure that the input vector size for the merged loads matches the
6491 // cumulative size of the input elements.
6492 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6495 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
6498 if (IsConsecutiveLoad)
6499 return CreateLoad(VT, LDBase);
6501 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
6502 // vector and a zero vector to clear out the zero elements.
6503 if (!isAfterLegalize && NumElems == VT.getVectorNumElements()) {
6504 SmallVector<int, 4> ClearMask(NumElems, -1);
6505 for (unsigned i = 0; i < NumElems; ++i) {
6507 ClearMask[i] = i + NumElems;
6508 else if (LoadMask[i])
6511 SDValue V = CreateLoad(VT, LDBase);
6512 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
6513 : DAG.getConstantFP(0.0, DL, VT);
6514 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
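// Example of the zero-clearing shuffle built above: for
//   <load a[0], load a[1], zero, load a[3]>
// the whole v4i32 is loaded from a and then shuffled with a zero vector
// using ClearMask <0, 1, 6, 3>, so only lane 2 is replaced by zero.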
6519 unsigned LoadSize = (1 + LastLoadedElt - FirstLoadedElt) * LDBaseVT.getStoreSizeInBits();
6521 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
6522 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
6523 (LoadSize == 32 || LoadSize == 64) &&
6524 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
6525 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSize)
6526 : MVT::getIntegerVT(LoadSize);
6527 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSize);
6528 if (TLI.isTypeLegal(VecVT)) {
6529 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
6530 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6532 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
6533 LDBase->getPointerInfo(),
6534 LDBase->getAlignment(),
6535 false/*isVolatile*/, true/*ReadMem*/,
6538 // Make sure the newly-created LOAD is in the same position as LDBase in
6539 // terms of dependency. We create a TokenFactor for LDBase and ResNode,
6540 // and update uses of LDBase's output chain to use the TokenFactor.
6541 if (LDBase->hasAnyUseOfValue(1)) {
6543 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, SDValue(LDBase, 1),
6544 SDValue(ResNode.getNode(), 1));
6545 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6546 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6547 SDValue(ResNode.getNode(), 1));
6550 return DAG.getBitcast(VT, ResNode);
6557 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
6558 unsigned SplatBitSize, LLVMContext &C) {
6559 unsigned ScalarSize = VT.getScalarSizeInBits();
6560 unsigned NumElm = SplatBitSize / ScalarSize;
6562 SmallVector<Constant *, 32> ConstantVec;
6563 for (unsigned i = 0; i < NumElm; i++) {
6564 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
6566 if (VT.isFloatingPoint()) {
6567 if (ScalarSize == 32) {
6568 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
6570 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
6571 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
6574 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
6575 ConstantVec.push_back(Const);
6577 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
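// For instance, getConstantVector(MVT::v8i32, APInt(64, 0x0000000100000002),
// 64, Ctx) extracts two 32-bit pieces, lowest bits first, and returns the
// constant vector <i32 2, i32 1>.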
6580 static bool isUseOfShuffle(SDNode *N) {
6581 for (auto *U : N->uses()) {
6582 if (isTargetShuffle(U->getOpcode()))
6584 if (U->getOpcode() == ISD::BITCAST) // Ignore bitcasts
6585 return isUseOfShuffle(U);
6590 /// Attempt to use the vbroadcast instruction to generate a splat value
6591 /// from a splat BUILD_VECTOR which uses:
6592 /// a. A single scalar load, or a constant.
6593 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
6595 /// The VBROADCAST node is returned when a pattern is found,
6596 /// or SDValue() otherwise.
6597 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
6598 const X86Subtarget &Subtarget,
6599 SelectionDAG &DAG) {
6600 // VBROADCAST requires AVX.
6601 // TODO: Splats could be generated for non-AVX CPUs using SSE
6602 // instructions, but there's less potential gain for only 128-bit vectors.
6603 if (!Subtarget.hasAVX())
6606 MVT VT = BVOp->getSimpleValueType(0);
6609 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6610 "Unsupported vector type for broadcast.");
6612 BitVector UndefElements;
6613 SDValue Ld = BVOp->getSplatValue(&UndefElements);
6615 // We need a splat of a single value to use broadcast, and it doesn't
6616 // make any sense if the value is only in one element of the vector.
6617 if (!Ld || (VT.getVectorNumElements() - UndefElements.count()) <= 1) {
6618 APInt SplatValue, Undef;
6619 unsigned SplatBitSize;
6621 // Check if this is a repeated constant pattern suitable for broadcasting.
6622 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
6623 SplatBitSize > VT.getScalarSizeInBits() &&
6624 SplatBitSize < VT.getSizeInBits()) {
6625 // Avoid replacing with broadcast when it's a use of a shuffle
6626 // instruction to preserve the present custom lowering of shuffles.
6627 if (isUseOfShuffle(BVOp) || BVOp->hasOneUse())
6629 // Replace BUILD_VECTOR with a broadcast of the repeated constants.
6630 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6631 LLVMContext *Ctx = DAG.getContext();
6632 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
6633 if (Subtarget.hasAVX()) {
6634 if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
6635 !(SplatBitSize == 64 && Subtarget.is32Bit())) {
6636 // Splatted value can fit in one INTEGER constant in constant pool.
6637 // Load the constant and broadcast it.
6638 MVT CVT = MVT::getIntegerVT(SplatBitSize);
6639 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
6640 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
6641 SDValue CP = DAG.getConstantPool(C, PVT);
6642 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
6644 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6646 CVT, dl, DAG.getEntryNode(), CP,
6647 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
6649 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
6650 MVT::getVectorVT(CVT, Repeat), Ld);
6651 return DAG.getBitcast(VT, Brdcst);
6652 } else if (SplatBitSize == 32 || SplatBitSize == 64) {
6653 // Splatted value can fit in one FLOAT constant in constant pool.
6654 // Load the constant and broadcast it.
6655 // AVX has support for 32 and 64 bit broadcasts for floats only.
6656 // There is no 64-bit integer broadcast on a 32-bit subtarget.
6657 MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
6658 // Lower the splat via APFloat directly, to avoid any conversion.
6661 ? ConstantFP::get(*Ctx,
6662 APFloat(APFloat::IEEEsingle(), SplatValue))
6663 : ConstantFP::get(*Ctx,
6664 APFloat(APFloat::IEEEdouble(), SplatValue));
6665 SDValue CP = DAG.getConstantPool(C, PVT);
6666 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
6668 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6670 CVT, dl, DAG.getEntryNode(), CP,
6671 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
6673 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
6674 MVT::getVectorVT(CVT, Repeat), Ld);
6675 return DAG.getBitcast(VT, Brdcst);
6676 } else if (SplatBitSize > 64) {
6677 // Load the vector of constants and broadcast it.
6678 MVT CVT = VT.getScalarType();
6679 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
6681 SDValue VCP = DAG.getConstantPool(VecC, PVT);
6682 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
6683 unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
6685 MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
6686 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
6688 SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
6689 return DAG.getBitcast(VT, Brdcst);
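// As an example of the repeated-constant path above, a v8i32 build_vector of
// <1, 2, 1, 2, 1, 2, 1, 2> has a 64-bit repeating pattern; on AVX2 it is
// lowered to a single i64 constant-pool load followed by a VBROADCAST of
// MVT::v4i64 (typically a VPBROADCASTQ), then bitcast back to v8i32.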
6696 bool ConstSplatVal =
6697 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
6699 // Make sure that all of the users of a non-constant load are from the
6700 // BUILD_VECTOR node.
6701 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6704 unsigned ScalarSize = Ld.getValueSizeInBits();
6705 bool IsGE256 = (VT.getSizeInBits() >= 256);
6707 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6708 // instruction to save 8 or more bytes of constant pool data.
6709 // TODO: If multiple splats are generated to load the same constant,
6710 // it may be detrimental to overall size. There needs to be a way to detect
6711 // that condition to know if this is truly a size win.
6712 bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
6714 // Handle broadcasting a single constant scalar from the constant pool into a vector.
6716 // On Sandybridge (no AVX2), it is still better to load a constant vector
6717 // from the constant pool and not to broadcast it from a scalar.
6718 // But override that restriction when optimizing for size.
6719 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6720 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
6721 EVT CVT = Ld.getValueType();
6722 assert(!CVT.isVector() && "Must not broadcast a vector type");
6724 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6725 // For size optimization, also splat v2f64 and v2i64, and for size opt
6726 // with AVX2, also splat i8 and i16.
6727 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6728 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6729 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
6730 const Constant *C = nullptr;
6731 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6732 C = CI->getConstantIntValue();
6733 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6734 C = CF->getConstantFPValue();
6736 assert(C && "Invalid constant type");
6738 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6740 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
6741 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6743 CVT, dl, DAG.getEntryNode(), CP,
6744 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
6747 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6751 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6753 // Handle AVX2 in-register broadcasts.
6754 if (!IsLoad && Subtarget.hasInt256() &&
6755 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6756 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6758 // The scalar source must be a normal load.
6762 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6763 (Subtarget.hasVLX() && ScalarSize == 64))
6764 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6766 // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
6767 // match double since there is no vbroadcastsd xmm.
6768 if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
6769 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6770 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6773 // Unsupported broadcast.
6777 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6778 /// underlying vector and index.
6780 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
6782 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6784 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6785 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6788 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6790 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6792 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6793 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6796 // In this case the vector is the extract_subvector expression and the index
6797 // is 2, as specified by the shuffle.
6798 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6799 SDValue ShuffleVec = SVOp->getOperand(0);
6800 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6801 assert(ShuffleVecVT.getVectorElementType() ==
6802 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6804 int ShuffleIdx = SVOp->getMaskElt(Idx);
6805 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6806 ExtractedFromVec = ShuffleVec;
6812 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6813 MVT VT = Op.getSimpleValueType();
6815 // Skip if insert_vec_elt is not supported.
6816 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6817 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6821 unsigned NumElems = Op.getNumOperands();
6825 SmallVector<unsigned, 4> InsertIndices;
6826 SmallVector<int, 8> Mask(NumElems, -1);
6828 for (unsigned i = 0; i != NumElems; ++i) {
6829 unsigned Opc = Op.getOperand(i).getOpcode();
6831 if (Opc == ISD::UNDEF)
6834 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6835 // Quit if more than 1 element needs inserting.
6836 if (InsertIndices.size() > 1)
6839 InsertIndices.push_back(i);
6843 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6844 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6846 // Quit if non-constant index.
6847 if (!isa<ConstantSDNode>(ExtIdx))
6849 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6851 // Quit if extracted from vector of different type.
6852 if (ExtractedFromVec.getValueType() != VT)
6855 if (!VecIn1.getNode())
6856 VecIn1 = ExtractedFromVec;
6857 else if (VecIn1 != ExtractedFromVec) {
6858 if (!VecIn2.getNode())
6859 VecIn2 = ExtractedFromVec;
6860 else if (VecIn2 != ExtractedFromVec)
6861 // Quit if more than 2 vectors to shuffle
6865 if (ExtractedFromVec == VecIn1)
6867 else if (ExtractedFromVec == VecIn2)
6868 Mask[i] = Idx + NumElems;
6871 if (!VecIn1.getNode())
6874 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6875 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
6877 for (unsigned Idx : InsertIndices)
6878 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6879 DAG.getIntPtrConstant(Idx, DL));
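// Typical case handled here: a v4i32 build_vector made of three
// extract_vector_elt from the same source vector plus one loose scalar
// becomes a vector_shuffle covering the three extracted lanes followed by a
// single INSERT_VECTOR_ELT for the remaining lane.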
6884 static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
6885 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
6886 Op.getScalarValueSizeInBits() == 1 &&
6887 "Can not convert non-constant vector");
6888 uint64_t Immediate = 0;
6889 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6890 SDValue In = Op.getOperand(idx);
6892 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
6895 MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
6896 return DAG.getConstant(Immediate, dl, VT);
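// E.g. a v8i1 build_vector <1,0,1,1,0,0,0,0> packs into the i8 immediate
// 0b00001101 (bit i holds element i), which the caller can bitcast back to
// the mask type.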
6898 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6900 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6902 MVT VT = Op.getSimpleValueType();
6903 assert((VT.getVectorElementType() == MVT::i1) &&
6904 "Unexpected type in LowerBUILD_VECTORvXi1!");
6907 if (ISD::isBuildVectorAllZeros(Op.getNode()))
6908 return DAG.getTargetConstant(0, dl, VT);
6910 if (ISD::isBuildVectorAllOnes(Op.getNode()))
6911 return DAG.getTargetConstant(1, dl, VT);
6913 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
6914 SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
6915 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
6916 return DAG.getBitcast(VT, Imm);
6917 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
6918 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
6919 DAG.getIntPtrConstant(0, dl));
6922 // Vector has one or more non-const elements
6923 uint64_t Immediate = 0;
6924 SmallVector<unsigned, 16> NonConstIdx;
6925 bool IsSplat = true;
6926 bool HasConstElts = false;
6928 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6929 SDValue In = Op.getOperand(idx);
6932 if (!isa<ConstantSDNode>(In))
6933 NonConstIdx.push_back(idx);
6935 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
6936 HasConstElts = true;
6940 else if (In != Op.getOperand(SplatIdx))
6944 // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
6946 return DAG.getSelect(dl, VT, Op.getOperand(SplatIdx),
6947 DAG.getConstant(1, dl, VT),
6948 DAG.getConstant(0, dl, VT));
6950 // Insert elements one by one.
6954 MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8));
6955 Imm = DAG.getConstant(Immediate, dl, ImmVT);
6957 else if (HasConstElts)
6958 Imm = DAG.getConstant(0, dl, VT);
6960 Imm = DAG.getUNDEF(VT);
6961 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
6962 DstVec = DAG.getBitcast(VT, Imm);
6964 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
6965 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
6966 DAG.getIntPtrConstant(0, dl));
6969 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
6970 unsigned InsertIdx = NonConstIdx[i];
6971 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6972 Op.getOperand(InsertIdx),
6973 DAG.getIntPtrConstant(InsertIdx, dl));
6978 /// \brief Return true if \p N implements a horizontal binop and return the
6979 /// operands for the horizontal binop into V0 and V1.
6981 /// This is a helper function of LowerToHorizontalOp().
6982 /// This function checks that the build_vector \p N in input implements a
6983 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6984 /// operation to match.
6985 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6986 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6987 /// is equal to ISD::SUB, then this function checks if this is a horizontal arithmetic sub.
6990 /// This function only analyzes elements of \p N whose indices are
6991 /// in range [BaseIdx, LastIdx).
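///
/// For example, a v4f32 build_vector matching \p Opcode == ISD::FADD of the
/// form
///   <(fadd (extract A,0), (extract A,1)), (fadd (extract A,2), (extract A,3)),
///    (fadd (extract B,0), (extract B,1)), (fadd (extract B,2), (extract B,3))>
/// is a horizontal add with V0 = A and V1 = B.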
6992 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6994 unsigned BaseIdx, unsigned LastIdx,
6995 SDValue &V0, SDValue &V1) {
6996 EVT VT = N->getValueType(0);
6998 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6999 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
7000 "Invalid Vector in input!");
7002 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
7003 bool CanFold = true;
7004 unsigned ExpectedVExtractIdx = BaseIdx;
7005 unsigned NumElts = LastIdx - BaseIdx;
7006 V0 = DAG.getUNDEF(VT);
7007 V1 = DAG.getUNDEF(VT);
7009 // Check if N implements a horizontal binop.
7010 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
7011 SDValue Op = N->getOperand(i + BaseIdx);
7014 if (Op->isUndef()) {
7015 // Update the expected vector extract index.
7016 if (i * 2 == NumElts)
7017 ExpectedVExtractIdx = BaseIdx;
7018 ExpectedVExtractIdx += 2;
7022 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
7027 SDValue Op0 = Op.getOperand(0);
7028 SDValue Op1 = Op.getOperand(1);
7030 // Try to match the following pattern:
7031 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
7032 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7033 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7034 Op0.getOperand(0) == Op1.getOperand(0) &&
7035 isa<ConstantSDNode>(Op0.getOperand(1)) &&
7036 isa<ConstantSDNode>(Op1.getOperand(1)));
7040 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
7041 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
7043 if (i * 2 < NumElts) {
7045 V0 = Op0.getOperand(0);
7046 if (V0.getValueType() != VT)
7051 V1 = Op0.getOperand(0);
7052 if (V1.getValueType() != VT)
7055 if (i * 2 == NumElts)
7056 ExpectedVExtractIdx = BaseIdx;
7059 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
7060 if (I0 == ExpectedVExtractIdx)
7061 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
7062 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
7063 // Try to match the following dag sequence:
7064 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
7065 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
7069 ExpectedVExtractIdx += 2;
7075 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
7076 /// a concat_vector.
7078 /// This is a helper function of LowerToHorizontalOp().
7079 /// This function expects two 256-bit vectors called V0 and V1.
7080 /// At first, each vector is split into two separate 128-bit vectors.
7081 /// Then, the resulting 128-bit vectors are used to implement two
7082 /// horizontal binary operations.
7084 /// The kind of horizontal binary operation is defined by \p X86Opcode.
7086 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
7087 /// the two new horizontal binop.
7088 /// When Mode is set, the first horizontal binop dag node would take as input
7089 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
7090 /// horizontal binop dag node would take as input the lower 128-bit of V1
7091 /// and the upper 128-bit of V1.
7093 /// HADD V0_LO, V0_HI
7094 /// HADD V1_LO, V1_HI
7096 /// Otherwise, the first horizontal binop dag node takes as input the lower
7097 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
7098 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
7100 /// HADD V0_LO, V1_LO
7101 /// HADD V0_HI, V1_HI
7103 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
7104 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
7105 /// the upper 128-bits of the result.
7106 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
7107 const SDLoc &DL, SelectionDAG &DAG,
7108 unsigned X86Opcode, bool Mode,
7109 bool isUndefLO, bool isUndefHI) {
7110 MVT VT = V0.getSimpleValueType();
7111 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
7112 "Invalid nodes in input!");
7114 unsigned NumElts = VT.getVectorNumElements();
7115 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
7116 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
7117 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
7118 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
7119 MVT NewVT = V0_LO.getSimpleValueType();
7121 SDValue LO = DAG.getUNDEF(NewVT);
7122 SDValue HI = DAG.getUNDEF(NewVT);
7125 // Don't emit a horizontal binop if the result is expected to be UNDEF.
7126 if (!isUndefLO && !V0->isUndef())
7127 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
7128 if (!isUndefHI && !V1->isUndef())
7129 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
7131 // Don't emit a horizontal binop if the result is expected to be UNDEF.
7132 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
7133 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
7135 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
7136 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
7139 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
7142 /// Returns true iff \p BV builds a vector with the result equivalent to
7143 /// the result of ADDSUB operation.
7144 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1 operation
7145 /// are written to the parameters \p Opnd0 and \p Opnd1.
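///
/// For example, the v4f32 build_vector
///   <(fsub A0,B0), (fadd A1,B1), (fsub A2,B2), (fadd A3,B3)>
/// where Ai/Bi denote lane i of vectors A and B, matches with Opnd0 = A and
/// Opnd1 = B (subtract in even lanes, add in odd lanes).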
7146 static bool isAddSub(const BuildVectorSDNode *BV,
7147 const X86Subtarget &Subtarget, SelectionDAG &DAG,
7148 SDValue &Opnd0, SDValue &Opnd1) {
7150 MVT VT = BV->getSimpleValueType(0);
7151 if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
7152 (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)) &&
7153 (!Subtarget.hasAVX512() || (VT != MVT::v16f32 && VT != MVT::v8f64)))
7156 unsigned NumElts = VT.getVectorNumElements();
7157 SDValue InVec0 = DAG.getUNDEF(VT);
7158 SDValue InVec1 = DAG.getUNDEF(VT);
7160 // Odd-numbered elements in the input build vector are obtained from
7161 // adding two integer/float elements.
7162 // Even-numbered elements in the input build vector are obtained from
7163 // subtracting two integer/float elements.
7164 unsigned ExpectedOpcode = ISD::FSUB;
7165 unsigned NextExpectedOpcode = ISD::FADD;
7166 bool AddFound = false;
7167 bool SubFound = false;
7169 for (unsigned i = 0, e = NumElts; i != e; ++i) {
7170 SDValue Op = BV->getOperand(i);
7172 // Skip 'undef' values.
7173 unsigned Opcode = Op.getOpcode();
7174 if (Opcode == ISD::UNDEF) {
7175 std::swap(ExpectedOpcode, NextExpectedOpcode);
7179 // Early exit if we found an unexpected opcode.
7180 if (Opcode != ExpectedOpcode)
7183 SDValue Op0 = Op.getOperand(0);
7184 SDValue Op1 = Op.getOperand(1);
7186 // Try to match the following pattern:
7187 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
7188 // Early exit if we cannot match that sequence.
7189 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7190 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7191 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
7192 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
7193 Op0.getOperand(1) != Op1.getOperand(1))
7196 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
7200 // We found a valid add/sub node. Update the information accordingly.
7206 // Update InVec0 and InVec1.
7207 if (InVec0.isUndef()) {
7208 InVec0 = Op0.getOperand(0);
7209 if (InVec0.getSimpleValueType() != VT)
7212 if (InVec1.isUndef()) {
7213 InVec1 = Op1.getOperand(0);
7214 if (InVec1.getSimpleValueType() != VT)
7218 // Make sure that operands in input to each add/sub node always
7219 // come from the same pair of vectors.
7220 if (InVec0 != Op0.getOperand(0)) {
7221 if (ExpectedOpcode == ISD::FSUB)
7224 // FADD is commutable. Try to commute the operands
7225 // and then test again.
7226 std::swap(Op0, Op1);
7227 if (InVec0 != Op0.getOperand(0))
7231 if (InVec1 != Op1.getOperand(0))
7234 // Update the pair of expected opcodes.
7235 std::swap(ExpectedOpcode, NextExpectedOpcode);
7238 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
7239 if (!AddFound || !SubFound || InVec0.isUndef() || InVec1.isUndef())
7247 /// Returns true if it is possible to fold MUL and an idiom that has already been
7248 /// recognized as ADDSUB(\p Opnd0, \p Opnd1) into FMADDSUB(x, y, \p Opnd1).
7249 /// If (and only if) true is returned, the operands of FMADDSUB are written to
7250 /// parameters \p Opnd0, \p Opnd1, \p Opnd2.
7252 /// Prior to calling this function it should be known that there is some
7253 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
7254 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
7255 /// before replacement of such SDNode with ADDSUB operation. Thus the number
7256 /// of \p Opnd0 uses is expected to be equal to 2.
7257 /// For example, this function may be called for the following IR:
7258 /// %AB = fmul fast <2 x double> %A, %B
7259 /// %Sub = fsub fast <2 x double> %AB, %C
7260 /// %Add = fadd fast <2 x double> %AB, %C
7261 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
7262 /// <2 x i32> <i32 0, i32 3>
7263 /// There is a def for %Addsub here, which potentially can be replaced by
7264 /// X86ISD::ADDSUB operation:
7265 /// %Addsub = X86ISD::ADDSUB %AB, %C
7266 /// and such ADDSUB can further be replaced with FMADDSUB:
7267 /// %Addsub = FMADDSUB %A, %B, %C.
7269 /// The main reason why this method is called before the replacement of the
7270 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
7271 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit FMADDSUB is.
7273 static bool isFMAddSub(const X86Subtarget &Subtarget, SelectionDAG &DAG,
7274 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2) {
7275 if (Opnd0.getOpcode() != ISD::FMUL || Opnd0->use_size() != 2 ||
7276 !Subtarget.hasAnyFMA())
7279 // FIXME: These checks must match the similar ones in
7280 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
7281 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
7282 // or MUL + ADDSUB to FMADDSUB.
7283 const TargetOptions &Options = DAG.getTarget().Options;
7285 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
7290 Opnd1 = Opnd0.getOperand(1);
7291 Opnd0 = Opnd0.getOperand(0);
7296 /// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' operation
7297 /// into an X86ISD::ADDSUB or X86ISD::FMADDSUB node.
7298 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
7299 const X86Subtarget &Subtarget,
7300 SelectionDAG &DAG) {
7301 SDValue Opnd0, Opnd1;
7302 if (!isAddSub(BV, Subtarget, DAG, Opnd0, Opnd1))
7305 MVT VT = BV->getSimpleValueType(0);
7308 // Try to generate X86ISD::FMADDSUB node here.
7310 if (isFMAddSub(Subtarget, DAG, Opnd0, Opnd1, Opnd2))
7311 return DAG.getNode(X86ISD::FMADDSUB, DL, VT, Opnd0, Opnd1, Opnd2);
7313 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
7314 // the ADDSUB idiom has been successfully recognized. There are no known
7315 // X86 targets with 512-bit ADDSUB instructions!
7316 // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom recognition.
7318 if (VT.is512BitVector())
7321 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
7324 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
7325 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
7326 const X86Subtarget &Subtarget,
7327 SelectionDAG &DAG) {
7328 MVT VT = BV->getSimpleValueType(0);
7329 unsigned NumElts = VT.getVectorNumElements();
7330 unsigned NumUndefsLO = 0;
7331 unsigned NumUndefsHI = 0;
7332 unsigned Half = NumElts/2;
7334 // Count the number of UNDEF operands in the input build_vector.
7335 for (unsigned i = 0, e = Half; i != e; ++i)
7336 if (BV->getOperand(i)->isUndef())
7339 for (unsigned i = Half, e = NumElts; i != e; ++i)
7340 if (BV->getOperand(i)->isUndef())
7343 // Early exit if this is either a build_vector of all UNDEFs or all the
7344 // operands but one are UNDEF.
7345 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
7349 SDValue InVec0, InVec1;
7350 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) {
7351 // Try to match an SSE3 float HADD/HSUB.
7352 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
7353 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
7355 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
7356 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
7357 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget.hasSSSE3()) {
7358 // Try to match an SSSE3 integer HADD/HSUB.
7359 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
7360 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
7362 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
7363 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
7366 if (!Subtarget.hasAVX())
7369 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
7370 // Try to match an AVX horizontal add/sub of packed single/double
7371 // precision floating point values from 256-bit vectors.
7372 SDValue InVec2, InVec3;
7373 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
7374 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
7375 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
7376 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
7377 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
7379 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
7380 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
7381 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
7382 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
7383 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
7384 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
7385 // Try to match an AVX2 horizontal add/sub of signed integers.
7386 SDValue InVec2, InVec3;
7388 bool CanFold = true;
7390 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
7391 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
7392 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
7393 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
7394 X86Opcode = X86ISD::HADD;
7395 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
7396 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
7397 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
7398 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
7399 X86Opcode = X86ISD::HSUB;
7404 // Fold this build_vector into a single horizontal add/sub.
7405 // Do this only if the target has AVX2.
7406 if (Subtarget.hasAVX2())
7407 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
7409 // Do not try to expand this build_vector into a pair of horizontal
7410 // add/sub if we can emit a pair of scalar add/sub.
7411 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
7414 // Convert this build_vector into a pair of horizontal binop followed by a concat vector.
7416 bool isUndefLO = NumUndefsLO == Half;
7417 bool isUndefHI = NumUndefsHI == Half;
7418 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
7419 isUndefLO, isUndefHI);
7423 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
7424 VT == MVT::v16i16) && Subtarget.hasAVX()) {
7426 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
7427 X86Opcode = X86ISD::HADD;
7428 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
7429 X86Opcode = X86ISD::HSUB;
7430 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
7431 X86Opcode = X86ISD::FHADD;
7432 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
7433 X86Opcode = X86ISD::FHSUB;
7437 // Don't try to expand this build_vector into a pair of horizontal add/sub
7438 // if we can simply emit a pair of scalar add/sub.
7439 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
7442 // Convert this build_vector into two horizontal add/sub followed by a concat vector.
7444 bool isUndefLO = NumUndefsLO == Half;
7445 bool isUndefHI = NumUndefsHI == Half;
7446 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
7447 isUndefLO, isUndefHI);
7453 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
7454 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
7455 /// just apply the bit operation to the vectors.
7456 /// NOTE: It's not in our interest to start making a general purpose vectorizer
7457 /// from this, but enough scalar bit operations are created from the later
7458 /// legalization + scalarization stages to need basic support.
7459 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
7460 SelectionDAG &DAG) {
7462 MVT VT = Op->getSimpleValueType(0);
7463 unsigned NumElems = VT.getVectorNumElements();
7464 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7466 // Check that all elements have the same opcode.
7467 // TODO: Should we allow UNDEFs and, if so, how many?
7468 unsigned Opcode = Op->getOperand(0).getOpcode();
7469 for (unsigned i = 1; i < NumElems; ++i)
7470 if (Opcode != Op->getOperand(i).getOpcode())
7473 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
7480 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
7485 SmallVector<SDValue, 4> LHSElts, RHSElts;
7486 for (SDValue Elt : Op->ops()) {
7487 SDValue LHS = Elt.getOperand(0);
7488 SDValue RHS = Elt.getOperand(1);
7490 // We expect the canonicalized RHS operand to be the constant.
7491 if (!isa<ConstantSDNode>(RHS))
7493 LHSElts.push_back(LHS);
7494 RHSElts.push_back(RHS);
7497 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
7498 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
7499 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
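// Example of the transformation above:
//   (build_vector (and a, 1), (and b, 2), (and c, 4), (and d, 8))
// becomes
//   (and (build_vector a, b, c, d), (build_vector 1, 2, 4, 8))
// leaving the two new build_vectors to the normal lowering paths.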
7502 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
7503 /// functionality to do this, so it's all zeros, all ones, or some derivation
7504 /// that is cheap to calculate.
7505 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
7506 const X86Subtarget &Subtarget) {
7508 MVT VT = Op.getSimpleValueType();
7510 // Vectors containing all zeros can be matched by pxor and xorps.
7511 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
7512 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
7513 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
7514 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
7517 return getZeroVector(VT, Subtarget, DAG, DL);
7520 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
7521 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
7522 // vpcmpeqd on 256-bit vectors.
7523 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
7524 if (VT == MVT::v4i32 || VT == MVT::v16i32 ||
7525 (VT == MVT::v8i32 && Subtarget.hasInt256()))
7528 return getOnesVector(VT, DAG, DL);
7535 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
7538 MVT VT = Op.getSimpleValueType();
7539 MVT ExtVT = VT.getVectorElementType();
7540 unsigned NumElems = Op.getNumOperands();
7542 // Generate vectors for predicate vectors.
7543 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
7544 return LowerBUILD_VECTORvXi1(Op, DAG);
7546 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
7547 return VectorConstant;
7549 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
7550 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
7552 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
7553 return HorizontalOp;
7554 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
7556 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
7559 unsigned EVTBits = ExtVT.getSizeInBits();
7561 unsigned NumZero = 0;
7562 unsigned NumNonZero = 0;
7563 uint64_t NonZeros = 0;
7564 bool IsAllConstants = true;
7565 SmallSet<SDValue, 8> Values;
7566 for (unsigned i = 0; i < NumElems; ++i) {
7567 SDValue Elt = Op.getOperand(i);
7571 if (Elt.getOpcode() != ISD::Constant &&
7572 Elt.getOpcode() != ISD::ConstantFP)
7573 IsAllConstants = false;
7574 if (X86::isZeroNode(Elt))
7577 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
7578 NonZeros |= ((uint64_t)1 << i);
7583 // All undef vector. Return an UNDEF. All zero vectors were handled above.
7584 if (NumNonZero == 0)
7585 return DAG.getUNDEF(VT);
7587 // Special case for single non-zero, non-undef, element.
7588 if (NumNonZero == 1) {
7589 unsigned Idx = countTrailingZeros(NonZeros);
7590 SDValue Item = Op.getOperand(Idx);
7592 // If this is an insertion of an i64 value on x86-32, and if the top bits of
7593 // the value are obviously zero, truncate the value to i32 and do the
7594 // insertion that way. Only do this if the value is non-constant or if the
7595 // value is a constant being inserted into element 0. It is cheaper to do
7596 // a constant pool load than it is to do a movd + shuffle.
7597 if (ExtVT == MVT::i64 && !Subtarget.is64Bit() &&
7598 (!IsAllConstants || Idx == 0)) {
7599 if (DAG.MaskedValueIsZero(Item, APInt::getHighBitsSet(64, 32))) {
7601 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
7602 MVT VecVT = MVT::v4i32;
7604 // Truncate the value (which may itself be a constant) to i32, and
7605 // convert it to a vector with movd (S2V+shuffle to zero extend).
7606 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
7607 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
7608 return DAG.getBitcast(VT, getShuffleVectorZeroOrUndef(
7609 Item, Idx * 2, true, Subtarget, DAG));
7613 // If we have a constant or non-constant insertion into the low element of
7614 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
7615 // the rest of the elements. This will be matched as movd/movq/movss/movsd
7616 // depending on what the source datatype is.
7619 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7621 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
7622 (ExtVT == MVT::i64 && Subtarget.is64Bit())) {
7623 assert((VT.is128BitVector() || VT.is256BitVector() ||
7624 VT.is512BitVector()) &&
7625 "Expected an SSE value type!");
7626 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7627 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
7628 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7631 // We can't directly insert an i8 or i16 into a vector, so zero extend it to i32 first.
7633 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7634 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7635 if (VT.getSizeInBits() >= 256) {
7636 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
7637 if (Subtarget.hasAVX()) {
7638 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
7639 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7641 // Without AVX, we need to extend to a 128-bit vector and then
7642 // insert into the 256-bit vector.
7643 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7644 SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
7645 Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7648 assert(VT.is128BitVector() && "Expected an SSE value type!");
7649 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7650 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7652 return DAG.getBitcast(VT, Item);
7656 // Is it a vector logical left shift?
7657 if (NumElems == 2 && Idx == 1 &&
7658 X86::isZeroNode(Op.getOperand(0)) &&
7659 !X86::isZeroNode(Op.getOperand(1))) {
7660 unsigned NumBits = VT.getSizeInBits();
7661 return getVShift(true, VT,
7662 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7663 VT, Op.getOperand(1)),
7664 NumBits/2, DAG, *this, dl);
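// E.g. a v2i64 build_vector <0, x> is formed by SCALAR_TO_VECTOR'ing x into
// lane 0 and byte-shifting the 128-bit value left by 8 bytes, which
// zero-fills lane 0 and lands x in lane 1.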
7667 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7670 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7671 // is a non-constant being inserted into an element other than the low one,
7672 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7673 // movd/movss) to move this into the low element, then shuffle it into place.
7675 if (EVTBits == 32) {
7676 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7677 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7681 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7682 if (Values.size() == 1) {
7683 if (EVTBits == 32) {
7684 // Instead of a shuffle like this:
7685 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7686 // Check if it's possible to issue this instead.
7687 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7688 unsigned Idx = countTrailingZeros(NonZeros);
7689 SDValue Item = Op.getOperand(Idx);
7690 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7691 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7696 // A vector full of immediates; various special cases are already
7697 // handled, so this is best done with a single constant-pool load.
7701 // See if we can use a vector load to get all of the elements.
7702 if (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) {
7703 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
7704 if (SDValue LD = EltsFromConsecutiveLoads(VT, Ops, dl, DAG, false))
7708 // For AVX-length vectors, build the individual 128-bit pieces and use
7709 // shuffles to put them in place.
7710 if (VT.is256BitVector() || VT.is512BitVector()) {
7711 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
7713 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7715 // Build both the lower and upper subvector.
7717 DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElems / 2));
7718 SDValue Upper = DAG.getBuildVector(
7719 HVT, dl, makeArrayRef(&Ops[NumElems / 2], NumElems / 2));
7721 // Recreate the wider vector with the lower and upper part.
7722 if (VT.is256BitVector())
7723 return concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7724 return concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7727 // Let legalizer expand 2-wide build_vectors.
7728 if (EVTBits == 64) {
7729 if (NumNonZero == 1) {
7730 // One half is zero or undef.
7731 unsigned Idx = countTrailingZeros(NonZeros);
7732 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7733 Op.getOperand(Idx));
7734 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7739 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7740 if (EVTBits == 8 && NumElems == 16)
7741 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
7745 if (EVTBits == 16 && NumElems == 8)
7746 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
7750 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7751 if (EVTBits == 32 && NumElems == 4)
7752 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
7755 // If element VT is == 32 bits, turn it into a number of shuffles.
7756 if (NumElems == 4 && NumZero > 0) {
7757 SmallVector<SDValue, 8> Ops(NumElems);
7758 for (unsigned i = 0; i < 4; ++i) {
7759 bool isZero = !(NonZeros & (1ULL << i));
7761 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
7763 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7766 for (unsigned i = 0; i < 2; ++i) {
7767 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7770 Ops[i] = Ops[i*2]; // Must be a zero vector.
7773 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
7776 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
7779 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
7784 bool Reverse1 = (NonZeros & 0x3) == 2;
7785 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7789 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7790 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7792 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
7795 if (Values.size() > 1 && VT.is128BitVector()) {
7796 // Check for a build vector formed mostly from a shuffle plus a few inserted elements.
7797 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
7800 // For SSE 4.1, use insertps to put the high elements into the low element.
7801 if (Subtarget.hasSSE41()) {
7803 if (!Op.getOperand(0).isUndef())
7804 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7806 Result = DAG.getUNDEF(VT);
7808 for (unsigned i = 1; i < NumElems; ++i) {
7809 if (Op.getOperand(i).isUndef()) continue;
7810 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7811 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
7816 // Otherwise, expand into a number of unpckl*, start by extending each of
7817 // our (non-undef) elements to the full vector width with the element in the
7818 // bottom slot of the vector (which generates no code for SSE).
7819 SmallVector<SDValue, 8> Ops(NumElems);
7820 for (unsigned i = 0; i < NumElems; ++i) {
7821 if (!Op.getOperand(i).isUndef())
7822 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7824 Ops[i] = DAG.getUNDEF(VT);
7827 // Next, we iteratively mix elements, e.g. for v4f32:
7828 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7829 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7830 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7831 unsigned EltStride = NumElems >> 1;
7832 while (EltStride != 0) {
7833 for (unsigned i = 0; i < EltStride; ++i) {
7834 // If Ops[i+EltStride] is undef and this is the first round of mixing,
7835 // then it is safe to just drop this shuffle: V[i] is already in the
7836 // right place, the one element (since it's the first round) being
7837 // inserted as undef can be dropped. This isn't safe for successive
7838 // rounds because they will permute elements within both vectors.
7839 if (Ops[i+EltStride].isUndef() &&
7840 EltStride == NumElems/2)
7843 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i], Ops[i + EltStride]);
7852 // 256-bit AVX can use the vinsertf128 instruction
7853 // to create 256-bit vectors from two other 128-bit ones.
7854 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7856 MVT ResVT = Op.getSimpleValueType();
7858 assert((ResVT.is256BitVector() ||
7859 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7861 SDValue V1 = Op.getOperand(0);
7862 SDValue V2 = Op.getOperand(1);
7863 unsigned NumElems = ResVT.getVectorNumElements();
7864 if (ResVT.is256BitVector())
7865 return concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7867 if (Op.getNumOperands() == 4) {
7868 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
7869 ResVT.getVectorNumElements()/2);
7870 SDValue V3 = Op.getOperand(2);
7871 SDValue V4 = Op.getOperand(3);
7872 return concat256BitVectors(
7873 concat128BitVectors(V1, V2, HalfVT, NumElems / 2, DAG, dl),
7874 concat128BitVectors(V3, V4, HalfVT, NumElems / 2, DAG, dl), ResVT,
7877 return concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7880 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
7881 const X86Subtarget &Subtarget,
7882 SelectionDAG & DAG) {
7884 MVT ResVT = Op.getSimpleValueType();
7885 unsigned NumOfOperands = Op.getNumOperands();
7887 assert(isPowerOf2_32(NumOfOperands) &&
7888 "Unexpected number of operands in CONCAT_VECTORS");
7890 SDValue Undef = DAG.getUNDEF(ResVT);
7891 if (NumOfOperands > 2) {
7892 // Specialize the cases when all, or all but one, of the operands are undef.
7893 unsigned NumOfDefinedOps = 0;
7895 for (unsigned i = 0; i < NumOfOperands; i++)
7896 if (!Op.getOperand(i).isUndef()) {
7900 if (NumOfDefinedOps == 0)
7902 if (NumOfDefinedOps == 1) {
7903 unsigned SubVecNumElts =
7904 Op.getOperand(OpIdx).getValueType().getVectorNumElements();
7905 SDValue IdxVal = DAG.getIntPtrConstant(SubVecNumElts * OpIdx, dl);
7906 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef,
7907 Op.getOperand(OpIdx), IdxVal);
7910 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
7911 ResVT.getVectorNumElements()/2);
7912 SmallVector<SDValue, 2> Ops;
7913 for (unsigned i = 0; i < NumOfOperands/2; i++)
7914 Ops.push_back(Op.getOperand(i));
7915 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT, Ops);
7917 for (unsigned i = NumOfOperands/2; i < NumOfOperands; i++)
7918 Ops.push_back(Op.getOperand(i));
7919 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT, Ops);
7920 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
7924 SDValue V1 = Op.getOperand(0);
7925 SDValue V2 = Op.getOperand(1);
7926 unsigned NumElems = ResVT.getVectorNumElements();
7927 assert(V1.getValueType() == V2.getValueType() &&
7928 V1.getValueType().getVectorNumElements() == NumElems/2 &&
7929 "Unexpected operands in CONCAT_VECTORS");
7931 if (ResVT.getSizeInBits() >= 16)
7932 return Op; // The operation is legal with KUNPCK
7934 bool IsZeroV1 = ISD::isBuildVectorAllZeros(V1.getNode());
7935 bool IsZeroV2 = ISD::isBuildVectorAllZeros(V2.getNode());
7936 SDValue ZeroVec = getZeroVector(ResVT, Subtarget, DAG, dl);
7937 if (IsZeroV1 && IsZeroV2)
7940 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
7942 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V1, ZeroIdx);
7944 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, ZeroVec, V1, ZeroIdx);
7946 SDValue IdxVal = DAG.getIntPtrConstant(NumElems/2, dl);
7948 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V2, IdxVal);
7951 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, ZeroVec, V2, IdxVal);
7953 V1 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V1, ZeroIdx);
7954 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, V1, V2, IdxVal);
7957 static SDValue LowerCONCAT_VECTORS(SDValue Op,
7958 const X86Subtarget &Subtarget,
7959 SelectionDAG &DAG) {
7960 MVT VT = Op.getSimpleValueType();
7961 if (VT.getVectorElementType() == MVT::i1)
7962 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
7964 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7965 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7966 Op.getNumOperands() == 4)));
7968 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7969 // from two other 128-bit ones.
7971 // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
7972 return LowerAVXCONCAT_VECTORS(Op, DAG);
7975 //===----------------------------------------------------------------------===//
7976 // Vector shuffle lowering
7978 // This is an experimental code path for lowering vector shuffles on x86. It is
7979 // designed to handle arbitrary vector shuffles and blends, gracefully
7980 // degrading performance as necessary. It works hard to recognize idiomatic
7981 // shuffles and lower them to optimal instruction patterns without leaving
7982 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
7984 //===----------------------------------------------------------------------===//
7986 /// \brief Tiny helper function to identify a no-op mask.
7988 /// This is a somewhat boring predicate function. It checks whether the mask
7989 /// array input, which is assumed to be a single-input shuffle mask of the kind
7990 /// used by the X86 shuffle instructions (not a fully general
7991 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7992 /// in-place shuffle are 'no-op's.
7993 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7994 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7995 assert(Mask[i] >= -1 && "Out of bound mask element!");
7996 if (Mask[i] >= 0 && Mask[i] != i)
8002 /// \brief Test whether there are elements crossing 128-bit lanes in this shuffle.
8005 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
8006 /// and we routinely test for these.
8007 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
8008 int LaneSize = 128 / VT.getScalarSizeInBits();
8009 int Size = Mask.size();
8010 for (int i = 0; i < Size; ++i)
8011 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
8016 /// \brief Test whether a shuffle mask is equivalent within each sub-lane.
8018 /// This checks a shuffle mask to see if it is performing the same
8019 /// lane-relative shuffle in each sub-lane. This trivially implies
8020 /// that it is also not lane-crossing. It may however involve a blend from the
8021 /// same lane of a second vector.
8023 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
8024 /// non-trivial to compute in the face of undef lanes. The representation is
8025 /// suitable for use with existing 128-bit shuffles as entries from the second
8026 /// vector have been remapped to [LaneSize, 2*LaneSize).
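///
/// As an illustrative example: for v8f32, the 256-bit mask
/// <0, 8, 1, 9, 4, 12, 5, 13> performs <0, 4, 1, 5> within each 128-bit lane
/// (the per-lane unpacklo pattern), so RepeatedMask becomes <0, 4, 1, 5>.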
8027 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
8029 SmallVectorImpl<int> &RepeatedMask) {
8030 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
8031 RepeatedMask.assign(LaneSize, -1);
8032 int Size = Mask.size();
8033 for (int i = 0; i < Size; ++i) {
8034 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
8037 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
8038 // This entry crosses lanes, so there is no way to model this shuffle.
8041 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
8042 // Adjust second vector indices to start at LaneSize instead of Size.
8043 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
8044 : Mask[i] % LaneSize + LaneSize;
8045 if (RepeatedMask[i % LaneSize] < 0)
8046 // This is the first non-undef entry in this slot of a 128-bit lane.
8047 RepeatedMask[i % LaneSize] = LocalM;
8048 else if (RepeatedMask[i % LaneSize] != LocalM)
8049 // Found a mismatch with the repeated mask.
8055 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
8057 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
8058 SmallVectorImpl<int> &RepeatedMask) {
8059 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
8062 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
8064 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
8065 SmallVectorImpl<int> &RepeatedMask) {
8066 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
8069 /// Test whether a target shuffle mask is equivalent within each sub-lane.
8070 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
8071 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
8073 SmallVectorImpl<int> &RepeatedMask) {
8074 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
8075 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
8076 int Size = Mask.size();
8077 for (int i = 0; i < Size; ++i) {
8078 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
8079 if (Mask[i] == SM_SentinelUndef)
8081 if (Mask[i] == SM_SentinelZero) {
8082 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
8084 RepeatedMask[i % LaneSize] = SM_SentinelZero;
8087 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
8088 // This entry crosses lanes, so there is no way to model this shuffle.
8091 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
8092 // Adjust second vector indices to start at LaneSize instead of Size.
8094 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
8095 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
8096 // This is the first non-undef entry in this slot of a 128-bit lane.
8097 RepeatedMask[i % LaneSize] = LocalM;
8098 else if (RepeatedMask[i % LaneSize] != LocalM)
8099 // Found a mismatch with the repeated mask.
8105 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of arguments.
8108 /// This is a fast way to test a shuffle mask against a fixed pattern:
8110 /// if (isShuffleEquivalent(Mask, 3, 2, {1, 0})) { ... }
8112 /// It returns true if the mask is exactly as wide as the argument list, and
8113 /// each element of the mask is either -1 (signifying undef) or the value given
8114 /// in the argument.
8115 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
8116 ArrayRef<int> ExpectedMask) {
8117 if (Mask.size() != ExpectedMask.size())
8120 int Size = Mask.size();
8122 // If the values are build vectors, we can look through them to find
8123 // equivalent inputs that make the shuffles equivalent.
8124 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
8125 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
8127 for (int i = 0; i < Size; ++i) {
8128 assert(Mask[i] >= -1 && "Out of bound mask element!");
8129 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
8130 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
8131 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
8132 if (!MaskBV || !ExpectedBV ||
8133 MaskBV->getOperand(Mask[i] % Size) !=
8134 ExpectedBV->getOperand(ExpectedMask[i] % Size))
8142 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
8144 /// The masks must be exactly the same width.
8146 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
8147 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
8149 /// SM_SentinelZero is accepted as a valid negative index but must match in both.
8150 static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
8151 ArrayRef<int> ExpectedMask) {
8152 int Size = Mask.size();
8153 if (Size != (int)ExpectedMask.size())
8156 for (int i = 0; i < Size; ++i)
8157 if (Mask[i] == SM_SentinelUndef)
8159 else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
8161 else if (Mask[i] != ExpectedMask[i])
8167 // Merges a general DAG shuffle mask and zeroable bit mask into a target shuffle mask.
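// For example, the mask <0, 5, 2, 7> with Zeroable bits {0, 1, 0, 1} becomes
// <0, SM_SentinelZero, 2, SM_SentinelZero>; undef entries stay SM_SentinelUndef.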
8169 static SmallVector<int, 64> createTargetShuffleMask(ArrayRef<int> Mask,
8170 const APInt &Zeroable) {
8171 int NumElts = Mask.size();
8172 assert(NumElts == (int)Zeroable.getBitWidth() && "Mismatch mask sizes");
8174 SmallVector<int, 64> TargetMask(NumElts, SM_SentinelUndef);
8175 for (int i = 0; i != NumElts; ++i) {
8177 if (M == SM_SentinelUndef)
8179 assert(0 <= M && M < (2 * NumElts) && "Out of range shuffle index");
8180 TargetMask[i] = (Zeroable[i] ? SM_SentinelZero : M);
8185 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd instruction.
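// For example (a sketch): for v8i32/v8f32 this accepts the masks
// <0, 8, 1, 9, 2, 10, 3, 11> (the vpunpcklwd pattern) and
// <4, 12, 5, 13, 6, 14, 7, 15> (the vpunpckhwd pattern).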
8187 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
8188 if (VT != MVT::v8i32 && VT != MVT::v8f32)
8191 SmallVector<int, 8> Unpcklwd;
8192 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
8193 /* Unary = */ false);
8194 SmallVector<int, 8> Unpckhwd;
8195 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
8196 /* Unary = */ false);
8197 bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
8198 isTargetShuffleEquivalent(Mask, Unpckhwd));
8199 return IsUnpackwdMask;
8202 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
8204 /// This helper function produces an 8-bit shuffle immediate corresponding to
8205 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
8206 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for example.
8209 /// NB: We rely heavily on "undef" masks preserving the input lane.
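///
/// For example, the mask <1, 0, 3, 2> encodes to 0b10'11'00'01 == 0xB1 (two
/// bits per lane, lane 0 in the low bits), the immediate that swaps adjacent
/// element pairs with pshufd.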
8210 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
8211 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
8212 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
8213 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
8214 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
8215 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
8218 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
8219 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
8220 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
8221 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
8225 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
8226 SelectionDAG &DAG) {
8227 return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
8230 /// \brief Compute whether each element of a shuffle is zeroable.
8232 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
8233 /// Either it is an undef element in the shuffle mask, the element of the input
8234 /// referenced is undef, or the element of the input referenced is known to be
8235 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
8236 /// as many lanes with this technique as possible to simplify the remaining shuffle.
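///
/// For example (a sketch): if V2 is an all-zeros build_vector, then for the
/// v4i32 mask <0, -1, 5, 6> the undef lane and both lanes sourced from the
/// zero vector are reported as zeroable.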
8238 static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
8239 SDValue V1, SDValue V2) {
8240 APInt Zeroable(Mask.size(), 0);
8241 V1 = peekThroughBitcasts(V1);
8242 V2 = peekThroughBitcasts(V2);
8244 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
8245 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
8247 int VectorSizeInBits = V1.getValueSizeInBits();
8248 int ScalarSizeInBits = VectorSizeInBits / Mask.size();
8249 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
8251 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
8253 // Handle the easy cases.
8254 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
8259 // Determine shuffle input and normalize the mask.
8260 SDValue V = M < Size ? V1 : V2;
8263 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
8264 if (V.getOpcode() != ISD::BUILD_VECTOR)
8267 // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
8268 // the (larger) source element must be UNDEF/ZERO.
8269 if ((Size % V.getNumOperands()) == 0) {
8270 int Scale = Size / V->getNumOperands();
8271 SDValue Op = V.getOperand(M / Scale);
8272 if (Op.isUndef() || X86::isZeroNode(Op))
8274 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
8275 APInt Val = Cst->getAPIntValue();
8276 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
8277 Val = Val.getLoBits(ScalarSizeInBits);
8280 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
8281 APInt Val = Cst->getValueAPF().bitcastToAPInt();
8282 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
8283 Val = Val.getLoBits(ScalarSizeInBits);
8290 // If the BUILD_VECTOR has more elements, then all the (smaller) source
8291 // elements must be UNDEF or ZERO.
8292 if ((V.getNumOperands() % Size) == 0) {
8293 int Scale = V->getNumOperands() / Size;
8294 bool AllZeroable = true;
8295 for (int j = 0; j < Scale; ++j) {
8296 SDValue Op = V.getOperand((M * Scale) + j);
8297 AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
8308 // The shuffle result is as follows:
8309 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements are in ascending order.
8310 // Each element of Zeroable corresponds to a particular element of Mask,
8311 // as described in the computeZeroableShuffleElements function.
8313 // The function looks for a sub-mask whose non-zero elements are in
8314 // increasing order. If such a sub-mask exists, the function returns true.
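//
// Illustrative sketch: for v8i32, the mask <zz, 0, zz, 1, zz, 2, zz, 3>
// (zz = zeroable) has its non-zero elements 0, 1, 2, 3 in increasing order,
// so it can be lowered as a zero-masked VEXPAND of the first input.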
8315 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
8316 ArrayRef<int> Mask, const EVT &VectorType,
8317 bool &IsZeroSideLeft) {
8318 int NextElement = -1;
8319 // Check if the Mask's nonzero elements are in increasing order.
8320 for (int i = 0, e = Mask.size(); i < e; i++) {
8321 // Check that the mask's zero elements are built only from zeros.
8322 assert(Mask[i] >= -1 && "Out of bound mask element!");
8327 // Find the lowest non-zero element.
8328 if (NextElement < 0) {
8329 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
8330 IsZeroSideLeft = NextElement != 0;
8332 // Exit if the mask's non-zero elements are not in increasing order.
8333 if (NextElement != Mask[i])
8340 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
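/// (For reference: each PSHUFB control byte i selects the source byte at that
/// index within its own 16-byte lane, and a control byte with the sign bit
/// set (0x80) produces a zero byte instead.)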
8341 static SDValue lowerVectorShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
8342 ArrayRef<int> Mask, SDValue V1,
8344 const APInt &Zeroable,
8345 const X86Subtarget &Subtarget,
8346 SelectionDAG &DAG) {
8347 int Size = Mask.size();
8348 int LaneSize = 128 / VT.getScalarSizeInBits();
8349 const int NumBytes = VT.getSizeInBits() / 8;
8350 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
8352 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
8353 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
8354 (Subtarget.hasBWI() && VT.is512BitVector()));
8356 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
8357 // Sign bit set in i8 mask means zero element.
8358 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
8361 for (int i = 0; i < NumBytes; ++i) {
8362 int M = Mask[i / NumEltBytes];
8364 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
8367 if (Zeroable[i / NumEltBytes]) {
8368 PSHUFBMask[i] = ZeroMask;
8372 // We can only use a single input of V1 or V2.
8373 SDValue SrcV = (M >= Size ? V2 : V1);
8379 // PSHUFB can't cross lanes, ensure this doesn't happen.
8380 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
8384 M = M * NumEltBytes + (i % NumEltBytes);
8385 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
8387 assert(V && "Failed to find a source input");
8389 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
8390 return DAG.getBitcast(
8391 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
8392 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
8395 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
8396 const X86Subtarget &Subtarget, SelectionDAG &DAG,
8399 // X86 has a dedicated shuffle pattern that can be lowered to VEXPAND.
8400 static SDValue lowerVectorShuffleToEXPAND(const SDLoc &DL, MVT VT,
8401 const APInt &Zeroable,
8402 ArrayRef<int> Mask, SDValue &V1,
8403 SDValue &V2, SelectionDAG &DAG,
8404 const X86Subtarget &Subtarget) {
8405 bool IsLeftZeroSide = true;
8406 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
8409 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
8411 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
8412 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
8413 unsigned NumElts = VT.getVectorNumElements();
8414 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
8415 "Unexpected number of vector elements");
8416 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
8417 Subtarget, DAG, DL);
8418 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
8419 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
8420 return DAG.getSelect(DL, VT, VMask,
8421 DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector),
8425 static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
8426 unsigned &UnpackOpcode, bool IsUnary,
8427 ArrayRef<int> TargetMask, SDLoc &DL,
8429 const X86Subtarget &Subtarget) {
8430 int NumElts = VT.getVectorNumElements();
8432 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
8433 for (int i = 0; i != NumElts; i += 2) {
8434 int M1 = TargetMask[i + 0];
8435 int M2 = TargetMask[i + 1];
8436 Undef1 &= (SM_SentinelUndef == M1);
8437 Undef2 &= (SM_SentinelUndef == M2);
8438 Zero1 &= isUndefOrZero(M1);
8439 Zero2 &= isUndefOrZero(M2);
8441 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
8442 "Zeroable shuffle detected");
8444 // Attempt to match the target mask against the unpack lo/hi mask patterns.
8445 SmallVector<int, 64> Unpckl, Unpckh;
8446 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
8447 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
8448 UnpackOpcode = X86ISD::UNPCKL;
8449 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
8450 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
8454 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
8455 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
8456 UnpackOpcode = X86ISD::UNPCKH;
8457 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
8458 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
8462 // If an unary shuffle, attempt to match as an unpack lo/hi with zero.
8463 if (IsUnary && (Zero1 || Zero2)) {
8464 // Don't bother if we can blend instead.
8465 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
8466 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
8469 bool MatchLo = true, MatchHi = true;
8470 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
8471 int M = TargetMask[i];
8473 // Ignore if the input is known to be zero or the index is undef.
8474 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
8475 (M == SM_SentinelUndef))
8478 MatchLo &= (M == Unpckl[i]);
8479 MatchHi &= (M == Unpckh[i]);
8482 if (MatchLo || MatchHi) {
8483 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
8484 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
8485 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
8490 // If a binary shuffle, commute and try again.
8492 ShuffleVectorSDNode::commuteMask(Unpckl);
8493 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
8494 UnpackOpcode = X86ISD::UNPCKL;
8499 ShuffleVectorSDNode::commuteMask(Unpckh);
8500 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
8501 UnpackOpcode = X86ISD::UNPCKH;
8510 // X86 has dedicated unpack instructions that can handle specific blend
8511 // operations: UNPCKH and UNPCKL.
8512 static SDValue lowerVectorShuffleWithUNPCK(const SDLoc &DL, MVT VT,
8513 ArrayRef<int> Mask, SDValue V1,
8514 SDValue V2, SelectionDAG &DAG) {
8515 SmallVector<int, 8> Unpckl;
8516 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
8517 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
8518 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
8520 SmallVector<int, 8> Unpckh;
8521 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
8522 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
8523 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
8525 // Commute and try again.
8526 ShuffleVectorSDNode::commuteMask(Unpckl);
8527 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
8528 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
8530 ShuffleVectorSDNode::commuteMask(Unpckh);
8531 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
8532 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
8537 /// \brief Try to emit a bitmask instruction for a shuffle.
8539 /// This handles cases where we can model a blend exactly as a bitmask due to
8540 /// one of the inputs being zeroable.
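///
/// For example (a sketch): a v4i32 shuffle that keeps V1 elements 0 and 2 in
/// place while lanes 1 and 3 are zeroable lowers to V1 & <-1, 0, -1, 0>.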
8541 static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
8542 SDValue V2, ArrayRef<int> Mask,
8543 const APInt &Zeroable,
8544 SelectionDAG &DAG) {
8545 assert(!VT.isFloatingPoint() && "Floating point types are not supported");
8546 MVT EltVT = VT.getVectorElementType();
8547 SDValue Zero = DAG.getConstant(0, DL, EltVT);
8548 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
8549 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
8551 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
8554 if (Mask[i] % Size != i)
8555 return SDValue(); // Not a blend.
8557 V = Mask[i] < Size ? V1 : V2;
8558 else if (V != (Mask[i] < Size ? V1 : V2))
8559 return SDValue(); // Can only let one input through the mask.
8561 VMaskOps[i] = AllOnes;
8564 return SDValue(); // No non-zeroable elements!
8566 SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps);
8567 return DAG.getNode(ISD::AND, DL, VT, V, VMask);
8570 /// \brief Try to emit a blend instruction for a shuffle using bit math.
8572 /// This is used as a fallback approach when first class blend instructions are
8573 /// unavailable. Currently it is only suitable for integer vectors, but could
8574 /// be generalized for floating point vectors if desirable.
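///
/// For example (a sketch): the v4i32 blend mask <0, 5, 2, 7> becomes
/// (V1 & <-1, 0, -1, 0>) | (V2 & ~<-1, 0, -1, 0>), built from AND/ANDNP/OR.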
8575 static SDValue lowerVectorShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
8576 SDValue V2, ArrayRef<int> Mask,
8577 SelectionDAG &DAG) {
8578 assert(VT.isInteger() && "Only supports integer vector types!");
8579 MVT EltVT = VT.getVectorElementType();
8580 SDValue Zero = DAG.getConstant(0, DL, EltVT);
8581 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
8582 SmallVector<SDValue, 16> MaskOps;
8583 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
8584 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
8585 return SDValue(); // Shuffled input!
8586 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
8589 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
8590 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
8591 // We have to cast V2 around.
8592 MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
8593 V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
8594 DAG.getBitcast(MaskVT, V1Mask),
8595 DAG.getBitcast(MaskVT, V2)));
8596 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
8599 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
8600 SDValue PreservedSrc,
8601 const X86Subtarget &Subtarget,
8604 static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
8605 MutableArrayRef<int> TargetMask,
8606 bool &ForceV1Zero, bool &ForceV2Zero,
8607 uint64_t &BlendMask) {
8608 bool V1IsZeroOrUndef =
8609 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
8610 bool V2IsZeroOrUndef =
8611 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
8614 ForceV1Zero = false, ForceV2Zero = false;
8615 assert(TargetMask.size() <= 64 && "Shuffle mask too big for blend mask");
8617 // Attempt to generate the binary blend mask. If an input is zero then
8618 // we can use any lane.
8619 // TODO: generalize the zero matching to any scalar like isShuffleEquivalent.
8620 for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
8621 int M = TargetMask[i];
8622 if (M == SM_SentinelUndef)
8626 if (M == i + Size) {
8627 BlendMask |= 1ull << i;
8630 if (M == SM_SentinelZero) {
8631 if (V1IsZeroOrUndef) {
8636 if (V2IsZeroOrUndef) {
8638 BlendMask |= 1ull << i;
8639 TargetMask[i] = i + Size;
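// Widens a blend mask by repeating each bit Scale times; e.g. scaling the
// 4-bit blend mask 0b0101 by Scale == 2 yields 0b00110011.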
8648 uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size, int Scale) {
8649 uint64_t ScaledMask = 0;
8650 for (int i = 0; i != Size; ++i)
8651 if (BlendMask & (1ull << i))
8652 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
8656 /// \brief Try to emit a blend instruction for a shuffle.
8658 /// This doesn't do any checks for the availability of instructions for blending
8659 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
8660 /// be matched in the backend with the type given. What it does check for is
8661 /// that the shuffle mask is a blend, or convertible into a blend with zero.
8662 static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
8663 SDValue V2, ArrayRef<int> Original,
8664 const APInt &Zeroable,
8665 const X86Subtarget &Subtarget,
8666 SelectionDAG &DAG) {
8667 SmallVector<int, 64> Mask = createTargetShuffleMask(Original, Zeroable);
8669 uint64_t BlendMask = 0;
8670 bool ForceV1Zero = false, ForceV2Zero = false;
8671 if (!matchVectorShuffleAsBlend(V1, V2, Mask, ForceV1Zero, ForceV2Zero,
8675 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
8677 V1 = getZeroVector(VT, Subtarget, DAG, DL);
8679 V2 = getZeroVector(VT, Subtarget, DAG, DL);
8681 switch (VT.SimpleTy) {
8686 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
8687 DAG.getConstant(BlendMask, DL, MVT::i8));
8691 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
8695 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
8696 // that instruction.
8697 if (Subtarget.hasAVX2()) {
8698 // Scale the blend by the number of 32-bit dwords per element.
8699 int Scale = VT.getScalarSizeInBits() / 32;
8700 BlendMask = scaleVectorShuffleBlendMask(BlendMask, Mask.size(), Scale);
8701 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
8702 V1 = DAG.getBitcast(BlendVT, V1);
8703 V2 = DAG.getBitcast(BlendVT, V2);
8704 return DAG.getBitcast(
8705 VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
8706 DAG.getConstant(BlendMask, DL, MVT::i8)));
8710 // For integer shuffles we need to expand the mask and cast the inputs to
8711 // v8i16s prior to blending.
8712 int Scale = 8 / VT.getVectorNumElements();
8713 BlendMask = scaleVectorShuffleBlendMask(BlendMask, Mask.size(), Scale);
8714 V1 = DAG.getBitcast(MVT::v8i16, V1);
8715 V2 = DAG.getBitcast(MVT::v8i16, V2);
8716 return DAG.getBitcast(VT,
8717 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
8718 DAG.getConstant(BlendMask, DL, MVT::i8)));
8722 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
8723 SmallVector<int, 8> RepeatedMask;
8724 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
8725 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
8726 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
8728 for (int i = 0; i < 8; ++i)
8729 if (RepeatedMask[i] >= 8)
8730 BlendMask |= 1ull << i;
8731 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
8732 DAG.getConstant(BlendMask, DL, MVT::i8));
8738 assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
8739 "256-bit byte-blends require AVX2 support!");
8741 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
8743 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
8744 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
8745 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
8748 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
8749 if (SDValue Masked =
8750 lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, DAG))
8753 // Scale the blend by the number of bytes per element.
8754 int Scale = VT.getScalarSizeInBits() / 8;
8756 // This form of blend is always done on bytes. Compute the byte vector type.
8758 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
8760 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
8761 // mix of LLVM's code generator and the x86 backend. We tell the code
8762 // generator that boolean values in the elements of an x86 vector register
8763 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
8764 // mapping a select to operand #1, and 'false' mapping to operand #2. The
8765 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
8766 // of the element (the remaining are ignored) and 0 in that high bit would
8767 // mean operand #1 while 1 in the high bit would mean operand #2. So while
8768 // the LLVM model for boolean values in vector elements gets the relevant
8769 // bit set, it is set backwards and over constrained relative to x86's modeling.
8771 SmallVector<SDValue, 32> VSELECTMask;
8772 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8773 for (int j = 0; j < Scale; ++j)
8774 VSELECTMask.push_back(
8775 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
8776 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
8779 V1 = DAG.getBitcast(BlendVT, V1);
8780 V2 = DAG.getBitcast(BlendVT, V2);
8781 return DAG.getBitcast(
8783 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
8793 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
8794 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
8795 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
8798 llvm_unreachable("Not a supported integer vector type!");
8802 /// \brief Try to lower as a blend of elements from two inputs followed by
8803 /// a single-input permutation.
8805 /// This matches the pattern where we can blend elements from two inputs and
8806 /// then reduce the shuffle to a single-input permutation.
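///
/// For example (a sketch): the v4i32 mask <6, 0, 3, 5> can be emitted as the
/// blend <0, 5, 6, 3> of V1/V2 followed by the single-input permute
/// <2, 0, 3, 1>.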
8807 static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
8808 SDValue V1, SDValue V2,
8810 SelectionDAG &DAG) {
8811 // We build up the blend mask while checking whether a blend is a viable way
8812 // to reduce the shuffle.
8813 SmallVector<int, 32> BlendMask(Mask.size(), -1);
8814 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
8816 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
8820 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
8822 if (BlendMask[Mask[i] % Size] < 0)
8823 BlendMask[Mask[i] % Size] = Mask[i];
8824 else if (BlendMask[Mask[i] % Size] != Mask[i])
8825 return SDValue(); // Can't blend in the needed input!
8827 PermuteMask[i] = Mask[i] % Size;
8830 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
8831 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
8834 /// \brief Generic routine to decompose a shuffle and blend into independent
8835 /// blends and permutes.
8837 /// This matches the extremely common pattern for handling combined
8838 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
8839 /// operations. It will try to pick the best arrangement of shuffles and blends.
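///
/// For example (a sketch): the v4i32 mask <2, 5, 0, 7> decomposes into
/// V1' = shuffle(V1, <2, -1, 0, -1>), V2' = shuffle(V2, <-1, 1, -1, 3>), and
/// the final blend <0, 5, 2, 7> of V1' and V2'.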
8841 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(const SDLoc &DL,
8845 SelectionDAG &DAG) {
8846 // Shuffle the input elements into the desired positions in V1 and V2 and
8847 // blend them together.
8848 SmallVector<int, 32> V1Mask(Mask.size(), -1);
8849 SmallVector<int, 32> V2Mask(Mask.size(), -1);
8850 SmallVector<int, 32> BlendMask(Mask.size(), -1);
8851 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8852 if (Mask[i] >= 0 && Mask[i] < Size) {
8853 V1Mask[i] = Mask[i];
8855 } else if (Mask[i] >= Size) {
8856 V2Mask[i] = Mask[i] - Size;
8857 BlendMask[i] = i + Size;
8860 // Try to lower with the simpler initial blend strategy unless one of the
8861 // input shuffles would be a no-op. We prefer to shuffle inputs as the
8862 // shuffle may be able to fold with a load or other benefit. However, when
8863 // we'll have to do 2x as many shuffles in order to achieve this, blending
8864 // first is a better strategy.
8865 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
8866 if (SDValue BlendPerm =
8867 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
8870 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
8871 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
8872 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
8875 /// \brief Try to lower a vector shuffle as a rotation.
8877 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
8878 static int matchVectorShuffleAsRotate(SDValue &V1, SDValue &V2,
8879 ArrayRef<int> Mask) {
8880 int NumElts = Mask.size();
8882 // We need to detect various ways of spelling a rotation:
8883 // [11, 12, 13, 14, 15, 0, 1, 2]
8884 // [-1, 12, 13, 14, -1, -1, 1, -1]
8885 // [-1, -1, -1, -1, -1, -1, 1, 2]
8886 // [ 3, 4, 5, 6, 7, 8, 9, 10]
8887 // [-1, 4, 5, 6, -1, -1, 9, -1]
8888 // [-1, 4, 5, 6, -1, -1, -1, -1]
8891 for (int i = 0; i < NumElts; ++i) {
8893 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
8894 "Unexpected mask index.");
8898 // Determine where a rotated vector would have started.
8899 int StartIdx = i - (M % NumElts);
8901 // The identity rotation isn't interesting, stop.
8904 // If we found the tail of a vector, the rotation must be the missing
8905 // front. If we found the head of a vector, it must be how much of the head.
8907 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
8910 Rotation = CandidateRotation;
8911 else if (Rotation != CandidateRotation)
8912 // The rotations don't match, so we can't match this mask.
8915 // Compute which value this mask is pointing at.
8916 SDValue MaskV = M < NumElts ? V1 : V2;
8918 // Compute which of the two target values this index should be assigned
8919 // to. This reflects whether the high elements are remaining or the low
8920 // elements are remaining.
8921 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
8923 // Either set up this value if we've not encountered it before, or check
8924 // that it remains consistent.
8927 else if (TargetV != MaskV)
8928 // This may be a rotation, but it pulls from the inputs in some
8929 // unsupported interleaving.
8933 // Check that we successfully analyzed the mask, and normalize the results.
8934 assert(Rotation != 0 && "Failed to locate a viable rotation!");
8935 assert((Lo || Hi) && "Failed to find a rotated input vector!");
8947 /// \brief Try to lower a vector shuffle as a byte rotation.
8949 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
8950 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
8951 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
8952 /// try to generically lower a vector shuffle through such a pattern. It
8953 /// does not check for the profitability of lowering either as PALIGNR or
8954 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
8955 /// This matches shuffle vectors that look like:
8957 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
8959 /// Essentially it concatenates V1 and V2, shifts right by some number of
8960 /// elements, and takes the low elements as the result. Note that while this is
8961 /// specified as a *right shift* because x86 is little-endian, it is a *left
8962 /// rotate* of the vector lanes.
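///
/// For the v8i16 example above the rotation is 3 elements, so the PALIGNR
/// byte immediate is 3 * (16 / 8) == 6.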
8963 static int matchVectorShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
8964 ArrayRef<int> Mask) {
8965 // Don't accept any shuffles with zero elements.
8966 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
8969 // PALIGNR works on 128-bit lanes.
8970 SmallVector<int, 16> RepeatedMask;
8971 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
8974 int Rotation = matchVectorShuffleAsRotate(V1, V2, RepeatedMask);
8978 // PALIGNR rotates bytes, so we need to scale the
8979 // rotation based on how many bytes are in the vector lane.
8980 int NumElts = RepeatedMask.size();
8981 int Scale = 16 / NumElts;
8982 return Rotation * Scale;
8985 static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT,
8986 SDValue V1, SDValue V2,
8988 const X86Subtarget &Subtarget,
8989 SelectionDAG &DAG) {
8990 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
8992 SDValue Lo = V1, Hi = V2;
8993 int ByteRotation = matchVectorShuffleAsByteRotate(VT, Lo, Hi, Mask);
8994 if (ByteRotation <= 0)
8997 // Cast the inputs to an i8 vector of the correct length to match PALIGNR or PSLLDQ/PSRLDQ.
8999 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
9000 Lo = DAG.getBitcast(ByteVT, Lo);
9001 Hi = DAG.getBitcast(ByteVT, Hi);
9003 // SSSE3 targets can use the palignr instruction.
9004 if (Subtarget.hasSSSE3()) {
9005 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
9006 "512-bit PALIGNR requires BWI instructions");
9007 return DAG.getBitcast(
9008 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
9009 DAG.getConstant(ByteRotation, DL, MVT::i8)));
9012 assert(VT.is128BitVector() &&
9013 "Rotate-based lowering only supports 128-bit lowering!");
9014 assert(Mask.size() <= 16 &&
9015 "Can shuffle at most 16 bytes in a 128-bit vector!");
9016 assert(ByteVT == MVT::v16i8 &&
9017 "SSE2 rotate lowering only needed for v16i8!");
9019 // Default SSE2 implementation
9020 int LoByteShift = 16 - ByteRotation;
9021 int HiByteShift = ByteRotation;
9023 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
9024 DAG.getConstant(LoByteShift, DL, MVT::i8));
9025 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
9026 DAG.getConstant(HiByteShift, DL, MVT::i8));
9027 return DAG.getBitcast(VT,
9028 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
9031 /// \brief Try to lower a vector shuffle as a dword/qword rotation.
9033 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
9034 /// rotation of the concatenation of two vectors; this routine will
9035 /// try to generically lower a vector shuffle through such a pattern.
9037 /// Essentially it concatenates V1 and V2, shifts right by some number of
9038 /// elements, and takes the low elements as the result. Note that while this is
9039 /// specified as a *right shift* because x86 is little-endian, it is a *left
9040 /// rotate* of the vector lanes.
9041 static SDValue lowerVectorShuffleAsRotate(const SDLoc &DL, MVT VT,
9042 SDValue V1, SDValue V2,
9044 const X86Subtarget &Subtarget,
9045 SelectionDAG &DAG) {
9046 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
9047 "Only 32-bit and 64-bit elements are supported!");
9049 // 128/256-bit vectors are only supported with VLX.
9050 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
9051 && "VLX required for 128/256-bit vectors");
9053 SDValue Lo = V1, Hi = V2;
9054 int Rotation = matchVectorShuffleAsRotate(Lo, Hi, Mask);
9058 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
9059 DAG.getConstant(Rotation, DL, MVT::i8));
9062 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
9064 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
9065 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
9066 /// matches elements from one of the input vectors shuffled to the left or
9067 /// right with zeroable elements 'shifted in'. It handles both the strictly
9068 /// bit-wise element shifts and the byte shift across an entire 128-bit double quadword lane.
9071 /// PSLL : (little-endian) left bit shift.
9072 /// [ zz, 0, zz, 2 ]
9073 /// [ -1, 4, zz, -1 ]
9074 /// PSRL : (little-endian) right bit shift.
9076 /// [ -1, -1, 7, zz]
9077 /// PSLLDQ : (little-endian) left byte shift
9078 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
9079 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
9080 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
9081 /// PSRLDQ : (little-endian) right byte shift
9082 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
9083 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
9084 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
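///
/// Worked example (a sketch): the v4i32 mask [zz, 0, 1, 2] matches with
/// Scale == 4 and Shift == 1, giving a byte shift: VSHLDQ (PSLLDQ) by
/// 1 * 32 / 8 == 4 bytes.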
9085 static int matchVectorShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
9086 unsigned ScalarSizeInBits,
9087 ArrayRef<int> Mask, int MaskOffset,
9088 const APInt &Zeroable,
9089 const X86Subtarget &Subtarget) {
9090 int Size = Mask.size();
9091 unsigned SizeInBits = Size * ScalarSizeInBits;
9093 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
9094 for (int i = 0; i < Size; i += Scale)
9095 for (int j = 0; j < Shift; ++j)
9096 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
9102 auto MatchShift = [&](int Shift, int Scale, bool Left) {
9103 for (int i = 0; i != Size; i += Scale) {
9104 unsigned Pos = Left ? i + Shift : i;
9105 unsigned Low = Left ? i : i + Shift;
9106 unsigned Len = Scale - Shift;
9107 if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
9111 int ShiftEltBits = ScalarSizeInBits * Scale;
9112 bool ByteShift = ShiftEltBits > 64;
9113 Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
9114 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
9115 int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
9117 // Normalize the scale for byte shifts to still produce an i64 element type.
9119 Scale = ByteShift ? Scale / 2 : Scale;
9121 // We need to round trip through the appropriate type for the shift.
9122 MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
9123 ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
9124 : MVT::getVectorVT(ShiftSVT, Size / Scale);
9125 return (int)ShiftAmt;
9128 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
9129 // keep doubling the size of the integer elements up to that. We can
9130 // then shift the elements of the integer vector by whole multiples of
9131 // their width within the elements of the larger integer vector. Test each
9132 // multiple to see if we can find a match with the moved element indices
9133 // and that the shifted in elements are all zeroable.
9134 unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
9135 for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
9136 for (int Shift = 1; Shift != Scale; ++Shift)
9137 for (bool Left : {true, false})
9138 if (CheckZeros(Shift, Scale, Left)) {
9139 int ShiftAmt = MatchShift(Shift, Scale, Left);
9148 static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
9149 SDValue V2, ArrayRef<int> Mask,
9150 const APInt &Zeroable,
9151 const X86Subtarget &Subtarget,
9152 SelectionDAG &DAG) {
9153 int Size = Mask.size();
9154 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
9160 // Try to match shuffle against V1 shift.
9161 int ShiftAmt = matchVectorShuffleAsShift(
9162 ShiftVT, Opcode, VT.getScalarSizeInBits(), Mask, 0, Zeroable, Subtarget);
9164 // If V1 failed, try to match shuffle against V2 shift.
9167 matchVectorShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
9168 Mask, Size, Zeroable, Subtarget);
9175 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
9176 "Illegal integer vector type");
9177 V = DAG.getBitcast(ShiftVT, V);
9178 V = DAG.getNode(Opcode, DL, ShiftVT, V,
9179 DAG.getConstant(ShiftAmt, DL, MVT::i8));
9180 return DAG.getBitcast(VT, V);
9183 /// \brief Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
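/// (For reference: EXTRQ extracts a field of Len bits at bit position Idx
/// from the low 64 bits of the source into the low bits of the destination,
/// zero-filling up to bit 63, with the upper 64 bits undefined. INSERTQ
/// inserts the low Len bits of the second source at bit position Idx of the
/// first source's low quadword, leaving the rest of that quadword intact.)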
9184 static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
9185 SDValue V2, ArrayRef<int> Mask,
9186 const APInt &Zeroable,
9187 SelectionDAG &DAG) {
9188 int Size = Mask.size();
9189 int HalfSize = Size / 2;
9190 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
9191 assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
9193 // Upper half must be undefined.
9194 if (!isUndefInRange(Mask, HalfSize, HalfSize))
9197 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
9198 // Remainder of lower half result is zero and upper half is all undef.
9199 auto LowerAsEXTRQ = [&]() {
9200 // Determine the extraction length from the part of the
9201 // lower half that isn't zeroable.
9203 for (; Len > 0; --Len)
9204 if (!Zeroable[Len - 1])
9206 assert(Len > 0 && "Zeroable shuffle mask");
9208 // Attempt to match first Len sequential elements from the lower half.
9211 for (int i = 0; i != Len; ++i) {
9215 SDValue &V = (M < Size ? V1 : V2);
9218 // The extracted elements must start at a valid index and all mask
9219 // elements must be in the lower half.
9220 if (i > M || M >= HalfSize)
9223 if (Idx < 0 || (Src == V && Idx == (M - i))) {
9234 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
9235 int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
9236 int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
9237 return DAG.getNode(X86ISD::EXTRQI, DL, VT, Src,
9238 DAG.getConstant(BitLen, DL, MVT::i8),
9239 DAG.getConstant(BitIdx, DL, MVT::i8));
9242 if (SDValue ExtrQ = LowerAsEXTRQ())
9245 // INSERTQ: Extract lowest Len elements from lower half of second source and
9246 // insert over first source, starting at Idx.
9247 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
9248 auto LowerAsInsertQ = [&]() {
9249 for (int Idx = 0; Idx != HalfSize; ++Idx) {
9252 // Attempt to match first source from mask before insertion point.
9253 if (isUndefInRange(Mask, 0, Idx)) {
9255 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
9257 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
9263 // Extend the extraction length looking to match both the insertion of
9264 // the second source and the remaining elements of the first.
9265 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
9270 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
9272 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
9278 // Match the remaining elements of the lower half.
9279 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
9281 } else if ((!Base || (Base == V1)) &&
9282 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
9284 } else if ((!Base || (Base == V2)) &&
9285 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
9292 // We may not have a base (first source) - this can safely be undefined.
9294 Base = DAG.getUNDEF(VT);
9296 int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
9297 int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
9298 return DAG.getNode(X86ISD::INSERTQI, DL, VT, Base, Insert,
9299 DAG.getConstant(BitLen, DL, MVT::i8),
9300 DAG.getConstant(BitIdx, DL, MVT::i8));
9307 if (SDValue InsertQ = LowerAsInsertQ())
9313 /// \brief Lower a vector shuffle as a zero or any extension.
9315 /// Given a specific number of elements, element bit width, and extension
9316 /// stride, produce either a zero or any extension based on the available
9317 /// features of the subtarget. The extended elements are consecutive and
9318 /// can begin at an offset element index in the input; to
9319 /// avoid excess shuffling, the offset must either be in the bottom lane
9320 /// or at the start of a higher lane. All extended elements must come from
/// the same 128-bit lane of the input.
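///
/// For example (a sketch): with Scale == 2 and Offset == 0 on a v16i8 input,
/// the low eight bytes are widened to 16 bits each; with SSE4.1 this is a
/// single PMOVZXBW, otherwise it falls back to unpacking with a zero (or
/// undef, for any-extend) vector.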
9322 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
9323 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
9324 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
9325 assert(Scale > 1 && "Need a scale to extend.");
9326 int EltBits = VT.getScalarSizeInBits();
9327 int NumElements = VT.getVectorNumElements();
9328 int NumEltsPerLane = 128 / EltBits;
9329 int OffsetLane = Offset / NumEltsPerLane;
9330 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
9331 "Only 8, 16, and 32 bit elements can be extended.");
9332 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
9333 assert(0 <= Offset && "Extension offset must be positive.");
9334 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
9335 "Extension offset must be in the first lane or start an upper lane.");
9338 // Check that an index is in the same lane as the base offset.
9338 auto SafeOffset = [&](int Idx) {
9339 return OffsetLane == (Idx / NumEltsPerLane);
9342 // Shift along an input so that the offset base moves to the first element.
9343 auto ShuffleOffset = [&](SDValue V) {
9347 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
9348 for (int i = 0; i * Scale < NumElements; ++i) {
9349 int SrcIdx = i + Offset;
9350 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
9352 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
9355 // Found a valid zext mask! Try various lowering strategies based on the
9356 // input type and available ISA extensions.
9357 if (Subtarget.hasSSE41()) {
9358 // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
9359 // PUNPCK will catch this in a later shuffle match.
9360 if (Offset && Scale == 2 && VT.is128BitVector())
9362 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
9363 NumElements / Scale);
9364 InputV = ShuffleOffset(InputV);
9365 InputV = getExtendInVec(X86ISD::VZEXT, DL, ExtVT, InputV, DAG);
9366 return DAG.getBitcast(VT, InputV);
9369 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
9371 // For any extends we can cheat for larger element sizes and use shuffle
9372 // instructions that can fold with a load and/or copy.
9373 if (AnyExt && EltBits == 32) {
9374 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
9376 return DAG.getBitcast(
9377 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9378 DAG.getBitcast(MVT::v4i32, InputV),
9379 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
9381 if (AnyExt && EltBits == 16 && Scale > 2) {
9382 int PSHUFDMask[4] = {Offset / 2, -1,
9383 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
9384 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9385 DAG.getBitcast(MVT::v4i32, InputV),
9386 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
9387 int PSHUFWMask[4] = {1, -1, -1, -1};
9388 unsigned OddEvenOp = (Offset & 1 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW);
9389 return DAG.getBitcast(
9390 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
9391 DAG.getBitcast(MVT::v8i16, InputV),
9392 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
9395 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes to 64-bit integers.
9397 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
9398 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
9399 assert(VT.is128BitVector() && "Unexpected vector width!");
9401 int LoIdx = Offset * EltBits;
9402 SDValue Lo = DAG.getBitcast(
9403 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
9404 DAG.getConstant(EltBits, DL, MVT::i8),
9405 DAG.getConstant(LoIdx, DL, MVT::i8)));
9407 if (isUndefInRange(Mask, NumElements / 2, NumElements / 2) ||
9408 !SafeOffset(Offset + 1))
9409 return DAG.getBitcast(VT, Lo);
9411 int HiIdx = (Offset + 1) * EltBits;
9412 SDValue Hi = DAG.getBitcast(
9413 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
9414 DAG.getConstant(EltBits, DL, MVT::i8),
9415 DAG.getConstant(HiIdx, DL, MVT::i8)));
9416 return DAG.getBitcast(VT,
9417 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
9420 // If this would require more than 2 unpack instructions to expand, use
9421 // pshufb when available. We can only use more than 2 unpack instructions
9422 // when zero extending i8 elements which also makes it easier to use pshufb.
9423 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
9424 assert(NumElements == 16 && "Unexpected byte vector width!");
9425 SDValue PSHUFBMask[16];
9426 for (int i = 0; i < 16; ++i) {
9427 int Idx = Offset + (i / Scale);
9428 PSHUFBMask[i] = DAG.getConstant(
9429 (i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
9431 InputV = DAG.getBitcast(MVT::v16i8, InputV);
9432 return DAG.getBitcast(
9433 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
9434 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
9437 // If we are extending from an offset, ensure we start on a boundary that
9438 // we can unpack from.
9439 int AlignToUnpack = Offset % (NumElements / Scale);
9440 if (AlignToUnpack) {
9441 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
9442 for (int i = AlignToUnpack; i < NumElements; ++i)
9443 ShMask[i - AlignToUnpack] = i;
9444 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
9445 Offset -= AlignToUnpack;
  // Otherwise emit a sequence of unpacks.
  do {
    unsigned UnpackLoHi = X86ISD::UNPCKL;
9451 if (Offset >= (NumElements / 2)) {
9452 UnpackLoHi = X86ISD::UNPCKH;
      Offset -= (NumElements / 2);
    }
9456 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
9457 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
9458 : getZeroVector(InputVT, Subtarget, DAG, DL);
9459 InputV = DAG.getBitcast(InputVT, InputV);
    InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
9465 return DAG.getBitcast(VT, InputV);
9468 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
9470 /// This routine will try to do everything in its power to cleverly lower
9471 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
9473 /// match this pattern. It will use all of the micro-architectural details it
9474 /// can to emit an efficient lowering. It handles both blends with all-zero
9475 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
9476 /// masking out later).
9478 /// The reason we have dedicated lowering for zext-style shuffles is that they
9479 /// are both incredibly common and often quite performance sensitive.
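///
/// Illustrative example: the v4i32 mask <0, 4, 1, 5> with an all-zero V2
/// interleaves the two low elements of V1 with zeros; viewed as v2i64 that is
/// exactly a zero extension of the low two i32 elements, and with SSE4.1 it
/// can be emitted as a single PMOVZXDQ.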
9480 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
9481 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
9482 const APInt &Zeroable, const X86Subtarget &Subtarget,
9483 SelectionDAG &DAG) {
9484 int Bits = VT.getSizeInBits();
9485 int NumLanes = Bits / 128;
9486 int NumElements = VT.getVectorNumElements();
9487 int NumEltsPerLane = NumElements / NumLanes;
9488 assert(VT.getScalarSizeInBits() <= 32 &&
9489 "Exceeds 32-bit integer zero extension limit");
9490 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
  // Define a helper function to check a particular ext-scale and lower to it if
  // valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    int Offset = 0;
    int Matches = 0;
    for (int i = 0; i < NumElements; ++i) {
      int M = Mask[i];
      if (M < 0)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements needs to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We are no longer in the anyext case.
        AnyExt = false;
        continue;
      }
9513 // Each of the base elements needs to be consecutive indices into the
9514 // same input vector.
9515 SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
      if (!InputV) {
        InputV = V;
        Offset = M - (i / Scale);
9520 } else if (InputV != V)
9521 return SDValue(); // Flip-flopping inputs.
      // Offset must start in the lowest 128-bit lane or at the start of an
      // upper lane.
9525 // FIXME: Is it ever worth allowing a negative base offset?
9526 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
            (Offset % NumEltsPerLane) == 0))
        return SDValue();
      // If we are offsetting, all referenced entries must come from the same
      // lane.
      if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
        return SDValue();
9535 if ((M % NumElements) != (Offset + (i / Scale)))
        return SDValue(); // Non-consecutive strided elements.
      Matches++;
    }
9540 // If we fail to find an input, we have a zero-shuffle which should always
9541 // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();
    // If we are offsetting, don't extend if we only match a single input; we
    // can always do better by using a basic PSHUF or PUNPCK.
    if (Offset != 0 && Matches < 2)
      return SDValue();
9551 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
        DL, VT, Scale, Offset, AnyExt, InputV, Mask, Subtarget, DAG);
  };
9555 // The widest scale possible for extending is to a 64-bit integer.
9556 assert(Bits % 64 == 0 &&
9557 "The number of bits in a vector must be divisible by 64 on x86!");
9558 int NumExtElements = Bits / 64;
  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
9562 for (; NumExtElements < NumElements; NumExtElements *= 2) {
9563 assert(NumElements % NumExtElements == 0 &&
9564 "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }
9569 // General extends failed, but 128-bit vectors may be able to use MOVQ.
9573 // Returns one of the source operands if the shuffle can be reduced to a
9574 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
9575 auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };
9586 if (SDValue V = CanZExtLowHalf()) {
9587 V = DAG.getBitcast(MVT::v2i64, V);
9588 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getBitcast(VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}
9596 /// \brief Try to get a scalar value for a specific element of a vector.
9598 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
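///
/// For example (illustrative): if \p V is (build_vector a, b, c, d) and the
/// requested index is 2, this returns the scalar operand c; for a
/// scalar_to_vector node only index 0 can be serviced.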
9599 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
9600 SelectionDAG &DAG) {
9601 MVT VT = V.getSimpleValueType();
9602 MVT EltVT = VT.getVectorElementType();
9603 V = peekThroughBitcasts(V);
  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
9607 MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();
9611 if (V.getOpcode() == ISD::BUILD_VECTOR ||
9612 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
9613 // Ensure the scalar operand is the same size as the destination.
9614 // FIXME: Add support for scalar truncation where possible.
9615 SDValue S = V.getOperand(Idx);
9616 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
      return DAG.getBitcast(EltVT, S);
  }

  return SDValue();
}
9623 /// \brief Helper to test for a load that can be folded with x86 shuffles.
9625 /// This is particularly important because the set of instructions varies
9626 /// significantly based on whether the operand is a load or not.
9627 static bool isShuffleFoldableLoad(SDValue V) {
9628 V = peekThroughBitcasts(V);
  return ISD::isNON_EXTLoad(V.getNode());
}
9632 /// \brief Try to lower insertion of a single element into a zero vector.
/// This is a common pattern that we have especially efficient ways to lower
9635 /// across all subtarget feature sets.
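///
/// Illustrative example: the v4f32 mask <4, 1, 2, 3> inserts element 0 of V2
/// into lane 0 of V1; without SSE4.1 this becomes a single MOVSS, and when all
/// other lanes are zeroable the V2 scalar can instead be moved in with its
/// upper lanes cleared (VZEXT_MOVL).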
9636 static SDValue lowerVectorShuffleAsElementInsertion(
9637 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
9638 const APInt &Zeroable, const X86Subtarget &Subtarget,
9639 SelectionDAG &DAG) {
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();

  int V2Index =
      find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
      Mask.begin();
9646 bool IsV1Zeroable = true;
9647 for (int i = 0, Size = Mask.size(); i < Size; ++i)
9648 if (i != V2Index && !Zeroable[i]) {
9649 IsV1Zeroable = false;
9653 // Check for a single input from a SCALAR_TO_VECTOR node.
9654 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
9655 // all the smarts here sunk into that routine. However, the current
9656 // lowering of BUILD_VECTOR makes that nearly impossible until the old
9657 // vector shuffle lowering is dead.
9658 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
9660 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
9661 // We need to zext the scalar if it is smaller than an i32.
9662 V2S = DAG.getBitcast(EltVT, V2S);
9663 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // elements.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::v4i32;
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
9673 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
9674 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
9675 EltVT == MVT::i16) {
9676 // Either not inserting from the low element of the input or the input
9677 // element size is too small to use VZEXT_MOVL to clear the high bits.
9681 if (!IsV1Zeroable) {
9682 // If V1 can't be treated as a zero vector we have fewer options to lower
9683 // this. We can't support integer vectors or non-zero targets cheaply, and
9684 // the V1 elements can't be permuted in any way.
9685 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
9686 if (!VT.isFloatingPoint() || V2Index != 0)
9688 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
9689 V1Mask[V2Index] = -1;
9690 if (!isNoopShuffleMask(V1Mask))
9692 // This is essentially a special case blend operation, but if we have
9693 // general purpose blend operations, they are always faster. Bail and let
9694 // the rest of the lowering handle these as blends.
9695 if (Subtarget.hasSSE41())
9698 // Otherwise, use MOVSD or MOVSS.
9699 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
9700 "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       VT, V1, V2);
  }
9705 // This lowering only works for the low element with floating point vectors.
9706 if (VT.isFloatingPoint() && V2Index != 0)
9709 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
9711 V2 = DAG.getBitcast(VT, V2);
9714 // If we have 4 or fewer lanes we can cheaply shuffle the element into
9715 // the desired position. Otherwise it is more efficient to do a vector
9716 // shift left. We know that we can do a vector shift left because all
9717 // the inputs are zero.
9718 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
9719 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
9720 V2Shuffle[V2Index] = 0;
9721 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
9723 V2 = DAG.getBitcast(MVT::v16i8, V2);
9725 X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
9726 DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL,
9727 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(
9728 DAG.getDataLayout(), VT)));
9729 V2 = DAG.getBitcast(VT, V2);
/// Try to lower a broadcast of a single truncated integer element,
9736 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
9738 /// This assumes we have AVX2.
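///
/// Illustrative example: broadcasting byte 0 of a v16i8 shuffle whose source
/// was built from i32 scalars becomes a truncate of that i32 scalar to i8
/// followed by VPBROADCASTB; if a higher byte of the wider scalar is wanted,
/// the scalar is shifted right first so the truncate picks up the right bits.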
9739 static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT,
9740 SDValue V0, int BroadcastIdx,
9741 const X86Subtarget &Subtarget,
9742 SelectionDAG &DAG) {
9743 assert(Subtarget.hasAVX2() &&
9744 "We can only lower integer broadcasts with AVX2!");
9746 EVT EltVT = VT.getVectorElementType();
9747 EVT V0VT = V0.getValueType();
9749 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
9750 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
9752 EVT V0EltVT = V0VT.getVectorElementType();
9753 if (!V0EltVT.isInteger())
9756 const unsigned EltSize = EltVT.getSizeInBits();
9757 const unsigned V0EltSize = V0EltVT.getSizeInBits();
9759 // This is only a truncation if the original element type is larger.
9760 if (V0EltSize <= EltSize)
9763 assert(((V0EltSize % EltSize) == 0) &&
9764 "Scalar type sizes must all be powers of 2 on x86!");
9766 const unsigned V0Opc = V0.getOpcode();
9767 const unsigned Scale = V0EltSize / EltSize;
9768 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
9770 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
9771 V0Opc != ISD::BUILD_VECTOR)
9774 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
9776 // If we're extracting non-least-significant bits, shift so we can truncate.
9777 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
9778 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
9779 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
9780 if (const int OffsetIdx = BroadcastIdx % Scale)
9781 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
9782 DAG.getConstant(OffsetIdx * EltSize, DL, Scalar.getValueType()));
9784 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
9785 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
9788 /// \brief Try to lower broadcast of a single element.
9790 /// For convenience, this code also bundles all of the subtarget feature set
9791 /// filtering. While a little annoying to re-dispatch on type here, there isn't
9792 /// a convenient way to factor it out.
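///
/// Illustrative examples: a v8f32 splat mask <0,0,0,0,0,0,0,0> with AVX maps
/// to VBROADCASTSS, and a v2f64 splat mask <0, 0> with SSE3 maps to MOVDDUP.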
9793 static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
9796 const X86Subtarget &Subtarget,
9797 SelectionDAG &DAG) {
9798 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
9799 (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
9800 (Subtarget.hasAVX2() && VT.isInteger())))
9803 // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
9804 // we can only broadcast from a register with AVX2.
9805 unsigned NumElts = Mask.size();
9806 unsigned Opcode = VT == MVT::v2f64 ? X86ISD::MOVDDUP : X86ISD::VBROADCAST;
9807 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
9809 // Check that the mask is a broadcast.
9810 int BroadcastIdx = -1;
9811 for (int i = 0; i != (int)NumElts; ++i) {
9812 SmallVector<int, 8> BroadcastMask(NumElts, i);
    if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
      BroadcastIdx = i;
      break;
    }
  }

  if (BroadcastIdx < 0)
    return SDValue();
9821 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "entry comes from the first element.");
9825 // Go up the chain of (vector) values to find a scalar load that we can
9826 // combine with the broadcast.
  SDValue V = V1;
  for (;;) {
    switch (V.getOpcode()) {
9830 case ISD::BITCAST: {
9831 SDValue VSrc = V.getOperand(0);
9832 MVT SrcVT = VSrc.getSimpleValueType();
9833 if (VT.getScalarSizeInBits() != SrcVT.getScalarSizeInBits())
9838 case ISD::CONCAT_VECTORS: {
9839 int OperandSize = Mask.size() / V.getNumOperands();
9840 V = V.getOperand(BroadcastIdx / OperandSize);
9841 BroadcastIdx %= OperandSize;
9844 case ISD::INSERT_SUBVECTOR: {
9845 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
9846 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      int BeginIdx = (int)ConstantIdx->getZExtValue();
      int EndIdx =
9852 BeginIdx + (int)VInner.getSimpleValueType().getVectorNumElements();
9853 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
9854 BroadcastIdx -= BeginIdx;
9865 // Check if this is a broadcast of a scalar. We special case lowering
9866 // for scalars so that we can more effectively fold with loads.
9867 // First, look through bitcast: if the original value has a larger element
9868 // type than the shuffle, the broadcast element is in essence truncated.
9869 // Make that explicit to ease folding.
9870 if (V.getOpcode() == ISD::BITCAST && VT.isInteger())
9871 if (SDValue TruncBroadcast = lowerVectorShuffleAsTruncBroadcast(
9872 DL, VT, V.getOperand(0), BroadcastIdx, Subtarget, DAG))
9873 return TruncBroadcast;
9875 MVT BroadcastVT = VT;
9877 // Peek through any bitcast (only useful for loads).
9878 SDValue BC = peekThroughBitcasts(V);
9880 // Also check the simpler case, where we can directly reuse the scalar.
9881 if (V.getOpcode() == ISD::BUILD_VECTOR ||
9882 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
9883 V = V.getOperand(BroadcastIdx);
9885 // If we can't broadcast from a register, check that the input is a load.
9886 if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
9888 } else if (MayFoldLoad(BC) && !cast<LoadSDNode>(BC)->isVolatile()) {
9889 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
9890 if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
9891 BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
9892 Opcode = (BroadcastVT.is128BitVector() ? X86ISD::MOVDDUP : Opcode);
9895 // If we are broadcasting a load that is only used by the shuffle
9896 // then we can reduce the vector load to the broadcasted scalar load.
9897 LoadSDNode *Ld = cast<LoadSDNode>(BC);
9898 SDValue BaseAddr = Ld->getOperand(1);
9899 EVT SVT = BroadcastVT.getScalarType();
9900 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
9901 SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
9902 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
9903 DAG.getMachineFunction().getMachineMemOperand(
9904 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
9906 // Make sure the newly-created LOAD is in the same position as Ld in
9907 // terms of dependency. We create a TokenFactor for Ld and V,
9908 // and update uses of Ld's output chain to use the TokenFactor.
9909 if (Ld->hasAnyUseOfValue(1)) {
9910 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
9911 SDValue(Ld, 1), SDValue(V.getNode(), 1));
9912 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
9913 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
9914 SDValue(V.getNode(), 1));
9916 } else if (!BroadcastFromReg) {
9917 // We can't broadcast from a vector register.
9919 } else if (BroadcastIdx != 0) {
9920 // We can only broadcast from the zero-element of a vector register,
    // but it can be advantageous to broadcast from the zero-element of a
    // subvector.
9923 if (!VT.is256BitVector() && !VT.is512BitVector())
9926 // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
9927 if (VT == MVT::v4f64 || VT == MVT::v4i64)
9930 // Only broadcast the zero-element of a 128-bit subvector.
9931 unsigned EltSize = VT.getScalarSizeInBits();
9932 if (((BroadcastIdx * EltSize) % 128) != 0)
9935 // The shuffle input might have been a bitcast we looked through; look at
9936 // the original input vector. Emit an EXTRACT_SUBVECTOR of that type; we'll
9937 // later bitcast it to BroadcastVT.
9938 MVT SrcVT = V.getSimpleValueType();
9939 assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
9940 "Unexpected vector element size");
9941 assert((SrcVT.is256BitVector() || SrcVT.is512BitVector()) &&
9942 "Unexpected vector size");
9944 MVT ExtVT = MVT::getVectorVT(SrcVT.getScalarType(), 128 / EltSize);
9945 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, V,
9946 DAG.getIntPtrConstant(BroadcastIdx, DL));
9949 if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
9950 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
9951 DAG.getBitcast(MVT::f64, V));
9953 // Bitcast back to the same scalar type as BroadcastVT.
9954 MVT SrcVT = V.getSimpleValueType();
9955 if (SrcVT.getScalarType() != BroadcastVT.getScalarType()) {
9956 assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
9957 "Unexpected vector element size");
9958 if (SrcVT.isVector()) {
9959 unsigned NumSrcElts = SrcVT.getVectorNumElements();
9960 SrcVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
9962 SrcVT = BroadcastVT.getScalarType();
9964 V = DAG.getBitcast(SrcVT, V);
9967 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
9968 if (!Subtarget.is64Bit() && SrcVT == MVT::i64) {
9969 V = DAG.getBitcast(MVT::f64, V);
9970 unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
9971 BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
9974 // We only support broadcasting from 128-bit vectors to minimize the
9975 // number of patterns we need to deal with in isel. So extract down to
  // 128 bits, removing as many bitcasts as possible.
  if (SrcVT.getSizeInBits() > 128)
9978 V = extract128BitVector(V, 0, DAG, DL);
  return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
}
9983 // Check for whether we can use INSERTPS to perform the shuffle. We only use
9984 // INSERTPS when the V1 elements are already in the correct locations
9985 // because otherwise we can just always use two SHUFPS instructions which
9986 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
9987 // perform INSERTPS if a single V1 element is out of place and all V2
9988 // elements are zeroable.
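//
// Illustrative example: the v4f32 mask <0, 5, 2, 3> keeps V1 lanes 0, 2 and 3
// in place and inserts element 1 of V2 into lane 1, i.e. an INSERTPS with
// source index 1, destination index 1 and an empty zero mask (immediate 0x50).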
9989 static bool matchVectorShuffleAsInsertPS(SDValue &V1, SDValue &V2,
9990 unsigned &InsertPSMask,
9991 const APInt &Zeroable,
9993 SelectionDAG &DAG) {
9994 assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
9995 assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
9996 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
9998 // Attempt to match INSERTPS with one element from VA or VB being
  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
  // will be updated to reflect the chosen insertion.
10001 auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
10002 ArrayRef<int> CandidateMask) {
10003 unsigned ZMask = 0;
10004 int VADstIndex = -1;
10005 int VBDstIndex = -1;
10006 bool VAUsedInPlace = false;
10008 for (int i = 0; i < 4; ++i) {
10009 // Synthesize a zero mask from the zeroable elements (includes undefs).
10015 // Flag if we use any VA inputs in place.
10016 if (i == CandidateMask[i]) {
10017 VAUsedInPlace = true;
10021 // We can only insert a single non-zeroable element.
10022 if (VADstIndex >= 0 || VBDstIndex >= 0)
10025 if (CandidateMask[i] < 4) {
10026 // VA input out of place for insertion.
10029 // VB input for insertion.
10034 // Don't bother if we have no (non-zeroable) element for insertion.
10035 if (VADstIndex < 0 && VBDstIndex < 0)
10038 // Determine element insertion src/dst indices. The src index is from the
10039 // start of the inserted vector, not the start of the concatenated vector.
10040 unsigned VBSrcIndex = 0;
10041 if (VADstIndex >= 0) {
10042 // If we have a VA input out of place, we use VA as the V2 element
10043 // insertion and don't use the original V2 at all.
10044 VBSrcIndex = CandidateMask[VADstIndex];
10045 VBDstIndex = VADstIndex;
10048 VBSrcIndex = CandidateMask[VBDstIndex] - 4;
10051 // If no V1 inputs are used in place, then the result is created only from
10052 // the zero mask and the V2 insertion - so remove V1 dependency.
10053 if (!VAUsedInPlace)
10054 VA = DAG.getUNDEF(MVT::v4f32);
    // Update V1, V2 and InsertPSMask accordingly.
    V1 = VA;
    V2 = VB;
10060 // Insert the V2 element into the desired position.
10061 InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
    return true;
  };
10066 if (matchAsInsertPS(V1, V2, Mask))
10069 // Commute and try again.
10070 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10071 ShuffleVectorSDNode::commuteMask(CommutedMask);
10072 if (matchAsInsertPS(V2, V1, CommutedMask))
10078 static SDValue lowerVectorShuffleAsInsertPS(const SDLoc &DL, SDValue V1,
10079 SDValue V2, ArrayRef<int> Mask,
10080 const APInt &Zeroable,
10081 SelectionDAG &DAG) {
10082 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
10083 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
10085 // Attempt to match the insertps pattern.
10086 unsigned InsertPSMask;
10087 if (!matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
10090 // Insert the V2 element into the desired position.
10091 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
10092 DAG.getConstant(InsertPSMask, DL, MVT::i8));
10095 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
10096 /// UNPCK instruction.
10098 /// This specifically targets cases where we end up with alternating between
10099 /// the two inputs, and so can permute them into something that feeds a single
10100 /// UNPCK instruction. Note that this routine only targets integer vectors
10101 /// because for floating point vectors we have a generalized SHUFPS lowering
10102 /// strategy that handles everything that doesn't *exactly* match an unpack,
10103 /// making this clever lowering unnecessary.
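///
/// Illustrative example: the v4i32 mask <0, 4, 2, 6> alternates between the
/// inputs but is not a direct unpack; permuting each input so its elements 0
/// and 2 occupy the low two lanes turns the whole shuffle into a single
/// PUNPCKLDQ of the two permuted inputs.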
10104 static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
10105 SDValue V1, SDValue V2,
10106 ArrayRef<int> Mask,
10107 SelectionDAG &DAG) {
10108 assert(!VT.isFloatingPoint() &&
10109 "This routine only supports integer vectors.");
10110 assert(VT.is128BitVector() &&
10111 "This routine only works on 128-bit vectors.");
10112 assert(!V2.isUndef() &&
10113 "This routine should only be used when blending two inputs.");
10114 assert(Mask.size() >= 2 && "Single element masks are invalid.");
10116 int Size = Mask.size();
  int NumLoInputs =
      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
  int NumHiInputs =
      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
10123 bool UnpackLo = NumLoInputs >= NumHiInputs;
10125 auto TryUnpack = [&](int ScalarSize, int Scale) {
10126 SmallVector<int, 16> V1Mask((unsigned)Size, -1);
10127 SmallVector<int, 16> V2Mask((unsigned)Size, -1);
10129 for (int i = 0; i < Size; ++i) {
10133 // Each element of the unpack contains Scale elements from this mask.
10134 int UnpackIdx = i / Scale;
10136 // We only handle the case where V1 feeds the first slots of the unpack.
10137 // We rely on canonicalization to ensure this is the case.
10138 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
10141 // Setup the mask for this input. The indexing is tricky as we have to
10142 // handle the unpack stride.
10143 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
          Mask[i] % Size;
    }
10148 // If we will have to shuffle both inputs to use the unpack, check whether
10149 // we can just unpack first and shuffle the result. If so, skip this unpack.
10150 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
10151 !isNoopShuffleMask(V2Mask))
10154 // Shuffle the inputs into place.
10155 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
10156 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
10158 // Cast the inputs to the type we will use to unpack them.
10159 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
10160 V1 = DAG.getBitcast(UnpackVT, V1);
10161 V2 = DAG.getBitcast(UnpackVT, V2);
10163 // Unpack the inputs and cast the result back to the desired type.
10164 return DAG.getBitcast(
10165 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
10166 UnpackVT, V1, V2));
10169 // We try each unpack from the largest to the smallest to try and find one
10170 // that fits this mask.
10171 int OrigScalarSize = VT.getScalarSizeInBits();
10172 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
      return Unpack;
  // If none of the unpack-rooted lowerings worked (or were profitable) try an
  // initial unpack.
10178 if (NumLoInputs == 0 || NumHiInputs == 0) {
10179 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
10180 "We have to have *some* inputs!");
10181 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
10183 // FIXME: We could consider the total complexity of the permute of each
10184 // possible unpacking. Or at the least we should consider how many
10185 // half-crossings are created.
10186 // FIXME: We could consider commuting the unpacks.
10188 SmallVector<int, 32> PermMask((unsigned)Size, -1);
10189 for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
      PermMask[i] =
          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
    }
10198 return DAG.getVectorShuffle(
        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
                            DL, VT, V1, V2),
        DAG.getUNDEF(VT), PermMask);
  }

  return SDValue();
}
10207 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
10209 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
10210 /// support for floating point shuffles but not integer shuffles. These
10211 /// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
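///
/// Illustrative examples: mask <0, 2> is UNPCKLPD and <1, 3> is UNPCKHPD,
/// while the single-input mask <1, 1> becomes SHUFPD (VPERMILPD with AVX)
/// with immediate 3.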
10214 static SDValue lowerV2F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
10215 const APInt &Zeroable,
10216 SDValue V1, SDValue V2,
10217 const X86Subtarget &Subtarget,
10218 SelectionDAG &DAG) {
10219 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
10220 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
10221 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
10223 if (V2.isUndef()) {
10224 // Check for being able to broadcast a single element.
10225 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
10226 DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
10231 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
10233 if (Subtarget.hasAVX()) {
10234 // If we have AVX, we can use VPERMILPS which will allow folding a load
10235 // into the shuffle.
10236 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
10237 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
10240 return DAG.getNode(
10241 X86ISD::SHUFP, DL, MVT::v2f64,
10242 Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
10243 Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
10244 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
10246 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
10247 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
10249 // If we have a single input, insert that into V1 if we can do so cheaply.
10250 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
10251 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10252 DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
10254 // Try inverting the insertion since for v2 masks it is easy to do and we
10255 // can't reliably sort the mask one way or the other.
10256 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
10257 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
10258 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10259 DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
10263 // Try to use one of the special instruction patterns to handle two common
10264 // blend patterns if a zero-blend above didn't work.
10265 if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
10266 isShuffleEquivalent(V1, V2, Mask, {1, 3}))
10267 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
10268 // We can either use a special instruction to load over the low double or
10269 // to move just the low double.
10270 return DAG.getNode(
10271 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
10272 DL, MVT::v2f64, V2,
10273 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
10275 if (Subtarget.hasSSE41())
10276 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
10277 Zeroable, Subtarget, DAG))
10280 // Use dedicated unpack instructions for masks that match their pattern.
10282 lowerVectorShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
10285 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
10286 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
                     DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
10290 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
10292 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
10293 /// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
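///
/// Illustrative examples: mask <0, 2> maps to PUNPCKLQDQ, and the single-input
/// splat <0, 0> is re-expressed as the v4i32 PSHUFD mask <0, 1, 0, 1>.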
10296 static SDValue lowerV2I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
10297 const APInt &Zeroable,
10298 SDValue V1, SDValue V2,
10299 const X86Subtarget &Subtarget,
10300 SelectionDAG &DAG) {
10301 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
10302 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
10303 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
10305 if (V2.isUndef()) {
10306 // Check for being able to broadcast a single element.
10307 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
10308 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
10311 // Straight shuffle of a single input vector. For everything from SSE2
10312 // onward this has a single fast instruction with no scary immediates.
10313 // We have to map the mask as it is actually a v4i32 shuffle instruction.
10314 V1 = DAG.getBitcast(MVT::v4i32, V1);
10315 int WidenedMask[4] = {
10316 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
10317 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
10318 return DAG.getBitcast(
10320 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
10321 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
10323 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
10324 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
10325 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
10326 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
10328 // If we have a blend of two same-type PACKUS operations and the blend aligns
10329 // with the low and high halves, we can just merge the PACKUS operations.
10330 // This is particularly important as it lets us merge shuffles that this
10331 // routine itself creates.
10332 auto GetPackNode = [](SDValue V) {
10333 V = peekThroughBitcasts(V);
10334 return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
10336 if (SDValue V1Pack = GetPackNode(V1))
10337 if (SDValue V2Pack = GetPackNode(V2)) {
10338 EVT PackVT = V1Pack.getValueType();
10339 if (PackVT == V2Pack.getValueType())
10340 return DAG.getBitcast(MVT::v2i64,
10341 DAG.getNode(X86ISD::PACKUS, DL, PackVT,
10342 Mask[0] == 0 ? V1Pack.getOperand(0)
10343 : V1Pack.getOperand(1),
10344 Mask[1] == 2 ? V2Pack.getOperand(0)
10345 : V2Pack.getOperand(1)));
10348 // Try to use shift instructions.
10349 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
10350 Zeroable, Subtarget, DAG))
10353 // When loading a scalar and then shuffling it into a vector we can often do
10354 // the insertion cheaply.
10355 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10356 DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
10358 // Try inverting the insertion since for v2 masks it is easy to do and we
10359 // can't reliably sort the mask one way or the other.
10360 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
10361 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10362 DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
10365 // We have different paths for blend lowering, but they all must use the
10366 // *exact* same predicate.
10367 bool IsBlendSupported = Subtarget.hasSSE41();
10368 if (IsBlendSupported)
10369 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
10370 Zeroable, Subtarget, DAG))
10373 // Use dedicated unpack instructions for masks that match their pattern.
10375 lowerVectorShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
10378 // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
10380 if (Subtarget.hasSSSE3())
10381 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10382 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
10385 // If we have direct support for blends, we should lower by decomposing into
10386 // a permute. That will be faster than the domain cross.
10387 if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
                                                      Mask, DAG);
10391 // We implement this with SHUFPD which is pretty lame because it will likely
10392 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
10393 // However, all the alternatives are still more cycles and newer chips don't
10394 // have this problem. It would be really nice if x86 had better shuffles here.
10395 V1 = DAG.getBitcast(MVT::v2f64, V1);
10396 V2 = DAG.getBitcast(MVT::v2f64, V2);
10397 return DAG.getBitcast(MVT::v2i64,
                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
10401 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
10403 /// This is used to disable more specialized lowerings when the shufps lowering
10404 /// will happen to be efficient.
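///
/// Illustrative examples: <0, 3, 4, 6> needs only one SHUFPS (the low half
/// reads only V1 and the high half only V2), whereas <0, 4, 1, 5> does not,
/// since its low half mixes both inputs.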
10405 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
10406 // This routine only handles 128-bit shufps.
10407 assert(Mask.size() == 4 && "Unsupported mask size!");
10408 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
10409 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
10410 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
10411 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
10413 // To lower with a single SHUFPS we need to have the low half and high half
10414 // each requiring a single input.
  if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}
10423 /// \brief Lower a vector shuffle using the SHUFPS instruction.
10425 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering; it simply
/// uses it.
10428 static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
10429 ArrayRef<int> Mask, SDValue V1,
10430 SDValue V2, SelectionDAG &DAG) {
10431 SDValue LowV = V1, HighV = V2;
10432 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
10434 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
10436 if (NumV2Elements == 1) {
10437 int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
10441 int V2AdjIndex = V2Index ^ 1;
10443 if (Mask[V2AdjIndex] < 0) {
10444 // Handles all the cases where we have a single V2 element and an undef.
10445 // This will only ever happen in the high lanes because we commute the
10446 // vector otherwise.
10448 std::swap(LowV, HighV);
10449 NewMask[V2Index] -= 4;
10451 // Handle the case where the V2 element ends up adjacent to a V1 element.
10452 // To make this work, blend them together as the first step.
10453 int V1Index = V2AdjIndex;
10454 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
10455 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
10456 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
10458 // Now proceed to reconstruct the final blend as we have the necessary
10459 // high or low half formed.
10466 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
10467 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
10469 } else if (NumV2Elements == 2) {
10470 if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
10475 } else if (Mask[2] < 4 && Mask[3] < 4) {
10476 // We also handle the reversed case because this utility may get called
10477 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
10478 // arrange things in the right direction.
10484 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
10485 // trying to place elements directly, just blend them and set up the final
10486 // shuffle to place them.
      // The first two blend mask elements are for V1, the second two are for
      // V2.
10490 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
10491 Mask[2] < 4 ? Mask[2] : Mask[3],
10492 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
10493 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
10494 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
10495 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // a single SHUFPS.
      LowV = HighV = V1;
10500 NewMask[0] = Mask[0] < 4 ? 0 : 2;
10501 NewMask[1] = Mask[0] < 4 ? 2 : 0;
10502 NewMask[2] = Mask[2] < 4 ? 1 : 3;
10503 NewMask[3] = Mask[2] < 4 ? 3 : 1;
10506 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}
10510 /// \brief Lower 4-lane 32-bit floating point shuffles.
10512 /// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
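///
/// Illustrative examples: the single-input reversal <3, 2, 1, 0> becomes one
/// SHUFPS (VPERMILPS with AVX) with immediate 0x1B, and <0, 1, 4, 5> is
/// matched directly as MOVLHPS.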
10515 static SDValue lowerV4F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
10516 const APInt &Zeroable,
10517 SDValue V1, SDValue V2,
10518 const X86Subtarget &Subtarget,
10519 SelectionDAG &DAG) {
10520 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
10521 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
10522 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10524 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
10526 if (NumV2Elements == 0) {
10527 // Check for being able to broadcast a single element.
10528 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
10529 DL, MVT::v4f32, V1, V2, Mask, Subtarget, DAG))
10532 // Use even/odd duplicate instructions for masks that match their pattern.
10533 if (Subtarget.hasSSE3()) {
10534 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
10535 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
10536 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
10537 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
10540 if (Subtarget.hasAVX()) {
10541 // If we have AVX, we can use VPERMILPS which will allow folding a load
10542 // into the shuffle.
10543 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
10544 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
10547 // Otherwise, use a straight shuffle of a single input vector. We pass the
10548 // input vector to both operands to simulate this with a SHUFPS.
10549 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
10550 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
10553 // There are special ways we can lower some single-element blends. However, we
10554 // have custom ways we can lower more complex single-element blends below that
10555 // we defer to if both this and BLENDPS fail to match, so restrict this to
  // when the V2 input is targeting element 0 of the mask -- that is the fast
  // case here.
10558 if (NumV2Elements == 1 && Mask[0] >= 4)
10559 if (SDValue V = lowerVectorShuffleAsElementInsertion(
10560 DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
10563 if (Subtarget.hasSSE41()) {
10564 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
10565 Zeroable, Subtarget, DAG))
10568 // Use INSERTPS if we can complete the shuffle efficiently.
10570 lowerVectorShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
10573 if (!isSingleSHUFPSMask(Mask))
10574 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
10575 DL, MVT::v4f32, V1, V2, Mask, DAG))
10579 // Use low/high mov instructions.
10580 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
10581 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
10582 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
10583 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
10585 // Use dedicated unpack instructions for masks that match their pattern.
10587 lowerVectorShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
10590 // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
10594 /// \brief Lower 4-lane i32 vector shuffles.
10596 /// We try to handle these with integer-domain shuffles where we can, but for
10597 /// blends we use the floating point domain blend instructions.
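///
/// Illustrative examples: <0, 4, 1, 5> with a zero V2 is caught up front by
/// the zero-extension path, and otherwise matches PUNPCKLDQ; a per-lane blend
/// such as <0, 5, 2, 7> goes down the blend path when SSE4.1 is available.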
10598 static SDValue lowerV4I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
10599 const APInt &Zeroable,
10600 SDValue V1, SDValue V2,
10601 const X86Subtarget &Subtarget,
10602 SelectionDAG &DAG) {
10603 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
10604 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
10605 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10607 // Whenever we can lower this as a zext, that instruction is strictly faster
10608 // than any alternative. It also allows us to fold memory operands into the
10609 // shuffle in many cases.
10610 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
10611 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
10614 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
10616 if (NumV2Elements == 0) {
10617 // Check for being able to broadcast a single element.
10618 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
10619 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
10622 // Straight shuffle of a single input vector. For everything from SSE2
10623 // onward this has a single fast instruction with no scary immediates.
10624 // We coerce the shuffle pattern to be compatible with UNPCK instructions
10625 // but we aren't actually going to use the UNPCK instruction because doing
10626 // so prevents folding a load into this instruction or making a copy.
10627 const int UnpackLoMask[] = {0, 0, 1, 1};
10628 const int UnpackHiMask[] = {2, 2, 3, 3};
10629 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
10630 Mask = UnpackLoMask;
10631 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
10632 Mask = UnpackHiMask;
10634 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }
10638 // Try to use shift instructions.
10639 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
10640 Zeroable, Subtarget, DAG))
10643 // There are special ways we can lower some single-element blends.
10644 if (NumV2Elements == 1)
10645 if (SDValue V = lowerVectorShuffleAsElementInsertion(
10646 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
10649 // We have different paths for blend lowering, but they all must use the
10650 // *exact* same predicate.
10651 bool IsBlendSupported = Subtarget.hasSSE41();
10652 if (IsBlendSupported)
10653 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
10654 Zeroable, Subtarget, DAG))
10657 if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
10661 // Use dedicated unpack instructions for masks that match their pattern.
10663 lowerVectorShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
10666 // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
10668 if (Subtarget.hasSSSE3())
10669 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10670 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
10673 // Assume that a single SHUFPS is faster than an alternative sequence of
10674 // multiple instructions (even if the CPU has a domain penalty).
10675 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
10676 if (!isSingleSHUFPSMask(Mask)) {
10677 // If we have direct support for blends, we should lower by decomposing into
10678 // a permute. That will be faster than the domain cross.
10679 if (IsBlendSupported)
      return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
                                                        Mask, DAG);
10683 // Try to lower by permuting the inputs into an unpack instruction.
10684 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
10685 DL, MVT::v4i32, V1, V2, Mask, DAG))
10689 // We implement this with SHUFPS because it can blend from two vectors.
10690 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
10691 // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // relevant.
10694 SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
10695 SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
10696 SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
  return DAG.getBitcast(MVT::v4i32, ShufPS);
}
10700 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
10701 /// shuffle lowering, and the most complex part.
10703 /// The lowering strategy is to try to form pairs of input lanes which are
10704 /// targeted at the same half of the final vector, and then use a dword shuffle
10705 /// to place them onto the right half, and finally unpack the paired lanes into
10706 /// their final position.
10708 /// The exact breakdown of how to form these dword pairs and align them on the
10709 /// correct sides is really tricky. See the comments within the function for
10710 /// more of the details.
10712 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
10713 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
10714 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
10715 /// vector, form the analogous 128-bit 8-element Mask.
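///
/// Illustrative example: the half-swapping mask <4, 5, 6, 7, 0, 1, 2, 3>
/// pairs up words that already share dwords, so it collapses to a single
/// PSHUFD with dword mask <2, 3, 0, 1>; masks that split pairs across halves
/// first need PSHUFLW/PSHUFHW fix-ups as described in the comments below.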
10716 static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
10717 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
10718 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
10719 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
10720 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
10722 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
10723 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
10724 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
10726 SmallVector<int, 4> LoInputs;
10727 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
10728 std::sort(LoInputs.begin(), LoInputs.end());
10729 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
10730 SmallVector<int, 4> HiInputs;
10731 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
10732 std::sort(HiInputs.begin(), HiInputs.end());
10733 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL =
      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
10736 int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH =
      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
10739 int NumHToH = HiInputs.size() - NumLToH;
10740 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
10741 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
10742 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
10743 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
10745 // If we are splatting two values from one half - one to each half, then
10746 // we can shuffle that half so each is splatted to a dword, then splat those
10747 // to their respective halves.
  auto SplatHalfs = [&](int LoInput, int HiInput, unsigned ShufWOp,
                        int DOffset) {
10750 int PSHUFHalfMask[] = {LoInput % 4, LoInput % 4, HiInput % 4, HiInput % 4};
10751 int PSHUFDMask[] = {DOffset + 0, DOffset + 0, DOffset + 1, DOffset + 1};
10752 V = DAG.getNode(ShufWOp, DL, VT, V,
10753 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
10754 V = DAG.getBitcast(PSHUFDVT, V);
10755 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
10756 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    return DAG.getBitcast(VT, V);
  };
10760 if (NumLToL == 1 && NumLToH == 1 && (NumHToL + NumHToH) == 0)
10761 return SplatHalfs(LToLInputs[0], LToHInputs[0], X86ISD::PSHUFLW, 0);
10762 if (NumHToL == 1 && NumHToH == 1 && (NumLToL + NumLToH) == 0)
10763 return SplatHalfs(HToLInputs[0], HToHInputs[0], X86ISD::PSHUFHW, 2);
10765 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
10766 // such inputs we can swap two of the dwords across the half mark and end up
10767 // with <=2 inputs to each half in each half. Once there, we can fall through
10768 // to the generic code below. For example:
10770 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
10771 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
10773 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
10774 // and an existing 2-into-2 on the other half. In this case we may have to
10775 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
10776 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
10777 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
10778 // because any other situation (including a 3-into-1 or 1-into-3 in the other
10779 // half than the one we target for fixing) will be fixed when we re-enter this
10780 // path. We will also combine away any sequence of PSHUFD instructions that
10781 // result into a single instruction. Here is an example of the tricky case:
10783 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
10784 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
10786 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
10788 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
10789 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
10791 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
10792 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
10794 // The result is fine to be handled by the generic logic.
10795 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
10796 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
10797 int AOffset, int BOffset) {
10798 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
10799 "Must call this with A having 3 or 1 inputs from the A half.");
10800 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
10801 "Must call this with B having 1 or 3 inputs from the B half.");
10802 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
10803 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
10805 bool ThreeAInputs = AToAInputs.size() == 3;
10807 // Compute the index of dword with only one word among the three inputs in
10808 // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
10811 int ADWord, BDWord;
10812 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
10813 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
10814 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
10815 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
10816 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
10817 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
10818 int TripleNonInputIdx =
10819 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
10820 TripleDWord = TripleNonInputIdx / 2;
    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
10824 OneInputDWord = (OneInput / 2) ^ 1;
10826 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
10827 // and BToA inputs. If there is also such a problem with the BToB and AToB
10828 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
10829 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
10830 // is essential that we don't *create* a 3<-1 as then we might oscillate.
10831 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
10836 int NumFlippedAToBInputs =
10837 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
10838 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
10839 int NumFlippedBToBInputs =
10840 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
10841 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
10842 if ((NumFlippedAToBInputs == 1 &&
10843 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
10844 (NumFlippedBToBInputs == 1 &&
10845 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
10846 // We choose whether to fix the A half or B half based on whether that
10847 // half has zero flipped inputs. At zero, we may not be able to fix it
10848 // with that half. We also bias towards fixing the B half because that
10849 // will more commonly be the high half, and we have to bias one way.
10850 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
10851 ArrayRef<int> Inputs) {
10852 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
10853 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
10854 // Determine whether the free index is in the flipped dword or the
10855 // unflipped dword based on where the pinned index is. We use this bit
10856 // in an xor to conditionally select the adjacent dword.
10857 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
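// For example: with PinnedIdx = 6 and DWord = 3, the pinned word already sits in the
// flipped dword, so FixFreeIdx = 2 * (3 ^ 1) = 4, the low word of the adjacent dword;
// otherwise it stays 2 * DWord.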
10858 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
10859 if (IsFixIdxInput == IsFixFreeIdxInput)
10860 FixFreeIdx += 1;
10861 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
10862 assert(IsFixIdxInput != IsFixFreeIdxInput &&
10863 "We need to be changing the number of flipped inputs!");
10864 int PSHUFHalfMask[] = {0, 1, 2, 3};
10865 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
10866 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
10867 MVT::v8i16, V,
10868 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
10870 for (int &M : Mask)
10871 if (M >= 0 && M == FixIdx)
10872 M = FixFreeIdx;
10873 else if (M >= 0 && M == FixFreeIdx)
10874 M = FixIdx;
10875 };
10876 if (NumFlippedBToBInputs != 0) {
10877 int BPinnedIdx =
10878 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
10879 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
10880 } else {
10881 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
10882 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
10883 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
10884 }
10885 }
10886 }
10888 int PSHUFDMask[] = {0, 1, 2, 3};
10889 PSHUFDMask[ADWord] = BDWord;
10890 PSHUFDMask[BDWord] = ADWord;
10891 V = DAG.getBitcast(
10892 VT,
10893 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
10894 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
10896 // Adjust the mask to match the new locations of A and B.
10897 for (int &M : Mask)
10898 if (M >= 0 && M/2 == ADWord)
10899 M = 2 * BDWord + M % 2;
10900 else if (M >= 0 && M/2 == BDWord)
10901 M = 2 * ADWord + M % 2;
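// For example: with ADWord = 3 and BDWord = 1, after the PSHUFD swap a mask element
// 6 (in dword 3) is rewritten to 2*1 + 0 = 2, while element 3 (in dword 1) becomes
// 2*3 + 1 = 7.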
10903 // Recurse back into this routine to re-compute state now that this isn't
10904 // a 3 and 1 problem.
10905 return lowerV8I16GeneralSingleInputVectorShuffle(DL, VT, V, Mask, Subtarget,
10906 DAG);
10907 };
10908 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
10909 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
10910 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
10911 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
10913 // At this point there are at most two inputs to the low and high halves from
10914 // each half. That means the inputs can always be grouped into dwords and
10915 // those dwords can then be moved to the correct half with a dword shuffle.
10916 // We use at most one low and one high word shuffle to collect these paired
10917 // inputs into dwords, and finally a dword shuffle to place them.
10918 int PSHUFLMask[4] = {-1, -1, -1, -1};
10919 int PSHUFHMask[4] = {-1, -1, -1, -1};
10920 int PSHUFDMask[4] = {-1, -1, -1, -1};
10922 // First fix the masks for all the inputs that are staying in their
10923 // original halves. This will then dictate the targets of the cross-half shuffles.
10925 auto fixInPlaceInputs =
10926 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
10927 MutableArrayRef<int> SourceHalfMask,
10928 MutableArrayRef<int> HalfMask, int HalfOffset) {
10929 if (InPlaceInputs.empty())
10930 return;
10931 if (InPlaceInputs.size() == 1) {
10932 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
10933 InPlaceInputs[0] - HalfOffset;
10934 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
10935 return;
10936 }
10937 if (IncomingInputs.empty()) {
10938 // Just fix all of the in place inputs.
10939 for (int Input : InPlaceInputs) {
10940 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
10941 PSHUFDMask[Input / 2] = Input / 2;
10942 }
10943 return;
10944 }
10946 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
10947 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
10948 InPlaceInputs[0] - HalfOffset;
10949 // Put the second input next to the first so that they are packed into
10950 // a dword. We find the adjacent index by toggling the low bit.
10951 int AdjIndex = InPlaceInputs[0] ^ 1;
10952 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
10953 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
10954 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
10955 };
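// For example: if the in-place inputs of the high half are words 5 and 7 (HalfOffset 4),
// word 5 keeps its slot, AdjIndex = 5 ^ 1 = 4, word 7 is moved into slot 4, and
// PSHUFDMask[2] = 2 so the pair stays packed in dword 2.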
10956 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
10957 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
10959 // Now gather the cross-half inputs and place them into a free dword of
10960 // their target half.
10961 // FIXME: This operation could almost certainly be simplified dramatically to
10962 // look more like the 3-1 fixing operation.
10963 auto moveInputsToRightHalf = [&PSHUFDMask](
10964 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
10965 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
10966 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
10968 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
10969 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
10970 };
10971 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
10972 int Word) {
10973 int LowWord = Word & ~1;
10974 int HighWord = Word | 1;
10975 return isWordClobbered(SourceHalfMask, LowWord) ||
10976 isWordClobbered(SourceHalfMask, HighWord);
10977 };
10979 if (IncomingInputs.empty())
10980 return;
10982 if (ExistingInputs.empty()) {
10983 // Map any dwords with inputs from them into the right half.
10984 for (int Input : IncomingInputs) {
10985 // If the source half mask maps over the inputs, turn those into
10986 // swaps and use the swapped lane.
10987 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
10988 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
10989 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
10990 Input - SourceOffset;
10991 // We have to swap the uses in our half mask in one sweep.
10992 for (int &M : HalfMask)
10993 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
10994 M = Input;
10995 else if (M == Input)
10996 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
10997 } else {
10998 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
10999 Input - SourceOffset &&
11000 "Previous placement doesn't match!");
11002 // Note that this correctly re-maps both when we do a swap and when
11003 // we observe the other side of the swap above. We rely on that to
11004 // avoid swapping the members of the input list directly.
11005 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
11006 }
11008 // Map the input's dword into the correct half.
11009 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
11010 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
11011 else
11012 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
11013 Input / 2 &&
11014 "Previous placement doesn't match!");
11015 }
11017 // And just directly shift any other-half mask elements to be same-half
11018 // as we will have mirrored the dword containing the element into the
11019 // same position within that half.
11020 for (int &M : HalfMask)
11021 if (M >= SourceOffset && M < SourceOffset + 4) {
11022 M = M - SourceOffset + DestOffset;
11023 assert(M >= 0 && "This should never wrap below zero!");
11024 }
11025 return;
11026 }
11028 // Ensure we have the input in a viable dword of its current half. This
11029 // is particularly tricky because the original position may be clobbered
11030 // by inputs being moved and *staying* in that half.
11031 if (IncomingInputs.size() == 1) {
11032 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
11033 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
11034 SourceOffset;
11035 SourceHalfMask[InputFixed - SourceOffset] =
11036 IncomingInputs[0] - SourceOffset;
11037 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
11038 InputFixed);
11039 IncomingInputs[0] = InputFixed;
11040 }
11041 } else if (IncomingInputs.size() == 2) {
11042 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
11043 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
11044 // We have two non-adjacent or clobbered inputs we need to extract from
11045 // the source half. To do this, we need to map them into some adjacent
11046 // dword slot in the source mask.
11047 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
11048 IncomingInputs[1] - SourceOffset};
11050 // If there is a free slot in the source half mask adjacent to one of
11051 // the inputs, place the other input in it. We use (Index XOR 1) to
11052 // compute an adjacent index.
11053 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
11054 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
11055 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
11056 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
11057 InputsFixed[1] = InputsFixed[0] ^ 1;
11058 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
11059 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
11060 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
11061 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
11062 InputsFixed[0] = InputsFixed[1] ^ 1;
11063 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
11064 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
11065 // The two inputs are in the same DWord but it is clobbered and the
11066 // adjacent DWord isn't used at all. Move both inputs to the free slot.
11068 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
11069 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
11070 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
11071 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
11072 } else {
11073 // The only way we hit this point is if there is no clobbering
11074 // (because there are no off-half inputs to this half) and there is no
11075 // free slot adjacent to one of the inputs. In this case, we have to
11076 // swap an input with a non-input.
11077 for (int i = 0; i < 4; ++i)
11078 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
11079 "We can't handle any clobbers here!");
11080 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
11081 "Cannot have adjacent inputs here!");
11083 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
11084 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
11086 // We also have to update the final source mask in this case because
11087 // it may need to undo the above swap.
11088 for (int &M : FinalSourceHalfMask)
11089 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
11090 M = InputsFixed[1] + SourceOffset;
11091 else if (M == InputsFixed[1] + SourceOffset)
11092 M = (InputsFixed[0] ^ 1) + SourceOffset;
11094 InputsFixed[1] = InputsFixed[0] ^ 1;
11095 }
11096 }
11097 // Point everything at the fixed inputs.
11098 for (int &M : HalfMask)
11099 if (M == IncomingInputs[0])
11100 M = InputsFixed[0] + SourceOffset;
11101 else if (M == IncomingInputs[1])
11102 M = InputsFixed[1] + SourceOffset;
11104 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
11105 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
11107 } else {
11108 llvm_unreachable("Unhandled input size!");
11109 }
11111 // Now hoist the DWord down to the right half.
11112 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
11113 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
11114 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
11115 for (int &M : HalfMask)
11116 for (int Input : IncomingInputs)
11117 if (M == Input)
11118 M = FreeDWord * 2 + Input % 2;
11119 };
11120 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
11121 /*SourceOffset*/ 4, /*DestOffset*/ 0);
11122 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
11123 /*SourceOffset*/ 0, /*DestOffset*/ 4);
11125 // Now enact all the shuffles we've computed to move the inputs into their target half.
11127 if (!isNoopShuffleMask(PSHUFLMask))
11128 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
11129 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
11130 if (!isNoopShuffleMask(PSHUFHMask))
11131 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
11132 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
11133 if (!isNoopShuffleMask(PSHUFDMask))
11134 V = DAG.getBitcast(
11135 VT,
11136 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
11137 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11139 // At this point, each half should contain all its inputs, and we can then
11140 // just shuffle them into their final position.
11141 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
11142 "Failed to lift all the high half inputs to the low mask!");
11143 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
11144 "Failed to lift all the low half inputs to the high mask!");
11146 // Do a half shuffle for the low mask.
11147 if (!isNoopShuffleMask(LoMask))
11148 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
11149 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
11151 // Do a half shuffle with the high mask after shifting its values down.
11152 for (int &M : HiMask)
11153 if (M >= 0)
11154 M -= 4;
11155 if (!isNoopShuffleMask(HiMask))
11156 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
11157 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
11159 return V;
11160 }
11162 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
11163 /// blend if only one input is used.
11164 static SDValue lowerVectorShuffleAsBlendOfPSHUFBs(
11165 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11166 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse,
11167 bool &V2InUse) {
11168 SDValue V1Mask[16];
11169 SDValue V2Mask[16];
11170 V1InUse = false;
11171 V2InUse = false;
11173 int Size = Mask.size();
11174 int Scale = 16 / Size;
11175 for (int i = 0; i < 16; ++i) {
11176 if (Mask[i / Scale] < 0) {
11177 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
11178 } else {
11179 const int ZeroMask = 0x80;
11180 int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
11181 : ZeroMask;
11182 int V2Idx = Mask[i / Scale] < Size
11183 ? ZeroMask
11184 : (Mask[i / Scale] - Size) * Scale + i % Scale;
11185 if (Zeroable[i / Scale])
11186 V1Idx = V2Idx = ZeroMask;
11187 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
11188 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
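// For example: for a v8i16 shuffle (Size = 8, Scale = 2) a mask element of 9 selects
// word 1 of V2, so the two covered bytes get V2 indices 2 and 3 while the matching
// V1 indices are set to ZeroMask (0x80).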
11189 V1InUse |= (ZeroMask != V1Idx);
11190 V2InUse |= (ZeroMask != V2Idx);
11191 }
11192 }
11194 if (V1InUse)
11195 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
11196 DAG.getBitcast(MVT::v16i8, V1),
11197 DAG.getBuildVector(MVT::v16i8, DL, V1Mask));
11198 if (V2InUse)
11199 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
11200 DAG.getBitcast(MVT::v16i8, V2),
11201 DAG.getBuildVector(MVT::v16i8, DL, V2Mask));
11203 // If we need shuffled inputs from both, blend the two.
11204 SDValue V;
11205 if (V1InUse && V2InUse)
11206 V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
11207 else
11208 V = V1InUse ? V1 : V2;
11210 // Cast the result back to the correct type.
11211 return DAG.getBitcast(VT, V);
11212 }
11214 /// \brief Generic lowering of 8-lane i16 shuffles.
11216 /// This handles both single-input shuffles and combined shuffle/blends with
11217 /// two inputs. The single input shuffles are immediately delegated to
11218 /// a dedicated lowering routine.
11220 /// The blends are lowered in one of three fundamental ways. If there are few
11221 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
11222 /// of the input is significantly cheaper when lowered as an interleaving of
11223 /// the two inputs, try to interleave them. Otherwise, blend the low and high
11224 /// halves of the inputs separately (making them have relatively few inputs)
11225 /// and then concatenate them.
11226 static SDValue lowerV8I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11227 const APInt &Zeroable,
11228 SDValue V1, SDValue V2,
11229 const X86Subtarget &Subtarget,
11230 SelectionDAG &DAG) {
11231 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
11232 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
11233 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11235 // Whenever we can lower this as a zext, that instruction is strictly faster
11236 // than any alternative.
11237 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
11238 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
11241 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
11243 if (NumV2Inputs == 0) {
11244 // Check for being able to broadcast a single element.
11245 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
11246 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
11249 // Try to use shift instructions.
11250 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
11251 Zeroable, Subtarget, DAG))
11254 // Use dedicated unpack instructions for masks that match their pattern.
11256 lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
11259 // Try to use byte rotation instructions.
11260 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i16, V1, V1,
11261 Mask, Subtarget, DAG))
11264 // Make a copy of the mask so it can be modified.
11265 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
11266 return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1,
11267 MutableMask, Subtarget,
11271 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
11272 "All single-input shuffles should be canonicalized to be V1-input "
11275 // Try to use shift instructions.
11276 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
11277 Zeroable, Subtarget, DAG))
11280 // See if we can use SSE4A Extraction / Insertion.
11281 if (Subtarget.hasSSE4A())
11282 if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
11286 // There are special ways we can lower some single-element blends.
11287 if (NumV2Inputs == 1)
11288 if (SDValue V = lowerVectorShuffleAsElementInsertion(
11289 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
11292 // We have different paths for blend lowering, but they all must use the
11293 // *exact* same predicate.
11294 bool IsBlendSupported = Subtarget.hasSSE41();
11295 if (IsBlendSupported)
11296 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
11297 Zeroable, Subtarget, DAG))
11300 if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
11304 // Use dedicated unpack instructions for masks that match their pattern.
11306 lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
11309 // Try to use byte rotation instructions.
11310 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11311 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
11314 if (SDValue BitBlend =
11315 lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
11318 // Try to lower by permuting the inputs into an unpack instruction.
11319 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1,
11323 // If we can't directly blend but can use PSHUFB, that will be better as it
11324 // can both shuffle and set up the inefficient blend.
11325 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
11326 bool V1InUse, V2InUse;
11327 return lowerVectorShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
11328 Zeroable, DAG, V1InUse, V2InUse);
11331 // We can always bit-blend if we have to so the fallback strategy is to
11332 // decompose into single-input permutes and blends.
11333 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
11334 Mask, DAG);
11335 }
11337 /// \brief Check whether a compaction lowering can be done by dropping even
11338 /// elements and compute how many times even elements must be dropped.
11340 /// This handles shuffles which take every Nth element where N is a power of
11341 /// two. Example shuffle masks:
11343 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
11344 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
11345 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
11346 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
11347 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
11348 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
11350 /// Any of these lanes can of course be undef.
11352 /// This routine only supports N <= 3.
11353 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here for larger N.
11356 /// \returns N above, or the number of times even elements must be dropped if
11357 /// there is such a number. Otherwise returns zero.
11358 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
11359 bool IsSingleInput) {
11360 // The modulus for the shuffle vector entries is based on whether this is
11361 // a single input or not.
11362 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
11363 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
11364 "We should only be called with masks with a power-of-2 size!");
11366 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
11368 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
11369 // and 2^3 simultaneously. This is because we may have ambiguity with
11370 // partially undef inputs.
11371 bool ViableForN[3] = {true, true, true};
11373 for (int i = 0, e = Mask.size(); i < e; ++i) {
11374 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
11375 // need.
11376 if (Mask[i] < 0)
11377 continue;
11379 bool IsAnyViable = false;
11380 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
11381 if (ViableForN[j]) {
11382 uint64_t N = j + 1;
11384 // The shuffle mask must be equal to (i * 2^N) % M.
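// For example: with N = 1 and a 16-element single-input mask (ModMask = 15),
// element i = 9 must equal (9 << 1) & 15 = 2, matching the N = 1 pattern shown above.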
11385 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
11386 IsAnyViable = true;
11387 else
11388 ViableForN[j] = false;
11389 }
11390 // Early exit if we exhaust the possible powers of two.
11391 if (!IsAnyViable)
11392 break;
11393 }
11395 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
11396 if (ViableForN[j])
11397 return j + 1;
11399 // Return 0 as there is no viable power of two.
11400 return 0;
11401 }
11403 /// \brief Generic lowering of v16i8 shuffles.
11405 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
11406 /// detect any complexity reducing interleaving. If that doesn't help, it uses
11407 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
11408 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them back together.
11410 static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11411 const APInt &Zeroable,
11412 SDValue V1, SDValue V2,
11413 const X86Subtarget &Subtarget,
11414 SelectionDAG &DAG) {
11415 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
11416 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
11417 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11419 // Try to use shift instructions.
11420 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
11421 Zeroable, Subtarget, DAG))
11424 // Try to use byte rotation instructions.
11425 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11426 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
11429 // Try to use a zext lowering.
11430 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
11431 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
11434 // See if we can use SSE4A Extraction / Insertion.
11435 if (Subtarget.hasSSE4A())
11436 if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
11440 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
11442 // For single-input shuffles, there are some nicer lowering tricks we can use.
11443 if (NumV2Elements == 0) {
11444 // Check for being able to broadcast a single element.
11445 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
11446 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
11449 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
11450 // Notably, this handles splat and partial-splat shuffles more efficiently.
11451 // However, it only makes sense if the pre-duplication shuffle simplifies
11452 // things significantly. Currently, this means we need to be able to
11453 // express the pre-duplication shuffle as an i16 shuffle.
11455 // FIXME: We should check for other patterns which can be widened into an
11456 // i16 shuffle as well.
11457 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
11458 for (int i = 0; i < 16; i += 2)
11459 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
11460 return false;
11461 return true;
11462 };
11464 auto tryToWidenViaDuplication = [&]() -> SDValue {
11465 if (!canWidenViaDuplication(Mask))
11466 return SDValue();
11467 SmallVector<int, 4> LoInputs;
11468 copy_if(Mask, std::back_inserter(LoInputs),
11469 [](int M) { return M >= 0 && M < 8; });
11470 std::sort(LoInputs.begin(), LoInputs.end());
11471 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
11472 LoInputs.end());
11473 SmallVector<int, 4> HiInputs;
11474 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
11475 std::sort(HiInputs.begin(), HiInputs.end());
11476 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
11477 HiInputs.end());
11479 bool TargetLo = LoInputs.size() >= HiInputs.size();
11480 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
11481 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
11483 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
11484 SmallDenseMap<int, int, 8> LaneMap;
11485 for (int I : InPlaceInputs) {
11486 PreDupI16Shuffle[I/2] = I/2;
11487 LaneMap[I] = I;
11488 }
11489 int j = TargetLo ? 0 : 4, je = j + 4;
11490 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
11491 // Check if j is already a shuffle of this input. This happens when
11492 // there are two adjacent bytes after we move the low one.
11493 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
11494 // If we haven't yet mapped the input, search for a slot into which
11496 while (j < je && PreDupI16Shuffle[j] >= 0)
11497 ++j;
11499 if (j == je)
11500 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
11501 return SDValue();
11503 // Map this input with the i16 shuffle.
11504 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
11505 }
11507 // Update the lane map based on the mapping we ended up with.
11508 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
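// For example: moving byte 11 corresponds to i16 element 5; if it lands in slot
// j = 1 the lane map records that byte 11 now lives at byte position 2 * 1 + 1 = 3.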
11509 }
11510 V1 = DAG.getBitcast(
11511 MVT::v16i8,
11512 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
11513 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
11515 // Unpack the bytes to form the i16s that will be shuffled into place.
11516 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
11517 MVT::v16i8, V1, V1);
11519 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
11520 for (int i = 0; i < 16; ++i)
11521 if (Mask[i] >= 0) {
11522 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
11523 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
11524 if (PostDupI16Shuffle[i / 2] < 0)
11525 PostDupI16Shuffle[i / 2] = MappedMask;
11526 else
11527 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
11528 "Conflicting entries in the original shuffle!");
11529 }
11530 return DAG.getBitcast(
11531 MVT::v16i8,
11532 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
11533 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
11534 };
11535 if (SDValue V = tryToWidenViaDuplication())
11536 return V;
11537 }
11539 if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
11543 // Use dedicated unpack instructions for masks that match their pattern.
11545 lowerVectorShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
11548 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
11549 // with PSHUFB. It is important to do this before we attempt to generate any
11550 // blends but after all of the single-input lowerings. If the single input
11551 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
11552 // want to preserve that and we can DAG combine any longer sequences into
11553 // a PSHUFB in the end. But once we start blending from multiple inputs,
11554 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
11555 // and there are *very* few patterns that would actually be faster than the
11556 // PSHUFB approach because of its ability to zero lanes.
11558 // FIXME: The only exceptions to the above are blends which are exact
11559 // interleavings with direct instructions supporting them. We currently don't
11560 // handle those well here.
11561 if (Subtarget.hasSSSE3()) {
11562 bool V1InUse = false;
11563 bool V2InUse = false;
11565 SDValue PSHUFB = lowerVectorShuffleAsBlendOfPSHUFBs(
11566 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
11568 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
11569 // do so. This avoids using them to handle blends-with-zero which is
11570 // important as a single pshufb is significantly faster for that.
11571 if (V1InUse && V2InUse) {
11572 if (Subtarget.hasSSE41())
11573 if (SDValue Blend = lowerVectorShuffleAsBlend(
11574 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
11577 // We can use an unpack to do the blending rather than an or in some
11578 // cases. Even though the or may be (very minorly) more efficient, we
11579 // preference this lowering because there are common cases where part of
11580 // the complexity of the shuffles goes away when we do the final blend as an unpack.
11582 // FIXME: It might be worth trying to detect if the unpack-feeding
11583 // shuffles will both be pshufb, in which case we shouldn't bother with the unpack.
11585 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
11586 DL, MVT::v16i8, V1, V2, Mask, DAG))
11587 return Unpack;
11588 }
11590 return PSHUFB;
11591 }
11593 // There are special ways we can lower some single-element blends.
11594 if (NumV2Elements == 1)
11595 if (SDValue V = lowerVectorShuffleAsElementInsertion(
11596 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
11599 if (SDValue BitBlend =
11600 lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
11603 // Check whether a compaction lowering can be done. This handles shuffles
11604 // which take every Nth element for some even N. See the helper function for details.
11607 // We special case these as they can be particularly efficiently handled with
11608 // the PACKUSB instruction on x86 and they show up in common patterns of
11609 // rearranging bytes to truncate wide elements.
11610 bool IsSingleInput = V2.isUndef();
11611 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
11612 // NumEvenDrops is the power of two stride of the elements. Another way of
11613 // thinking about it is that we need to drop the even elements this many
11614 // times to get the original input.
11616 // First we need to zero all the dropped bytes.
11617 assert(NumEvenDrops <= 3 &&
11618 "No support for dropping even elements more than 3 times.");
11619 // We use the mask type to pick which bytes are preserved based on how many
11620 // elements are dropped.
11621 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
11622 SDValue ByteClearMask = DAG.getBitcast(
11623 MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
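// For example: with NumEvenDrops == 1 the clear mask is 0x00FF replicated as v8i16,
// so only the low byte of each word survives and a single PACKUS of V1 and V2
// produces the compacted v16i8 result.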
11624 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
11625 if (!IsSingleInput)
11626 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
11628 // Now pack things back together.
11629 V1 = DAG.getBitcast(MVT::v8i16, V1);
11630 V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
11631 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
11632 for (int i = 1; i < NumEvenDrops; ++i) {
11633 Result = DAG.getBitcast(MVT::v8i16, Result);
11634 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
11635 }
11637 return Result;
11638 }
11640 // Handle multi-input cases by blending single-input shuffles.
11641 if (NumV2Elements > 0)
11642 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
11643 Mask, DAG);
11645 // The fallback path for single-input shuffles widens this into two v8i16
11646 // vectors with unpacks, shuffles those, and then pulls them back together
11647 // with a pack.
11648 SDValue V = V1;
11650 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
11651 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
11652 for (int i = 0; i < 16; ++i)
11653 if (Mask[i] >= 0)
11654 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
11656 SDValue VLoHalf, VHiHalf;
11657 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
11658 // them out and avoid using UNPCK{L,H} to extract the elements of V as i16s.
11660 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
11661 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
11662 // Use a mask to drop the high bytes.
11663 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
11664 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
11665 DAG.getConstant(0x00FF, DL, MVT::v8i16));
11667 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
11668 VHiHalf = DAG.getUNDEF(MVT::v8i16);
11670 // Squash the masks to point directly into VLoHalf.
11671 for (int &M : LoBlendMask)
11672 if (M >= 0)
11673 M /= 2;
11674 for (int &M : HiBlendMask)
11675 if (M >= 0)
11676 M /= 2;
11677 } else {
11678 // Otherwise just unpack the low half of V into VLoHalf and the high half into
11679 // VHiHalf so that we can blend them as i16s.
11680 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
11682 VLoHalf = DAG.getBitcast(
11683 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
11684 VHiHalf = DAG.getBitcast(
11685 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
11688 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
11689 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
11691 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
11694 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
11696 /// This routine breaks down the specific type of 128-bit shuffle and
11697 /// dispatches to the lowering routines accordingly.
11698 static SDValue lower128BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11699 MVT VT, SDValue V1, SDValue V2,
11700 const APInt &Zeroable,
11701 const X86Subtarget &Subtarget,
11702 SelectionDAG &DAG) {
11703 switch (VT.SimpleTy) {
11704 case MVT::v2i64:
11705 return lowerV2I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
11706 case MVT::v2f64:
11707 return lowerV2F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
11708 case MVT::v4i32:
11709 return lowerV4I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
11710 case MVT::v4f32:
11711 return lowerV4F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
11712 case MVT::v8i16:
11713 return lowerV8I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
11714 case MVT::v16i8:
11715 return lowerV16I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
11717 default:
11718 llvm_unreachable("Unimplemented!");
11719 }
11720 }
11722 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
11724 /// This routine just extracts two subvectors, shuffles them independently, and
11725 /// then concatenates them back together. This should work effectively with all
11726 /// AVX vector shuffle types.
11727 static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
11728 SDValue V2, ArrayRef<int> Mask,
11729 SelectionDAG &DAG) {
11730 assert(VT.getSizeInBits() >= 256 &&
11731 "Only for 256-bit or wider vector shuffles!");
11732 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
11733 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
11735 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
11736 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
11738 int NumElements = VT.getVectorNumElements();
11739 int SplitNumElements = NumElements / 2;
11740 MVT ScalarVT = VT.getVectorElementType();
11741 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
11743 // Rather than splitting build-vectors, just build two narrower build
11744 // vectors. This helps shuffling with splats and zeros.
11745 auto SplitVector = [&](SDValue V) {
11746 V = peekThroughBitcasts(V);
11748 MVT OrigVT = V.getSimpleValueType();
11749 int OrigNumElements = OrigVT.getVectorNumElements();
11750 int OrigSplitNumElements = OrigNumElements / 2;
11751 MVT OrigScalarVT = OrigVT.getVectorElementType();
11752 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
11756 auto *BV = dyn_cast<BuildVectorSDNode>(V);
11758 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
11759 DAG.getIntPtrConstant(0, DL));
11760 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
11761 DAG.getIntPtrConstant(OrigSplitNumElements, DL));
11764 SmallVector<SDValue, 16> LoOps, HiOps;
11765 for (int i = 0; i < OrigSplitNumElements; ++i) {
11766 LoOps.push_back(BV->getOperand(i));
11767 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
11769 LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
11770 HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
11772 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
11773 DAG.getBitcast(SplitVT, HiV));
11776 SDValue LoV1, HiV1, LoV2, HiV2;
11777 std::tie(LoV1, HiV1) = SplitVector(V1);
11778 std::tie(LoV2, HiV2) = SplitVector(V2);
11780 // Now create two 4-way blends of these half-width vectors.
11781 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
11782 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
11783 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
11784 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
11785 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
11786 for (int i = 0; i < SplitNumElements; ++i) {
11787 int M = HalfMask[i];
11788 if (M >= NumElements) {
11789 if (M >= NumElements + SplitNumElements)
11790 UseHiV2 = true;
11791 else
11792 UseLoV2 = true;
11793 V2BlendMask[i] = M - NumElements;
11794 BlendMask[i] = SplitNumElements + i;
11795 } else if (M >= 0) {
11796 if (M >= SplitNumElements)
11797 UseHiV1 = true;
11798 else
11799 UseLoV1 = true;
11800 V1BlendMask[i] = M;
11801 BlendMask[i] = i;
11802 }
11803 }
11805 // Because the lowering happens after all combining takes place, we need to
11806 // manually combine these blend masks as much as possible so that we create
11807 // a minimal number of high-level vector shuffle nodes.
11809 // First try just blending the halves of V1 or V2.
11810 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
11811 return DAG.getUNDEF(SplitVT);
11812 if (!UseLoV2 && !UseHiV2)
11813 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
11814 if (!UseLoV1 && !UseHiV1)
11815 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
11817 SDValue V1Blend, V2Blend;
11818 if (UseLoV1 && UseHiV1) {
11819 V1Blend =
11820 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
11821 } else {
11822 // We only use half of V1 so map the usage down into the final blend mask.
11823 V1Blend = UseLoV1 ? LoV1 : HiV1;
11824 for (int i = 0; i < SplitNumElements; ++i)
11825 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
11826 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
11827 }
11828 if (UseLoV2 && UseHiV2) {
11829 V2Blend =
11830 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
11831 } else {
11832 // We only use half of V2 so map the usage down into the final blend mask.
11833 V2Blend = UseLoV2 ? LoV2 : HiV2;
11834 for (int i = 0; i < SplitNumElements; ++i)
11835 if (BlendMask[i] >= SplitNumElements)
11836 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
11837 }
11838 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
11839 };
11840 SDValue Lo = HalfBlend(LoMask);
11841 SDValue Hi = HalfBlend(HiMask);
11842 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
11845 /// \brief Either split a vector in halves or decompose the shuffles and the blend.
11848 /// This is provided as a good fallback for many lowerings of non-single-input
11849 /// shuffles with more than one 128-bit lane. In those cases, we want to select
11850 /// between splitting the shuffle into 128-bit components and stitching those
11851 /// back together vs. extracting the single-input shuffles and blending those results.
11853 static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT,
11854 SDValue V1, SDValue V2,
11855 ArrayRef<int> Mask,
11856 SelectionDAG &DAG) {
11857 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
11858 "shuffles as it could then recurse on itself.");
11859 int Size = Mask.size();
11861 // If this can be modeled as a broadcast of two elements followed by a blend,
11862 // prefer that lowering. This is especially important because broadcasts can
11863 // often fold with memory operands.
11864 auto DoBothBroadcast = [&] {
11865 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
11866 for (int M : Mask)
11867 if (M >= Size) {
11868 if (V2BroadcastIdx < 0)
11869 V2BroadcastIdx = M - Size;
11870 else if (M - Size != V2BroadcastIdx)
11871 return false;
11872 } else if (M >= 0) {
11873 if (V1BroadcastIdx < 0)
11874 V1BroadcastIdx = M;
11875 else if (M != V1BroadcastIdx)
11876 return false;
11877 }
11878 return true;
11879 };
11880 if (DoBothBroadcast())
11881 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
11882 DAG);
11884 // If the inputs all stem from a single 128-bit lane of each input, then we
11885 // split them rather than blending because the split will decompose to
11886 // unusually few instructions.
11887 int LaneCount = VT.getSizeInBits() / 128;
11888 int LaneSize = Size / LaneCount;
11889 SmallBitVector LaneInputs[2];
11890 LaneInputs[0].resize(LaneCount, false);
11891 LaneInputs[1].resize(LaneCount, false);
11892 for (int i = 0; i < Size; ++i)
11893 if (Mask[i] >= 0)
11894 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
11895 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
11896 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11898 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
11899 // that the decomposed single-input shuffles don't end up here.
11900 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
11903 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
11904 /// a permutation and blend of those lanes.
11906 /// This essentially blends the out-of-lane inputs to each lane into the lane
11907 /// from a permuted copy of the vector. This lowering strategy results in four
11908 /// instructions in the worst case for a single-input cross lane shuffle which
11909 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
11910 /// of. Special cases for each particular shuffle pattern should be handled
11911 /// prior to trying this lowering.
11912 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT,
11913 SDValue V1, SDValue V2,
11914 ArrayRef<int> Mask,
11915 SelectionDAG &DAG) {
11916 // FIXME: This should probably be generalized for 512-bit vectors as well.
11917 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
11918 int Size = Mask.size();
11919 int LaneSize = Size / 2;
11921 // If there are only inputs from one 128-bit lane, splitting will in fact be
11922 // less expensive. The flags track whether the given lane contains an element
11923 // that crosses to another lane.
11924 bool LaneCrossing[2] = {false, false};
11925 for (int i = 0; i < Size; ++i)
11926 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
11927 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
11928 if (!LaneCrossing[0] || !LaneCrossing[1])
11929 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11931 assert(V2.isUndef() &&
11932 "This last part of this routine only works on single input shuffles");
11934 SmallVector<int, 32> FlippedBlendMask(Size);
11935 for (int i = 0; i < Size; ++i)
11936 FlippedBlendMask[i] =
11937 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
11938 ? Mask[i]
11939 : Mask[i] % LaneSize +
11940 (i / LaneSize) * LaneSize + Size);
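// For example: for v8f32 (Size = 8, LaneSize = 4), Mask[1] = 6 crosses lanes, so
// FlippedBlendMask[1] = 6 % 4 + 0 * 4 + 8 = 10, i.e. lane-local element 2 of the
// flipped operand.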
11942 // Flip the vector, and blend the results which should now be in-lane. The
11943 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
11944 // 5 for the high source. The value 3 selects the high half of source 2 and
11945 // the value 2 selects the low half of source 2. We only use source 2 to
11946 // allow folding it into a memory operand.
11947 unsigned PERMMask = 3 | 2 << 4;
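// For example: with source 2 being V1 here, PERMMask = 3 | (2 << 4) = 0x23 puts V1's
// high half in the low lane of Flipped and V1's low half in its high lane.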
11948 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
11949 V1, DAG.getConstant(PERMMask, DL, MVT::i8));
11950 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
11953 /// \brief Handle lowering 2-lane 128-bit shuffles.
11954 static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
11955 SDValue V2, ArrayRef<int> Mask,
11956 const APInt &Zeroable,
11957 const X86Subtarget &Subtarget,
11958 SelectionDAG &DAG) {
11959 SmallVector<int, 4> WidenedMask;
11960 if (!canWidenShuffleElements(Mask, WidenedMask))
11963 // TODO: If minimizing size and one of the inputs is a zero vector and the
11964 // the zero vector has only one use, we could use a VPERM2X128 to save the
11965 // instruction bytes needed to explicitly generate the zero vector.
11967 // Blends are faster and handle all the non-lane-crossing cases.
11968 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
11969 Zeroable, Subtarget, DAG))
11972 bool IsV1Zero = ISD::isBuildVectorAllZeros(V1.getNode());
11973 bool IsV2Zero = ISD::isBuildVectorAllZeros(V2.getNode());
11975 // If either input operand is a zero vector, use VPERM2X128 because its mask
11976 // allows us to replace the zero input with an implicit zero.
11977 if (!IsV1Zero && !IsV2Zero) {
11978 // Check for patterns which can be matched with a single insert of a 128-bit
11980 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
11981 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
11982 // With AVX2 we should use VPERMQ/VPERMPD to allow memory folding.
11983 if (Subtarget.hasAVX2() && V2.isUndef())
11986 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
11987 VT.getVectorNumElements() / 2);
11988 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
11989 DAG.getIntPtrConstant(0, DL));
11990 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
11991 OnlyUsesV1 ? V1 : V2,
11992 DAG.getIntPtrConstant(0, DL));
11993 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
11997 // Otherwise form a 128-bit permutation. After accounting for undefs,
11998 // convert the 64-bit shuffle mask selection values into 128-bit
11999 // selection bits by dividing the indexes by 2 and shifting into positions
12000 // defined by a vperm2*128 instruction's immediate control byte.
12002 // The immediate permute control byte looks like this:
12003 // [1:0] - select 128 bits from sources for low half of destination
12005 // [3] - zero low half of destination
12006 // [5:4] - select 128 bits from sources for high half of destination
12008 // [7] - zero high half of destination
12010 int MaskLO = WidenedMask[0] < 0 ? 0 : WidenedMask[0];
12011 int MaskHI = WidenedMask[1] < 0 ? 0 : WidenedMask[1];
12013 unsigned PermMask = MaskLO | (MaskHI << 4);
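// For example: WidenedMask = {1, 2} selects the high 128 bits of V1 and the low
// 128 bits of V2, giving PermMask = 1 | (2 << 4) = 0x21.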
12015 // If either input is a zero vector, replace it with an undef input.
12016 // Shuffle mask values < 4 are selecting elements of V1.
12017 // Shuffle mask values >= 4 are selecting elements of V2.
12018 // Adjust each half of the permute mask by clearing the half that was
12019 // selecting the zero vector and setting the zero mask bit.
12021 V1 = DAG.getUNDEF(VT);
12023 PermMask = (PermMask & 0xf0) | 0x08;
12025 PermMask = (PermMask & 0x0f) | 0x80;
12028 V2 = DAG.getUNDEF(VT);
12030 PermMask = (PermMask & 0xf0) | 0x08;
12032 PermMask = (PermMask & 0x0f) | 0x80;
12035 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
12036 DAG.getConstant(PermMask, DL, MVT::i8));
12039 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
12040 /// shuffling each lane.
12042 /// This will only succeed when the result of fixing the 128-bit lanes results
12043 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
12044 /// each 128-bit lanes. This handles many cases where we can quickly blend away
12045 /// the lane crosses early and then use simpler shuffles within each lane.
12047 /// FIXME: It might be worthwhile at some point to support this without
12048 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
12049 /// in x86 only floating point has interesting non-repeating shuffles, and even
12050 /// those are still *marginally* more expensive.
12051 static SDValue lowerVectorShuffleByMerging128BitLanes(
12052 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12053 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12054 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
12056 int Size = Mask.size();
12057 int LaneSize = 128 / VT.getScalarSizeInBits();
12058 int NumLanes = Size / LaneSize;
12059 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
12061 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
12062 // check whether the in-128-bit lane shuffles share a repeating pattern.
12063 SmallVector<int, 4> Lanes((unsigned)NumLanes, -1);
12064 SmallVector<int, 4> InLaneMask((unsigned)LaneSize, -1);
12065 for (int i = 0; i < Size; ++i) {
12069 int j = i / LaneSize;
12071 if (Lanes[j] < 0) {
12072 // First entry we've seen for this lane.
12073 Lanes[j] = Mask[i] / LaneSize;
12074 } else if (Lanes[j] != Mask[i] / LaneSize) {
12075 // This doesn't match the lane selected previously!
12079 // Check that within each lane we have a consistent shuffle mask.
12080 int k = i % LaneSize;
12081 if (InLaneMask[k] < 0) {
12082 InLaneMask[k] = Mask[i] % LaneSize;
12083 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
12084 // This doesn't fit a repeating in-lane mask.
12089 // First shuffle the lanes into place.
12090 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
12091 VT.getSizeInBits() / 64);
12092 SmallVector<int, 8> LaneMask((unsigned)NumLanes * 2, -1);
12093 for (int i = 0; i < NumLanes; ++i)
12094 if (Lanes[i] >= 0) {
12095 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
12096 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
12097 }
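// For example: for a v8f32 shuffle with Lanes = {1, 0}, the v4i64 LaneMask becomes
// {2, 3, 0, 1}, swapping the two 128-bit halves before the in-lane shuffle.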
12099 V1 = DAG.getBitcast(LaneVT, V1);
12100 V2 = DAG.getBitcast(LaneVT, V2);
12101 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
12103 // Cast it back to the type we actually want.
12104 LaneShuffle = DAG.getBitcast(VT, LaneShuffle);
12106 // Now do a simple shuffle that isn't lane crossing.
12107 SmallVector<int, 8> NewMask((unsigned)Size, -1);
12108 for (int i = 0; i < Size; ++i)
12110 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
12111 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
12112 "Must not introduce lane crosses at this point!");
12114 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
12117 /// Lower shuffles where an entire half of a 256-bit vector is UNDEF.
12118 /// This allows for fast cases such as subvector extraction/insertion
12119 /// or shuffling smaller vector types which can lower more efficiently.
12120 static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT,
12121 SDValue V1, SDValue V2,
12122 ArrayRef<int> Mask,
12123 const X86Subtarget &Subtarget,
12124 SelectionDAG &DAG) {
12125 assert(VT.is256BitVector() && "Expected 256-bit vector");
12127 unsigned NumElts = VT.getVectorNumElements();
12128 unsigned HalfNumElts = NumElts / 2;
12129 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
12131 bool UndefLower = isUndefInRange(Mask, 0, HalfNumElts);
12132 bool UndefUpper = isUndefInRange(Mask, HalfNumElts, HalfNumElts);
12133 if (!UndefLower && !UndefUpper)
12136 // Upper half is undef and lower half is whole upper subvector.
12137 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
12139 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
12140 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
12141 DAG.getIntPtrConstant(HalfNumElts, DL));
12142 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
12143 DAG.getIntPtrConstant(0, DL));
12146 // Lower half is undef and upper half is whole lower subvector.
12147 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
12149 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
12150 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
12151 DAG.getIntPtrConstant(0, DL));
12152 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
12153 DAG.getIntPtrConstant(HalfNumElts, DL));
12156 // If the shuffle only uses two of the four halves of the input operands,
12157 // then extract them and perform the 'half' shuffle at half width.
12158 // e.g. vector_shuffle <X, X, X, X, u, u, u, u> or <X, X, u, u>
12159 int HalfIdx1 = -1, HalfIdx2 = -1;
12160 SmallVector<int, 8> HalfMask(HalfNumElts);
12161 unsigned Offset = UndefLower ? HalfNumElts : 0;
12162 for (unsigned i = 0; i != HalfNumElts; ++i) {
12163 int M = Mask[i + Offset];
12164 if (M < 0) {
12165 HalfMask[i] = M;
12166 continue;
12167 }
12169 // Determine which of the 4 half vectors this element is from.
12170 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
12171 int HalfIdx = M / HalfNumElts;
12173 // Determine the element index into its half vector source.
12174 int HalfElt = M % HalfNumElts;
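// For example: in a v8f32 shuffle (HalfNumElts = 4), M = 13 gives HalfIdx = 3
// (the upper half of V2) and HalfElt = 1.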
12176 // We can shuffle with up to 2 half vectors, set the new 'half'
12177 // shuffle mask accordingly.
12178 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
12179 HalfMask[i] = HalfElt;
12180 HalfIdx1 = HalfIdx;
12183 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
12184 HalfMask[i] = HalfElt + HalfNumElts;
12185 HalfIdx2 = HalfIdx;
12189 // Too many half vectors referenced.
12192 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
12194 // Only shuffle the halves of the inputs when useful.
12195 int NumLowerHalves =
12196 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
12197 int NumUpperHalves =
12198 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
12200 // uuuuXXXX - don't extract uppers just to insert again.
12201 if (UndefLower && NumUpperHalves != 0)
12204 // XXXXuuuu - don't extract both uppers, instead shuffle and then extract.
12205 if (UndefUpper && NumUpperHalves == 2)
12208 // AVX2 - XXXXuuuu - always extract lowers.
12209 if (Subtarget.hasAVX2() && !(UndefUpper && NumUpperHalves == 0)) {
12210 // AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
12211 if (VT == MVT::v4f64 || VT == MVT::v4i64)
12213 // AVX2 supports variable 32-bit element cross-lane shuffles.
12214 if (VT == MVT::v8f32 || VT == MVT::v8i32) {
12215 // XXXXuuuu - don't extract lowers and uppers.
12216 if (UndefUpper && NumLowerHalves != 0 && NumUpperHalves != 0)
12221 auto GetHalfVector = [&](int HalfIdx) {
12223 return DAG.getUNDEF(HalfVT);
12224 SDValue V = (HalfIdx < 2 ? V1 : V2);
12225 HalfIdx = (HalfIdx % 2) * HalfNumElts;
12226 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
12227 DAG.getIntPtrConstant(HalfIdx, DL));
12230 SDValue Half1 = GetHalfVector(HalfIdx1);
12231 SDValue Half2 = GetHalfVector(HalfIdx2);
12232 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
12233 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
12234 DAG.getIntPtrConstant(Offset, DL));
12237 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
12240 /// This returns true if the elements from a particular input are already in the
12241 /// slot required by the given mask and require no permutation.
12242 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
12243 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
12244 int Size = Mask.size();
12245 for (int i = 0; i < Size; ++i)
12246 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
12252 /// Handle case where shuffle sources are coming from the same 128-bit lane and
12253 /// every lane can be represented as the same repeating mask - allowing us to
12254 /// shuffle the sources with the repeating shuffle and then permute the result
12255 /// to the destination lanes.
12256 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
12257 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12258 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12259 int NumElts = VT.getVectorNumElements();
12260 int NumLanes = VT.getSizeInBits() / 128;
12261 int NumLaneElts = NumElts / NumLanes;
12263 // On AVX2 we may be able to just shuffle the lowest elements and then
12264 // broadcast the result.
12265 if (Subtarget.hasAVX2()) {
12266 for (unsigned BroadcastSize : {16, 32, 64}) {
12267 if (BroadcastSize <= VT.getScalarSizeInBits())
12269 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
12271 // Attempt to match a repeating pattern every NumBroadcastElts,
12272 // accounting for UNDEFs but only references the lowest 128-bit
12273 // lane of the inputs.
12274 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
12275 for (int i = 0; i != NumElts; i += NumBroadcastElts)
12276 for (int j = 0; j != NumBroadcastElts; ++j) {
12277 int M = Mask[i + j];
12280 int &R = RepeatMask[j];
12281 if (0 != ((M % NumElts) / NumLaneElts))
12283 if (0 <= R && R != M)
12290 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
12291 if (!FindRepeatingBroadcastMask(RepeatMask))
12294 // Shuffle the (lowest) repeated elements in place for broadcast.
12295 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
12297 // Shuffle the actual broadcast.
12298 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
12299 for (int i = 0; i != NumElts; i += NumBroadcastElts)
12300 for (int j = 0; j != NumBroadcastElts; ++j)
12301 BroadcastMask[i + j] = j;
12302 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
12307 // Bail if the shuffle mask doesn't cross 128-bit lanes.
12308 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
12311 // Bail if we already have a repeated lane shuffle mask.
12312 SmallVector<int, 8> RepeatedShuffleMask;
12313 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
12316 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
12317 // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
12318 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
12319 int NumSubLanes = NumLanes * SubLaneScale;
12320 int NumSubLaneElts = NumLaneElts / SubLaneScale;
12322 // Check that all the sources are coming from the same lane and see if we can
12323 // form a repeating shuffle mask (local to each sub-lane). At the same time,
12324 // determine the source sub-lane for each destination sub-lane.
12325 int TopSrcSubLane = -1;
12326 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
12327 SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
12328 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
12329 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
12331 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
12332 // Extract the sub-lane mask, check that it all comes from the same lane
12333 // and normalize the mask entries to come from the first lane.
12335 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
12336 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
12337 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
12340 int Lane = (M % NumElts) / NumLaneElts;
12341 if ((0 <= SrcLane) && (SrcLane != Lane))
12344 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
12345 SubLaneMask[Elt] = LocalM;
12348 // Whole sub-lane is UNDEF.
12352 // Attempt to match against the candidate repeated sub-lane masks.
12353 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
12354 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
12355 for (int i = 0; i != NumSubLaneElts; ++i) {
12356 if (M1[i] < 0 || M2[i] < 0)
12358 if (M1[i] != M2[i])
12364 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
12365 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
12368 // Merge the sub-lane mask into the matching repeated sub-lane mask.
12369 for (int i = 0; i != NumSubLaneElts; ++i) {
12370 int M = SubLaneMask[i];
12373 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
12374 "Unexpected mask element");
12375 RepeatedSubLaneMask[i] = M;
12378 // Track the topmost source sub-lane - by setting the remaining to UNDEF
12379 // we can greatly simplify shuffle matching.
12380 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
12381 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
12382 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
12386 // Bail if we failed to find a matching repeated sub-lane mask.
12387 if (Dst2SrcSubLanes[DstSubLane] < 0)
12390 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
12391 "Unexpected source lane");
12393 // Create a repeating shuffle mask for the entire vector.
12394 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
12395 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
12396 int Lane = SubLane / SubLaneScale;
12397 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
12398 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
12399 int M = RepeatedSubLaneMask[Elt];
12402 int Idx = (SubLane * NumSubLaneElts) + Elt;
12403 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
12406 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
12408 // Shuffle each source sub-lane to its destination.
12409 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
12410 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
12411 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
12412 if (SrcSubLane < 0)
12414 for (int j = 0; j != NumSubLaneElts; ++j)
12415 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
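// Worked example (hypothetical mask, illustration only): for v8f32 on AVX2
// with Mask = {5, 4, 7, 6, 1, 0, 3, 2}, the repeated mask is the in-lane
// swap {1, 0, 3, 2, 5, 4, 7, 6} and Dst2SrcSubLanes = {2, 3, 0, 1}, so the
// result is that in-lane shuffle followed by a 64-bit sub-lane permute that
// swaps the two 128-bit halves.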
12418 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
12422 static bool matchVectorShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
12423 unsigned &ShuffleImm,
12424 ArrayRef<int> Mask) {
12425 int NumElts = VT.getVectorNumElements();
12426 assert(VT.getScalarSizeInBits() == 64 &&
12427 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
12428 "Unexpected data type for VSHUFPD");
12430 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
12431 // Mask for V4F64: 0/1, 4/5, 2/3, 6/7, ..
12433 bool ShufpdMask = true;
12434 bool CommutableMask = true;
12435 for (int i = 0; i < NumElts; ++i) {
12436 if (Mask[i] == SM_SentinelUndef)
12440 int Val = (i & 6) + NumElts * (i & 1);
12441 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
12442 if (Mask[i] < Val || Mask[i] > Val + 1)
12443 ShufpdMask = false;
12444 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
12445 CommutableMask = false;
12446 ShuffleImm |= (Mask[i] % 2) << i;
12451 if (CommutableMask) {
12459 static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT,
12460 ArrayRef<int> Mask, SDValue V1,
12461 SDValue V2, SelectionDAG &DAG) {
12462 assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
12463 "Unexpected data type for VSHUFPD");
12465 unsigned Immediate = 0;
12466 if (!matchVectorShuffleWithSHUFPD(VT, V1, V2, Immediate, Mask))
12469 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
12470 DAG.getConstant(Immediate, DL, MVT::i8));
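// Worked example (hypothetical masks, illustration only): for v4f64,
// Mask = {0, 5, 2, 7} matches directly with ShuffleImm = 0b1010 (each bit
// selects the odd element of its pair), while Mask = {4, 1, 6, 3} only
// matches the commuted form, so V1 and V2 are swapped before emitting
// X86ISD::SHUFP.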
12473 static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT,
12474 ArrayRef<int> Mask, SDValue V1,
12475 SDValue V2, SelectionDAG &DAG) {
12476 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
12477 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
12479 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
12481 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
12483 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
12486 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
12488 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
12489 /// isn't available.
12490 static SDValue lowerV4F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12491 const APInt &Zeroable,
12492 SDValue V1, SDValue V2,
12493 const X86Subtarget &Subtarget,
12494 SelectionDAG &DAG) {
12495 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
12496 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
12497 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12499 if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask,
12500 Zeroable, Subtarget, DAG))
12503 if (V2.isUndef()) {
12504 // Check for being able to broadcast a single element.
12505 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
12506 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
12509 // Use low duplicate instructions for masks that match their pattern.
12510 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
12511 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
12513 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
12514 // Non-half-crossing single input shuffles can be lowered with an
12515 // interleaved permutation.
12516 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
12517 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
12518 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
12519 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
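// Worked example (hypothetical mask, illustration only): Mask = {0, 0, 3, 3}
// yields VPERMILPMask = 0b1100 - each set bit selects the odd element of its
// 128-bit half - producing {V1[0], V1[0], V1[3], V1[3]}.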
12522 // With AVX2 we have direct support for this permutation.
12523 if (Subtarget.hasAVX2())
12524 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
12525 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12527 // Try to create an in-lane repeating shuffle mask and then shuffle the
12528 // results into the target lanes.
12529 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
12530 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
12533 // Otherwise, fall back.
12534 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
12538 // Use dedicated unpack instructions for masks that match their pattern.
12540 lowerVectorShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
12543 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
12544 Zeroable, Subtarget, DAG))
12547 // Check if the blend happens to exactly fit that of SHUFPD.
12549 lowerVectorShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
12552 // Try to create an in-lane repeating shuffle mask and then shuffle the
12553 // results into the target lanes.
12554 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
12555 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
12558 // Try to simplify this by merging 128-bit lanes to enable a lane-based
12559 // shuffle. However, if we have AVX2 and either input is already in place,
12560 // we will be able to shuffle the other input in a single instruction, even
12561 // across lanes, so skip this pattern.
12562 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
12563 isShuffleMaskInputInPlace(1, Mask))))
12564 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
12565 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
12567 // If we have VLX support, we can use VEXPAND.
12568 if (Subtarget.hasVLX())
12569 if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask,
12570 V1, V2, DAG, Subtarget))
12573 // If we have AVX2 then we always want to lower with a blend because at v4 we
12574 // can fully permute the elements.
12575 if (Subtarget.hasAVX2())
12576 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
12579 // Otherwise fall back on generic lowering.
12580 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
12583 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
12585 /// This routine is only called when we have AVX2 and thus a reasonable
12586 /// instruction set for v4i64 shuffling.
12587 static SDValue lowerV4I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12588 const APInt &Zeroable,
12589 SDValue V1, SDValue V2,
12590 const X86Subtarget &Subtarget,
12591 SelectionDAG &DAG) {
12592 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
12593 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
12594 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12595 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
12597 if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask,
12598 Zeroable, Subtarget, DAG))
12601 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
12602 Zeroable, Subtarget, DAG))
12605 // Check for being able to broadcast a single element.
12606 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1, V2,
12607 Mask, Subtarget, DAG))
12610 if (V2.isUndef()) {
12611 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
12612 // can use lower latency instructions that will operate on both lanes.
12613 SmallVector<int, 2> RepeatedMask;
12614 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
12615 SmallVector<int, 4> PSHUFDMask;
12616 scaleShuffleMask(2, RepeatedMask, PSHUFDMask);
12617 return DAG.getBitcast(
12619 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
12620 DAG.getBitcast(MVT::v8i32, V1),
12621 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
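// Worked example (hypothetical mask, illustration only): v4i64
// Mask = {1, 0, 3, 2} repeats per 128-bit lane as {1, 0}; scaleShuffleMask
// widens that to the v8i32 mask {2, 3, 0, 1}, which becomes a PSHUFD with
// immediate 0x4E.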
12624 // AVX2 provides a direct instruction for permuting a single input across
12626 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
12627 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12630 // Try to use shift instructions.
12631 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
12632 Zeroable, Subtarget, DAG))
12635 // If we have VLX support, we can use VALIGN or VEXPAND.
12636 if (Subtarget.hasVLX()) {
12637 if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v4i64, V1, V2,
12638 Mask, Subtarget, DAG))
12641 if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask,
12642 V1, V2, DAG, Subtarget))
12646 // Try to use PALIGNR.
12647 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v4i64, V1, V2,
12648 Mask, Subtarget, DAG))
12651 // Use dedicated unpack instructions for masks that match their pattern.
12653 lowerVectorShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
12656 // Try to simplify this by merging 128-bit lanes to enable a lane-based
12657 // shuffle. However, if we have AVX2 and either input is already in place,
12658 // we will be able to shuffle the other input in a single instruction, even
12659 // across lanes, so skip this pattern.
12660 if (!isShuffleMaskInputInPlace(0, Mask) &&
12661 !isShuffleMaskInputInPlace(1, Mask))
12662 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
12663 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
12666 // Otherwise fall back on generic blend lowering.
12667 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
12671 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
12673 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
12674 /// isn't available.
12675 static SDValue lowerV8F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12676 const APInt &Zeroable,
12677 SDValue V1, SDValue V2,
12678 const X86Subtarget &Subtarget,
12679 SelectionDAG &DAG) {
12680 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
12681 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
12682 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
12684 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
12685 Zeroable, Subtarget, DAG))
12688 // Check for being able to broadcast a single element.
12689 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1, V2,
12690 Mask, Subtarget, DAG))
12693 // If the shuffle mask is repeated in each 128-bit lane, we have many more
12694 // options to efficiently lower the shuffle.
12695 SmallVector<int, 4> RepeatedMask;
12696 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
12697 assert(RepeatedMask.size() == 4 &&
12698 "Repeated masks must be half the mask width!");
12700 // Use even/odd duplicate instructions for masks that match their pattern.
12701 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
12702 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
12703 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
12704 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
12707 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
12708 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
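// Worked example (hypothetical mask, illustration only):
// Mask = {1, 0, 3, 2, 5, 4, 7, 6} repeats per lane as {1, 0, 3, 2}, giving a
// VPERMILPS immediate of 1 | (0 << 2) | (3 << 4) | (2 << 6) = 0xB1.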
12710 // Use dedicated unpack instructions for masks that match their pattern.
12712 lowerVectorShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
12715 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
12716 // have already handled any direct blends.
12717 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
12720 // Try to create an in-lane repeating shuffle mask and then shuffle the
12721 // results into the target lanes.
12722 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
12723 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
12726 // If we have a single input shuffle with different shuffle patterns in the
12727 // two 128-bit lanes use the variable mask to VPERMILPS.
12728 if (V2.isUndef()) {
12729 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
12730 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
12731 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
12733 if (Subtarget.hasAVX2())
12734 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
12736 // Otherwise, fall back.
12737 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
12741 // Try to simplify this by merging 128-bit lanes to enable a lane-based
12743 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
12744 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
12746 // If we have VLX support, we can use VEXPAND.
12747 if (Subtarget.hasVLX())
12748 if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask,
12749 V1, V2, DAG, Subtarget))
12752 // For non-AVX512, if the mask matches a 16-bit in-lane unpack pattern, try
12753 // to split, since after the split we get more efficient code using
12754 // vpunpcklwd and vpunpckhwd instrs than with vblend.
12755 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
12756 if (SDValue V = lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2,
12760 // If we have AVX2 then we always want to lower with a blend because at v8 we
12761 // can fully permute the elements.
12762 if (Subtarget.hasAVX2())
12763 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
12766 // Otherwise fall back on generic lowering.
12767 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
12770 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
12772 /// This routine is only called when we have AVX2 and thus a reasonable
12773 /// instruction set for v8i32 shuffling.
12774 static SDValue lowerV8I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12775 const APInt &Zeroable,
12776 SDValue V1, SDValue V2,
12777 const X86Subtarget &Subtarget,
12778 SelectionDAG &DAG) {
12779 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
12780 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
12781 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
12782 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
12784 // Whenever we can lower this as a zext, that instruction is strictly faster
12785 // than any alternative. It also allows us to fold memory operands into the
12786 // shuffle in many cases.
12787 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
12788 DL, MVT::v8i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
12791 // For non-AVX512, if the mask matches a 16-bit in-lane unpack pattern, try
12792 // to split, since after the split we get more efficient code than vblend
12793 // by using vpunpcklwd and vpunpckhwd instrs.
12794 if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
12795 !Subtarget.hasAVX512())
12797 lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, DAG))
12800 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
12801 Zeroable, Subtarget, DAG))
12804 // Check for being able to broadcast a single element.
12805 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1, V2,
12806 Mask, Subtarget, DAG))
12809 // If the shuffle mask is repeated in each 128-bit lane we can use more
12810 // efficient instructions that mirror the shuffles across the two 128-bit
12812 SmallVector<int, 4> RepeatedMask;
12813 bool Is128BitLaneRepeatedShuffle =
12814 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
12815 if (Is128BitLaneRepeatedShuffle) {
12816 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
12818 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
12819 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
12821 // Use dedicated unpack instructions for masks that match their pattern.
12823 lowerVectorShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
12827 // Try to use shift instructions.
12828 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
12829 Zeroable, Subtarget, DAG))
12832 // If we have VLX support, we can use VALIGN or EXPAND.
12833 if (Subtarget.hasVLX()) {
12834 if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v8i32, V1, V2,
12835 Mask, Subtarget, DAG))
12838 if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask,
12839 V1, V2, DAG, Subtarget))
12843 // Try to use byte rotation instructions.
12844 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
12845 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
12848 // Try to create an in-lane repeating shuffle mask and then shuffle the
12849 // results into the target lanes.
12850 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
12851 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
12854 // If the shuffle patterns aren't repeated but it is a single input, directly
12855 // generate a cross-lane VPERMD instruction.
12856 if (V2.isUndef()) {
12857 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
12858 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
12861 // Assume that a single SHUFPS is faster than an alternative sequence of
12862 // multiple instructions (even if the CPU has a domain penalty).
12863 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
12864 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
12865 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
12866 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
12867 SDValue ShufPS = lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
12868 CastV1, CastV2, DAG);
12869 return DAG.getBitcast(MVT::v8i32, ShufPS);
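// Worked example (hypothetical mask, illustration only): v8i32
// Mask = {0, 2, 8, 10, 4, 6, 12, 14} repeats per lane as {0, 2, 4, 6}, which
// is a single SHUFPS (immediate 0x88) of the inputs bitcast to v8f32,
// followed by a bitcast of the result back to v8i32.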
12872 // Try to simplify this by merging 128-bit lanes to enable a lane-based
12874 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
12875 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
12878 // Otherwise fall back on generic blend lowering.
12879 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
12883 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
12885 /// This routine is only called when we have AVX2 and thus a reasonable
12886 /// instruction set for v16i16 shuffling.
12887 static SDValue lowerV16I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12888 const APInt &Zeroable,
12889 SDValue V1, SDValue V2,
12890 const X86Subtarget &Subtarget,
12891 SelectionDAG &DAG) {
12892 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
12893 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
12894 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
12895 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
12897 // Whenever we can lower this as a zext, that instruction is strictly faster
12898 // than any alternative. It also allows us to fold memory operands into the
12899 // shuffle in many cases.
12900 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
12901 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
12904 // Check for being able to broadcast a single element.
12905 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1, V2,
12906 Mask, Subtarget, DAG))
12909 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
12910 Zeroable, Subtarget, DAG))
12913 // Use dedicated unpack instructions for masks that match their pattern.
12915 lowerVectorShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
12918 // Try to use shift instructions.
12919 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
12920 Zeroable, Subtarget, DAG))
12923 // Try to use byte rotation instructions.
12924 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
12925 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
12928 // Try to create an in-lane repeating shuffle mask and then shuffle the
12929 // results into the target lanes.
12930 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
12931 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
12934 if (V2.isUndef()) {
12935 // There are no generalized cross-lane shuffle operations available on i16
12937 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
12938 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
12941 SmallVector<int, 8> RepeatedMask;
12942 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
12943 // As this is a single-input shuffle, the repeated mask should be
12944 // a strictly valid v8i16 mask that we can pass through to the v8i16
12945 // lowering to handle even the v16 case.
12946 return lowerV8I16GeneralSingleInputVectorShuffle(
12947 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
12951 if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
12952 DL, MVT::v16i16, Mask, V1, V2, Zeroable, Subtarget, DAG))
12955 // AVX512BWVL can lower to VPERMW.
12956 if (Subtarget.hasBWI() && Subtarget.hasVLX())
12957 return lowerVectorShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
12959 // Try to simplify this by merging 128-bit lanes to enable a lane-based
12961 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
12962 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
12965 // Otherwise fall back on generic lowering.
12966 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
12969 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
12971 /// This routine is only called when we have AVX2 and thus a reasonable
12972 /// instruction set for v32i8 shuffling.
12973 static SDValue lowerV32I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12974 const APInt &Zeroable,
12975 SDValue V1, SDValue V2,
12976 const X86Subtarget &Subtarget,
12977 SelectionDAG &DAG) {
12978 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
12979 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
12980 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
12981 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
12983 // Whenever we can lower this as a zext, that instruction is strictly faster
12984 // than any alternative. It also allows us to fold memory operands into the
12985 // shuffle in many cases.
12986 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
12987 DL, MVT::v32i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
12990 // Check for being able to broadcast a single element.
12991 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1, V2,
12992 Mask, Subtarget, DAG))
12995 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
12996 Zeroable, Subtarget, DAG))
12999 // Use dedicated unpack instructions for masks that match their pattern.
13001 lowerVectorShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
13004 // Try to use shift instructions.
13005 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
13006 Zeroable, Subtarget, DAG))
13009 // Try to use byte rotation instructions.
13010 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
13011 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
13014 // Try to create an in-lane repeating shuffle mask and then shuffle the
13015 // results into the target lanes.
13016 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
13017 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
13020 // There are no generalized cross-lane shuffle operations available on i8
13022 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
13023 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask,
13026 if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
13027 DL, MVT::v32i8, Mask, V1, V2, Zeroable, Subtarget, DAG))
13030 // Try to simplify this by merging 128-bit lanes to enable a lane-based
13032 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
13033 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
13036 // Otherwise fall back on generic lowering.
13037 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
13040 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
13042 /// This routine either breaks down the specific type of a 256-bit x86 vector
13043 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
13044 /// together based on the available instructions.
13045 static SDValue lower256BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13046 MVT VT, SDValue V1, SDValue V2,
13047 const APInt &Zeroable,
13048 const X86Subtarget &Subtarget,
13049 SelectionDAG &DAG) {
13050 // If we have a single input to the zero element, insert that into V1 if we
13051 // can do so cheaply.
13052 int NumElts = VT.getVectorNumElements();
13053 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
13055 if (NumV2Elements == 1 && Mask[0] >= NumElts)
13056 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
13057 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
13060 // Handle special cases where the lower or upper half is UNDEF.
13062 lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
13065 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
13066 // can check for those subtargets here and avoid much of the subtarget
13067 // querying in the per-vector-type lowering routines. With AVX1 we have
13068 // essentially *zero* ability to manipulate a 256-bit vector with integer
13069 // types. Since we'll use floating point types there eventually, just
13070 // immediately cast everything to a float and operate entirely in that domain.
13071 if (VT.isInteger() && !Subtarget.hasAVX2()) {
13072 int ElementBits = VT.getScalarSizeInBits();
13073 if (ElementBits < 32) {
13074 // No floating point type available, if we can't use the bit operations
13075 // for masking/blending then decompose into 128-bit vectors.
13077 lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, DAG))
13079 if (SDValue V = lowerVectorShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
13081 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
13084 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
13085 VT.getVectorNumElements());
13086 V1 = DAG.getBitcast(FpVT, V1);
13087 V2 = DAG.getBitcast(FpVT, V2);
13088 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
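// Illustration (hypothetical types, not from the original source): on
// AVX1-only targets a v8i32 shuffle is bitcast to v8f32, shuffled in the
// floating point domain, and bitcast back, whereas a v16i16 shuffle takes
// the ElementBits < 32 path above and is bit-masked/bit-blended or split
// into two 128-bit shuffles.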
13091 switch (VT.SimpleTy) {
13093 return lowerV4F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13095 return lowerV4I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13097 return lowerV8F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13099 return lowerV8I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13101 return lowerV16I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13103 return lowerV32I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13106 llvm_unreachable("Not a valid 256-bit x86 vector type!");
13110 /// \brief Try to lower a vector shuffle as a shuffle of 128-bit subvectors.
13111 static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT,
13112 ArrayRef<int> Mask, SDValue V1,
13113 SDValue V2, SelectionDAG &DAG) {
13114 assert(VT.getScalarSizeInBits() == 64 &&
13115 "Unexpected element type size for 128bit shuffle.");
13117 // To handle 256 bit vector requires VLX and most probably
13118 // function lowerV2X128VectorShuffle() is better solution.
13119 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
13121 SmallVector<int, 4> WidenedMask;
13122 if (!canWidenShuffleElements(Mask, WidenedMask))
13125 // Check for patterns which can be matched with a single insert of a 256-bit
13127 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
13128 {0, 1, 2, 3, 0, 1, 2, 3});
13129 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
13130 {0, 1, 2, 3, 8, 9, 10, 11})) {
13131 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
13132 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
13133 DAG.getIntPtrConstant(0, DL));
13134 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
13135 OnlyUsesV1 ? V1 : V2,
13136 DAG.getIntPtrConstant(0, DL));
13137 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
13140 assert(WidenedMask.size() == 4);
13142 // See if this is an insertion of the lower 128 bits of V2 into V1.
13143 bool IsInsert = true;
13145 for (int i = 0; i < 4; ++i) {
13146 assert(WidenedMask[i] >= -1);
13147 if (WidenedMask[i] < 0)
13150 // Make sure all V1 subvectors are in place.
13151 if (WidenedMask[i] < 4) {
13152 if (WidenedMask[i] != i) {
13157 // Make sure we only have a single V2 index and it's the lowest 128 bits.
13158 if (V2Index >= 0 || WidenedMask[i] != 4) {
13165 if (IsInsert && V2Index >= 0) {
13166 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
13167 SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
13168 DAG.getIntPtrConstant(0, DL));
13169 return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
13172 // Try to lower to vshuf64x2/vshuf32x4.
13173 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
13174 unsigned PermMask = 0;
13175 // Ensure elements came from the same Op.
13176 for (int i = 0; i < 4; ++i) {
13177 assert(WidenedMask[i] >= -1);
13178 if (WidenedMask[i] < 0)
13181 SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
13182 unsigned OpIndex = i / 2;
13183 if (Ops[OpIndex].isUndef())
13185 else if (Ops[OpIndex] != Op)
13188 // Convert the 128-bit shuffle mask selection values into 128-bit selection
13189 // bits defined by a vshuf64x2 instruction's immediate control byte.
13190 PermMask |= (WidenedMask[i] % 4) << (i * 2);
13193 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
13194 DAG.getConstant(PermMask, DL, MVT::i8));
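// Worked example (hypothetical mask, illustration only): for v8f64 with
// WidenedMask = {0, 2, 5, 7}, the low two 128-bit blocks come from V1 and
// the high two from V2, giving
// PermMask = 0 | (2 << 2) | (1 << 4) | (3 << 6) = 0xD8.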
13197 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
13198 static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13199 const APInt &Zeroable,
13200 SDValue V1, SDValue V2,
13201 const X86Subtarget &Subtarget,
13202 SelectionDAG &DAG) {
13203 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
13204 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
13205 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13207 if (V2.isUndef()) {
13208 // Use low duplicate instructions for masks that match their pattern.
13209 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
13210 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
13212 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
13213 // Non-half-crossing single input shuffles can be lowered with an
13214 // interleaved permutation.
13215 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
13216 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
13217 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
13218 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
13219 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
13220 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
13223 SmallVector<int, 4> RepeatedMask;
13224 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
13225 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
13226 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
13229 if (SDValue Shuf128 =
13230 lowerV4X128VectorShuffle(DL, MVT::v8f64, Mask, V1, V2, DAG))
13233 if (SDValue Unpck =
13234 lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
13237 // Check if the blend happens to exactly fit that of SHUFPD.
13239 lowerVectorShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG))
13242 if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1,
13243 V2, DAG, Subtarget))
13246 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
13247 Zeroable, Subtarget, DAG))
13250 return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
13253 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
13254 static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13255 const APInt &Zeroable,
13256 SDValue V1, SDValue V2,
13257 const X86Subtarget &Subtarget,
13258 SelectionDAG &DAG) {
13259 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
13260 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
13261 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
13263 // If the shuffle mask is repeated in each 128-bit lane, we have many more
13264 // options to efficiently lower the shuffle.
13265 SmallVector<int, 4> RepeatedMask;
13266 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
13267 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
13269 // Use even/odd duplicate instructions for masks that match their pattern.
13270 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
13271 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
13272 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
13273 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
13276 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
13277 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
13279 // Use dedicated unpack instructions for masks that match their pattern.
13280 if (SDValue Unpck =
13281 lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
13284 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
13285 Zeroable, Subtarget, DAG))
13288 // Otherwise, fall back to a SHUFPS sequence.
13289 return lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
13291 // If we have AVX512F support, we can use VEXPAND.
13292 if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
13293 V1, V2, DAG, Subtarget))
13296 return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
13299 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
13300 static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13301 const APInt &Zeroable,
13302 SDValue V1, SDValue V2,
13303 const X86Subtarget &Subtarget,
13304 SelectionDAG &DAG) {
13305 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
13306 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
13307 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13309 if (SDValue Shuf128 =
13310 lowerV4X128VectorShuffle(DL, MVT::v8i64, Mask, V1, V2, DAG))
13313 if (V2.isUndef()) {
13314 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
13315 // can use lower latency instructions that will operate on all four
13317 SmallVector<int, 2> Repeated128Mask;
13318 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
13319 SmallVector<int, 4> PSHUFDMask;
13320 scaleShuffleMask(2, Repeated128Mask, PSHUFDMask);
13321 return DAG.getBitcast(
13323 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
13324 DAG.getBitcast(MVT::v16i32, V1),
13325 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13328 SmallVector<int, 4> Repeated256Mask;
13329 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
13330 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
13331 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
13334 // Try to use shift instructions.
13335 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
13336 Zeroable, Subtarget, DAG))
13339 // Try to use VALIGN.
13340 if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v8i64, V1, V2,
13341 Mask, Subtarget, DAG))
13344 // Try to use PALIGNR.
13345 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i64, V1, V2,
13346 Mask, Subtarget, DAG))
13349 if (SDValue Unpck =
13350 lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
13352 // If we have AVX512F support, we can use VEXPAND.
13353 if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1,
13354 V2, DAG, Subtarget))
13357 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
13358 Zeroable, Subtarget, DAG))
13361 return lowerVectorShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
13364 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
13365 static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13366 const APInt &Zeroable,
13367 SDValue V1, SDValue V2,
13368 const X86Subtarget &Subtarget,
13369 SelectionDAG &DAG) {
13370 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
13371 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
13372 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
13374 // Whenever we can lower this as a zext, that instruction is strictly faster
13375 // than any alternative. It also allows us to fold memory operands into the
13376 // shuffle in many cases.
13377 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
13378 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13381 // If the shuffle mask is repeated in each 128-bit lane we can use more
13382 // efficient instructions that mirror the shuffles across the four 128-bit
13384 SmallVector<int, 4> RepeatedMask;
13385 bool Is128BitLaneRepeatedShuffle =
13386 is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
13387 if (Is128BitLaneRepeatedShuffle) {
13388 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
13390 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
13391 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
13393 // Use dedicated unpack instructions for masks that match their pattern.
13395 lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
13399 // Try to use shift instructions.
13400 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
13401 Zeroable, Subtarget, DAG))
13404 // Try to use VALIGN.
13405 if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v16i32, V1, V2,
13406 Mask, Subtarget, DAG))
13409 // Try to use byte rotation instructions.
13410 if (Subtarget.hasBWI())
13411 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
13412 DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
13415 // Assume that a single SHUFPS is faster than using a permv shuffle.
13416 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13417 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
13418 SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
13419 SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
13420 SDValue ShufPS = lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
13421 CastV1, CastV2, DAG);
13422 return DAG.getBitcast(MVT::v16i32, ShufPS);
13424 // If we have AVX512F support, we can use VEXPAND.
13425 if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask,
13426 V1, V2, DAG, Subtarget))
13429 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
13430 Zeroable, Subtarget, DAG))
13432 return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
13435 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
13436 static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13437 const APInt &Zeroable,
13438 SDValue V1, SDValue V2,
13439 const X86Subtarget &Subtarget,
13440 SelectionDAG &DAG) {
13441 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
13442 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
13443 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
13444 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
13446 // Whenever we can lower this as a zext, that instruction is strictly faster
13447 // than any alternative. It also allows us to fold memory operands into the
13448 // shuffle in many cases.
13449 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
13450 DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13453 // Use dedicated unpack instructions for masks that match their pattern.
13455 lowerVectorShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
13458 // Try to use shift instructions.
13459 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
13460 Zeroable, Subtarget, DAG))
13463 // Try to use byte rotation instructions.
13464 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
13465 DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG))
13468 if (V2.isUndef()) {
13469 SmallVector<int, 8> RepeatedMask;
13470 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
13471 // As this is a single-input shuffle, the repeated mask should be
13472 // a strictly valid v8i16 mask that we can pass through to the v8i16
13473 // lowering to handle even the v32 case.
13474 return lowerV8I16GeneralSingleInputVectorShuffle(
13475 DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
13479 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
13480 Zeroable, Subtarget, DAG))
13483 return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
13486 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
13487 static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13488 const APInt &Zeroable,
13489 SDValue V1, SDValue V2,
13490 const X86Subtarget &Subtarget,
13491 SelectionDAG &DAG) {
13492 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
13493 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
13494 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
13495 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
13497 // Whenever we can lower this as a zext, that instruction is strictly faster
13498 // than any alternative. It also allows us to fold memory operands into the
13499 // shuffle in many cases.
13500 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
13501 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
13504 // Use dedicated unpack instructions for masks that match their pattern.
13506 lowerVectorShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
13509 // Try to use shift instructions.
13510 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
13511 Zeroable, Subtarget, DAG))
13514 // Try to use byte rotation instructions.
13515 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
13516 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
13519 if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
13520 DL, MVT::v64i8, Mask, V1, V2, Zeroable, Subtarget, DAG))
13523 // VBMI can use VPERMV/VPERMV3 byte shuffles.
13524 if (Subtarget.hasVBMI())
13525 return lowerVectorShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
13527 // Try to create an in-lane repeating shuffle mask and then shuffle the
13528 // the results into the target lanes.
13529 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
13530 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
13533 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
13534 Zeroable, Subtarget, DAG))
13537 // FIXME: Implement direct support for this type!
13538 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
13541 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
13543 /// This routine either breaks down the specific type of a 512-bit x86 vector
13544 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
13545 /// together based on the available instructions.
13546 static SDValue lower512BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13547 MVT VT, SDValue V1, SDValue V2,
13548 const APInt &Zeroable,
13549 const X86Subtarget &Subtarget,
13550 SelectionDAG &DAG) {
13551 assert(Subtarget.hasAVX512() &&
13552 "Cannot lower 512-bit vectors w/ basic ISA!");
13554 // If we have a single input to the zero element, insert that into V1 if we
13555 // can do so cheaply.
13556 int NumElts = Mask.size();
13557 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
13559 if (NumV2Elements == 1 && Mask[0] >= NumElts)
13560 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
13561 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
13564 // Check for being able to broadcast a single element.
13565 if (SDValue Broadcast =
13566 lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG))
13569 // Dispatch to each element type for lowering. If we don't have support for
13570 // specific element type shuffles at 512 bits, immediately split them and
13571 // lower them. Each lowering routine of a given type is allowed to assume that
13572 // the requisite ISA extensions for that element type are available.
13573 switch (VT.SimpleTy) {
13575 return lowerV8F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13577 return lowerV16F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13579 return lowerV8I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13581 return lowerV16I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13583 return lowerV32I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13585 return lowerV64I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
13588 llvm_unreachable("Not a valid 512-bit x86 vector type!");
13592 // Lower vXi1 vector shuffles.
13593 // There is no dedicated instruction on AVX-512 that shuffles the masks.
13594 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
13595 // vector, shuffle it, and then truncate back.
13596 static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
13597 MVT VT, SDValue V1, SDValue V2,
13598 const X86Subtarget &Subtarget,
13599 SelectionDAG &DAG) {
13600 assert(Subtarget.hasAVX512() &&
13601 "Cannot lower 512-bit vectors w/o basic ISA!");
13603 switch (VT.SimpleTy) {
13605 llvm_unreachable("Expected a vector of i1 elements");
13607 ExtVT = MVT::v2i64;
13610 ExtVT = MVT::v4i32;
13613 ExtVT = MVT::v8i64; // Take 512-bit type, more shuffles on KNL
13616 ExtVT = MVT::v16i32;
13619 ExtVT = MVT::v32i16;
13622 ExtVT = MVT::v64i8;
13626 if (ISD::isBuildVectorAllZeros(V1.getNode()))
13627 V1 = getZeroVector(ExtVT, Subtarget, DAG, DL);
13628 else if (ISD::isBuildVectorAllOnes(V1.getNode()))
13629 V1 = getOnesVector(ExtVT, DAG, DL);
13631 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
13634 V2 = DAG.getUNDEF(ExtVT);
13635 else if (ISD::isBuildVectorAllZeros(V2.getNode()))
13636 V2 = getZeroVector(ExtVT, Subtarget, DAG, DL);
13637 else if (ISD::isBuildVectorAllOnes(V2.getNode()))
13638 V2 = getOnesVector(ExtVT, DAG, DL);
13640 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
13642 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
13643 // Since i1 was sign-extended we can use X86ISD::CVT2MASK.
13644 int NumElems = VT.getVectorNumElements();
13645 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
13646 (Subtarget.hasDQI() && (NumElems < 32)))
13647 return DAG.getNode(X86ISD::CVT2MASK, DL, VT, Shuffle);
13649 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
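// Illustration (hypothetical type, not from the original source): a v8i1
// shuffle is sign-extended to v8i64, shuffled there, and converted back to a
// mask - via X86ISD::CVT2MASK when DQI is available for fewer than 32
// elements (or BWI for 32+), otherwise via ISD::TRUNCATE.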
13652 /// Helper function that returns true if the shuffle mask should be
13653 /// commuted to improve canonicalization.
13654 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
13655 int NumElements = Mask.size();
13657 int NumV1Elements = 0, NumV2Elements = 0;
13661 else if (M < NumElements)
13666 // Commute the shuffle as needed such that more elements come from V1 than
13667 // V2. This allows us to match the shuffle pattern strictly on how many
13668 // elements come from V1 without handling the symmetric cases.
13669 if (NumV2Elements > NumV1Elements)
13672 assert(NumV1Elements > 0 && "No V1 indices");
13674 if (NumV2Elements == 0)
13677 // When the number of V1 and V2 elements are the same, try to minimize the
13678 // number of uses of V2 in the low half of the vector. When that is tied,
13679 // ensure that the sum of indices for V1 is equal to or lower than the sum
13680 // of indices for V2. When those are equal, try to ensure that the number of odd
13681 // indices for V1 is lower than the number of odd indices for V2.
13682 if (NumV1Elements == NumV2Elements) {
13683 int LowV1Elements = 0, LowV2Elements = 0;
13684 for (int M : Mask.slice(0, NumElements / 2))
13685 if (M >= NumElements)
13689 if (LowV2Elements > LowV1Elements)
13691 if (LowV2Elements == LowV1Elements) {
13692 int SumV1Indices = 0, SumV2Indices = 0;
13693 for (int i = 0, Size = Mask.size(); i < Size; ++i)
13694 if (Mask[i] >= NumElements)
13696 else if (Mask[i] >= 0)
13698 if (SumV2Indices < SumV1Indices)
13700 if (SumV2Indices == SumV1Indices) {
13701 int NumV1OddIndices = 0, NumV2OddIndices = 0;
13702 for (int i = 0, Size = Mask.size(); i < Size; ++i)
13703 if (Mask[i] >= NumElements)
13704 NumV2OddIndices += i % 2;
13705 else if (Mask[i] >= 0)
13706 NumV1OddIndices += i % 2;
13707 if (NumV2OddIndices < NumV1OddIndices)
13716 /// \brief Top-level lowering for x86 vector shuffles.
13718 /// This handles decomposition, canonicalization, and lowering of all x86
13719 /// vector shuffles. Most of the specific lowering strategies are encapsulated
13720 /// above in helper routines. The canonicalization attempts to widen shuffles
13721 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
13722 /// s.t. only one of the two inputs needs to be tested, etc.
13723 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
13724 SelectionDAG &DAG) {
13725 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
13726 ArrayRef<int> Mask = SVOp->getMask();
13727 SDValue V1 = Op.getOperand(0);
13728 SDValue V2 = Op.getOperand(1);
13729 MVT VT = Op.getSimpleValueType();
13730 int NumElements = VT.getVectorNumElements();
13732 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
13734 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
13735 "Can't lower MMX shuffles");
13737 bool V1IsUndef = V1.isUndef();
13738 bool V2IsUndef = V2.isUndef();
13739 if (V1IsUndef && V2IsUndef)
13740 return DAG.getUNDEF(VT);
13742 // When we create a shuffle node we put the UNDEF node as the second operand,
13743 // but in some cases the first operand may be transformed to UNDEF.
13744 // In this case we should just commute the node.
13746 return DAG.getCommutedVectorShuffle(*SVOp);
13748 // Check for non-undef masks pointing at an undef vector and make the masks
13749 // undef as well. This makes it easier to match the shuffle based solely on
13753 if (M >= NumElements) {
13754 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
13755 for (int &M : NewMask)
13756 if (M >= NumElements)
13758 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
13761 // Check for illegal shuffle mask element index values.
13762 int MaskUpperLimit = Mask.size() * (V2IsUndef ? 1 : 2); (void)MaskUpperLimit;
13763 assert(llvm::all_of(Mask,
13764 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
13765 "Out of bounds shuffle index");
13767 // We actually see shuffles that are entirely re-arrangements of a set of
13768 // zero inputs. This mostly happens while decomposing complex shuffles into
13769 // simple ones. Directly lower these as a buildvector of zeros.
13770 APInt Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
13771 if (Zeroable.isAllOnesValue())
13772 return getZeroVector(VT, Subtarget, DAG, DL);
13774 // Try to collapse shuffles into using a vector type with fewer elements but
13775 // wider element types. We cap this to not form integers or floating point
13776 // elements wider than 64 bits, but it might be interesting to form i128
13777 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
13778 SmallVector<int, 16> WidenedMask;
13779 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
13780 canWidenShuffleElements(Mask, WidenedMask)) {
13781 MVT NewEltVT = VT.isFloatingPoint()
13782 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
13783 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
13784 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
13785 // Make sure that the new vector type is legal. For example, v2f64 isn't
13787 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
13788 V1 = DAG.getBitcast(NewVT, V1);
13789 V2 = DAG.getBitcast(NewVT, V2);
13790 return DAG.getBitcast(
13791 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
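// Worked example (hypothetical mask, illustration only): a v4i32 shuffle
// with Mask = {0, 1, 4, 5} widens to a v2i64 shuffle with
// WidenedMask = {0, 2}, since each pair of adjacent 32-bit elements moves
// together.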
13795 // Commute the shuffle if it will improve canonicalization.
13796 if (canonicalizeShuffleMaskWithCommute(Mask))
13797 return DAG.getCommutedVectorShuffle(*SVOp);
13799 // For each vector width, delegate to a specialized lowering routine.
13800 if (VT.is128BitVector())
13801 return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
13804 if (VT.is256BitVector())
13805 return lower256BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
13808 if (VT.is512BitVector())
13809 return lower512BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
13813 return lower1BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
13815 llvm_unreachable("Unimplemented!");
13818 /// \brief Try to lower a VSELECT instruction to a vector shuffle.
13819 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
13820 const X86Subtarget &Subtarget,
13821 SelectionDAG &DAG) {
13822 SDValue Cond = Op.getOperand(0);
13823 SDValue LHS = Op.getOperand(1);
13824 SDValue RHS = Op.getOperand(2);
13826 MVT VT = Op.getSimpleValueType();
13828 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13829 return SDValue();
13830 auto *CondBV = cast<BuildVectorSDNode>(Cond);
13832 // Only non-legal VSELECTs reach this lowering; convert those into generic
13833 // shuffles and re-use the shuffle lowering path for blends.
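// For illustration (not in the original source): a v4i32 vselect with the
// constant condition <1, 1, 0, 0> becomes a shuffle of LHS and RHS with mask
// <0, 1, 6, 7>; true lanes read LHS (indices 0..3), false lanes read RHS
// (indices 4..7), and a non-constant lane would become undef (-1).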
13834 SmallVector<int, 32> Mask;
13835 for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
13836 SDValue CondElt = CondBV->getOperand(i);
13837 Mask.push_back(
13838 isa<ConstantSDNode>(CondElt) ? i + (isNullConstant(CondElt) ? Size : 0)
13839 : -1);
13841 return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
13844 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13845 // A vselect where all conditions and data are constants can be optimized into
13846 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13847 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13848 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13849 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13850 return SDValue();
13852 // If this VSELECT has a vector of i1 as a mask, it will be directly matched
13853 // with patterns on the mask registers on AVX-512.
13854 if (Op->getOperand(0).getValueType().getScalarSizeInBits() == 1)
13855 return Op;
13857 // Try to lower this to a blend-style vector shuffle. This can handle all
13858 // constant condition cases.
13859 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
13860 return BlendOp;
13862 // Variable blends are only legal from SSE4.1 onward.
13863 if (!Subtarget.hasSSE41())
13864 return SDValue();
13867 MVT VT = Op.getSimpleValueType();
13869 // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
13870 // into an i1 condition so that we can use the mask-based 512-bit blend
13871 // instructions.
13872 if (VT.getSizeInBits() == 512) {
13873 SDValue Cond = Op.getOperand(0);
13874 // The vNi1 condition case should be handled above as it can be trivially
13875 // lowered.
13876 assert(Cond.getValueType().getScalarSizeInBits() ==
13877 VT.getScalarSizeInBits() &&
13878 "Should have a size-matched integer condition!");
13879 // Build a mask by testing the condition against itself (tests for zero).
13880 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
13881 SDValue Mask = DAG.getNode(X86ISD::TESTM, dl, MaskVT, Cond, Cond);
13882 // Now return a new VSELECT using the mask.
13883 return DAG.getSelect(dl, VT, Mask, Op.getOperand(1), Op.getOperand(2));
13886 // Only some types will be legal on some subtargets. If we can emit a legal
13887 // VSELECT-matching blend, return Op, but if we need to expand, return
13888 // a null value.
13889 switch (VT.SimpleTy) {
13891 // Most of the vector types have blends past SSE4.1.
13895 // The byte blends for AVX vectors were introduced only in AVX2.
13896 if (Subtarget.hasAVX2())
13903 // AVX-512 BWI and VLX features support VSELECT with i16 elements.
13904 if (Subtarget.hasBWI() && Subtarget.hasVLX())
13907 // FIXME: We should custom lower this by fixing the condition and using i8
13913 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13914 MVT VT = Op.getSimpleValueType();
13917 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13918 return SDValue();
13920 if (VT.getSizeInBits() == 8) {
13921 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13922 Op.getOperand(0), Op.getOperand(1));
13923 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13924 DAG.getValueType(VT));
13925 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13928 if (VT == MVT::f32) {
13929 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13930 // the result back to FR32 register. It's only worth matching if the
13931 // result has a single use which is a store or a bitcast to i32. And in
13932 // the case of a store, it's not worth it if the index is a constant 0,
13933 // because a MOVSSmr can be used instead, which is smaller and faster.
13934 if (!Op.hasOneUse())
13935 return SDValue();
13936 SDNode *User = *Op.getNode()->use_begin();
13937 if ((User->getOpcode() != ISD::STORE ||
13938 isNullConstant(Op.getOperand(1))) &&
13939 (User->getOpcode() != ISD::BITCAST ||
13940 User->getValueType(0) != MVT::i32))
13941 return SDValue();
13942 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13943 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
13945 return DAG.getBitcast(MVT::f32, Extract);
13948 if (VT == MVT::i32 || VT == MVT::i64) {
13949 // ExtractPS/pextrq work with a constant index.
13950 if (isa<ConstantSDNode>(Op.getOperand(1)))
13951 return Op;
13957 /// Extract one bit from a mask vector, like v16i1 or v8i1.
13958 /// AVX-512 feature.
13960 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13961 SDValue Vec = Op.getOperand(0);
13963 MVT VecVT = Vec.getSimpleValueType();
13964 SDValue Idx = Op.getOperand(1);
13965 MVT EltVT = Op.getSimpleValueType();
13967 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
13968 "Unexpected vector type in ExtractBitFromMaskVector");
13970 // A variable index can't be handled in mask registers;
13971 // extend the vector to VR512/VR128.
13972 if (!isa<ConstantSDNode>(Idx)) {
13973 unsigned NumElts = VecVT.getVectorNumElements();
13974 // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
13975 // than extending to 128/256 bits.
13976 unsigned VecSize = (NumElts <= 4 ? 128 : 512);
13977 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(VecSize/NumElts), NumElts);
13978 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVT, Vec);
13979 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13980 ExtVT.getVectorElementType(), Ext, Idx);
13981 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13984 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13985 if ((!Subtarget.hasDQI() && (VecVT.getVectorNumElements() == 8)) ||
13986 (VecVT.getVectorNumElements() < 8)) {
13987 // Use kshiftlw/rw instruction.
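// For illustration (not in the original source): extracting bit 3 of a v8i1
// mask on this path widens it to v16i1, shifts left by 15 - 3 = 12 so the
// wanted bit lands in the top position, then shifts right by 15 so it lands
// in bit 0, which is the element that gets extracted below.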
13988 VecVT = MVT::v16i1;
13989 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT,
13990 DAG.getUNDEF(VecVT),
13992 DAG.getIntPtrConstant(0, dl));
13994 unsigned MaxSift = VecVT.getVectorNumElements() - 1;
13995 if (MaxSift - IdxVal)
13996 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, VecVT, Vec,
13997 DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8));
13998 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, Vec,
13999 DAG.getConstant(MaxSift, dl, MVT::i8));
14000 return DAG.getNode(X86ISD::VEXTRACT, dl, Op.getSimpleValueType(), Vec,
14001 DAG.getIntPtrConstant(0, dl));
14005 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
14006 SelectionDAG &DAG) const {
14008 SDValue Vec = Op.getOperand(0);
14009 MVT VecVT = Vec.getSimpleValueType();
14010 SDValue Idx = Op.getOperand(1);
14012 if (VecVT.getVectorElementType() == MVT::i1)
14013 return ExtractBitFromMaskVector(Op, DAG);
14015 if (!isa<ConstantSDNode>(Idx)) {
14016 // It's more profitable to go through memory (1 cycle throughput)
14017 // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
14018 // The IACA tool was used to get the performance estimates
14019 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
14021 // example : extractelement <16 x i8> %a, i32 %i
14023 // Block Throughput: 3.00 Cycles
14024 // Throughput Bottleneck: Port5
14026 // | Num Of | Ports pressure in cycles | |
14027 // | Uops | 0 - DV | 5 | 6 | 7 | |
14028 // ---------------------------------------------
14029 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
14030 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
14031 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
14032 // Total Num Of Uops: 4
14035 // Block Throughput: 1.00 Cycles
14036 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
14038 // | | Ports pressure in cycles | |
14039 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
14040 // ---------------------------------------------------------
14041 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
14042 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
14043 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
14044 // Total Num Of Uops: 4
14049 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
14051 // If this is a 256-bit vector result, first extract the 128-bit vector and
14052 // then extract the element from the 128-bit vector.
14053 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
14054 // Get the 128-bit vector.
14055 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
14056 MVT EltVT = VecVT.getVectorElementType();
14058 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
14059 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
14061 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
14062 // this can be done with a mask.
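// For illustration (not in the original source): for a v8i32 source,
// ElemsPerChunk is 4, so extracting element 6 becomes extracting element
// 6 & 3 == 2 from the upper 128-bit half.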
14063 IdxVal &= ElemsPerChunk - 1;
14064 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
14065 DAG.getConstant(IdxVal, dl, MVT::i32));
14068 assert(VecVT.is128BitVector() && "Unexpected vector length");
14070 MVT VT = Op.getSimpleValueType();
14072 if (VT.getSizeInBits() == 16) {
14073 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
14074 // we're going to zero extend the register or fold the store (SSE41 only).
14075 if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
14076 !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
14077 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
14078 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
14079 DAG.getBitcast(MVT::v4i32, Vec), Idx));
14081 // Transform it so it matches pextrw, which produces a 32-bit result.
14082 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
14083 Op.getOperand(0), Op.getOperand(1));
14084 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
14085 DAG.getValueType(VT));
14086 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
14089 if (Subtarget.hasSSE41())
14090 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
14093 // TODO: We only extract a single element from v16i8; we can probably afford
14094 // to be more aggressive here before using the default approach of spilling to
14095 // the stack.
14096 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
14097 // Extract either the lowest i32 or any i16, and extract the sub-byte.
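// For illustration (not in the original source): extracting byte 2 of a v16i8
// takes dword 0 of the vector as an i32, shifts it right by (2 % 4) * 8 == 16
// bits, and truncates the result to i8.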
14098 int DWordIdx = IdxVal / 4;
14099 if (DWordIdx == 0) {
14100 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
14101 DAG.getBitcast(MVT::v4i32, Vec),
14102 DAG.getIntPtrConstant(DWordIdx, dl));
14103 int ShiftVal = (IdxVal % 4) * 8;
14105 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
14106 DAG.getConstant(ShiftVal, dl, MVT::i32));
14107 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
14110 int WordIdx = IdxVal / 2;
14111 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
14112 DAG.getBitcast(MVT::v8i16, Vec),
14113 DAG.getIntPtrConstant(WordIdx, dl));
14114 int ShiftVal = (IdxVal % 2) * 8;
14116 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
14117 DAG.getConstant(ShiftVal, dl, MVT::i16));
14118 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
14121 if (VT.getSizeInBits() == 32) {
14125 // SHUFPS the element to the lowest double word, then movss.
14126 int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
14127 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
14128 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
14129 DAG.getIntPtrConstant(0, dl));
14132 if (VT.getSizeInBits() == 64) {
14133 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
14134 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
14135 // to match extract_elt for f64.
14139 // UNPCKHPD the element to the lowest double word, then movsd.
14140 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
14141 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
14142 int Mask[2] = { 1, -1 };
14143 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
14144 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
14145 DAG.getIntPtrConstant(0, dl));
14151 /// Insert one bit into a mask vector, like v16i1 or v8i1.
14152 /// AVX-512 feature.
14154 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
14156 SDValue Vec = Op.getOperand(0);
14157 SDValue Elt = Op.getOperand(1);
14158 SDValue Idx = Op.getOperand(2);
14159 MVT VecVT = Vec.getSimpleValueType();
14161 if (!isa<ConstantSDNode>(Idx)) {
14162 // Non-constant index. Extend the source and destination,
14163 // insert element and then truncate the result.
14164 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
14165 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
14166 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
14167 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
14168 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
14169 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
14172 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
14173 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
14174 unsigned NumElems = VecVT.getVectorNumElements();
14176 if (Vec.isUndef()) {
14178 EltInVec = DAG.getNode(X86ISD::KSHIFTL, dl, VecVT, EltInVec,
14179 DAG.getConstant(IdxVal, dl, MVT::i8));
14183 // Insertion of one bit into first position
14184 if (IdxVal == 0) {
14185 // Clean top bits of vector.
14186 EltInVec = DAG.getNode(X86ISD::KSHIFTL, dl, VecVT, EltInVec,
14187 DAG.getConstant(NumElems - 1, dl, MVT::i8));
14188 EltInVec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, EltInVec,
14189 DAG.getConstant(NumElems - 1, dl, MVT::i8));
14190 // Clean the first bit in source vector.
14191 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, Vec,
14192 DAG.getConstant(1, dl, MVT::i8));
14193 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, VecVT, Vec,
14194 DAG.getConstant(1, dl, MVT::i8));
14196 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
14198 // Insertion of one bit into last position
14199 if (IdxVal == NumElems - 1) {
14200 // Move the bit to the last position inside the vector.
14201 EltInVec = DAG.getNode(X86ISD::KSHIFTL, dl, VecVT, EltInVec,
14202 DAG.getConstant(IdxVal, dl, MVT::i8));
14203 // Clean the last bit in the source vector.
14204 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, VecVT, Vec,
14205 DAG.getConstant(1, dl, MVT::i8));
14206 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, Vec,
14207 DAG.getConstant(1, dl, MVT::i8));
14209 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
14212 // Use shuffle to insert element.
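// For illustration (not in the original source): inserting into element 2 of
// an 8-element mask uses the shuffle mask <0, 1, 8, 3, 4, 5, 6, 7>, where
// index 8 selects lane 0 of EltInVec (the vector holding the new bit).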
14213 SmallVector<int, 64> MaskVec(NumElems);
14214 for (unsigned i = 0; i != NumElems; ++i)
14215 MaskVec[i] = (i == IdxVal) ? NumElems : i;
14217 return DAG.getVectorShuffle(VecVT, dl, Vec, EltInVec, MaskVec);
14220 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
14221 SelectionDAG &DAG) const {
14222 MVT VT = Op.getSimpleValueType();
14223 MVT EltVT = VT.getVectorElementType();
14224 unsigned NumElts = VT.getVectorNumElements();
14226 if (EltVT == MVT::i1)
14227 return InsertBitToMaskVector(Op, DAG);
14230 SDValue N0 = Op.getOperand(0);
14231 SDValue N1 = Op.getOperand(1);
14232 SDValue N2 = Op.getOperand(2);
14233 if (!isa<ConstantSDNode>(N2))
14234 return SDValue();
14235 auto *N2C = cast<ConstantSDNode>(N2);
14236 unsigned IdxVal = N2C->getZExtValue();
14238 bool IsZeroElt = X86::isZeroNode(N1);
14239 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
14241 // If we are inserting an element, see if we can do this more efficiently with
14242 // a blend shuffle with a rematerializable vector than a costly integer
14243 // insertion.
14244 // TODO: pre-SSE41 targets will tend to use bit masking - this could still
14245 // be beneficial if we are inserting several zeros and can combine the masks.
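// For illustration (not in the original source): inserting zero into lane 1
// of a v4i32 becomes a shuffle of the source with a zero vector using mask
// <0, 5, 2, 3>, which SSE4.1 can match as a single blend.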
14246 if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() && NumElts <= 8) {
14247 SmallVector<int, 8> BlendMask;
14248 for (unsigned i = 0; i != NumElts; ++i)
14249 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
14250 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
14251 : DAG.getConstant(-1, dl, VT);
14252 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
14255 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
14256 // into that, and then insert the subvector back into the result.
14257 if (VT.is256BitVector() || VT.is512BitVector()) {
14258 // With a 256-bit vector, we can insert into the zero element efficiently
14259 // using a blend if we have AVX or AVX2 and the right data type.
14260 if (VT.is256BitVector() && IdxVal == 0) {
14261 // TODO: It is worthwhile to cast integer to floating point and back
14262 // and incur a domain crossing penalty if that's what we'll end up
14263 // doing anyway after extracting to a 128-bit vector.
14264 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
14265 (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
14266 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
14267 N2 = DAG.getIntPtrConstant(1, dl);
14268 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
14272 // Get the desired 128-bit vector chunk.
14273 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
14275 // Insert the element into the desired chunk.
14276 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
14277 assert(isPowerOf2_32(NumEltsIn128));
14278 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
14279 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
14281 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
14282 DAG.getConstant(IdxIn128, dl, MVT::i32));
14284 // Insert the changed part back into the bigger vector
14285 return insert128BitVector(N0, V, IdxVal, DAG, dl);
14287 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
14289 // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
14290 // argument. SSE41 required for pinsrb.
14291 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
14293 if (VT == MVT::v8i16) {
14294 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
14295 Opc = X86ISD::PINSRW;
14297 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
14298 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
14299 Opc = X86ISD::PINSRB;
14302 if (N1.getValueType() != MVT::i32)
14303 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
14304 if (N2.getValueType() != MVT::i32)
14305 N2 = DAG.getIntPtrConstant(IdxVal, dl);
14306 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
14309 if (Subtarget.hasSSE41()) {
14310 if (EltVT == MVT::f32) {
14311 // Bits [7:6] of the constant are the source select. This will always be
14312 // zero here. The DAG Combiner may combine an extract_elt index into
14313 // these bits. For example (insert (extract, 3), 2) could be matched by
14314 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
14315 // Bits [5:4] of the constant are the destination select. This is the
14316 // value of the incoming immediate.
14317 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
14318 // combine either bitwise AND or insert of float 0.0 to set these bits.
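// For illustration (not in the original source): matching
// (insert (extract %X, 3), %N0, 2) as an INSERTPS would use the immediate
// (3 << 6) | (2 << 4) | 0 == 0xE0: source lane 3, destination lane 2, and an
// empty zero mask.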
14320 bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
14321 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
14322 // If this is an insertion of 32-bits into the low 32-bits of
14323 // a vector, we prefer to generate a blend with immediate rather
14324 // than an insertps. Blends are simpler operations in hardware and so
14325 // will always have equal or better performance than insertps.
14326 // But if optimizing for size and there's a load folding opportunity,
14327 // generate insertps because blendps does not have a 32-bit memory
14328 // operand.
14329 N2 = DAG.getIntPtrConstant(1, dl);
14330 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
14331 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2);
14333 N2 = DAG.getIntPtrConstant(IdxVal << 4, dl);
14334 // Create this as a scalar-to-vector.
14335 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
14336 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
14339 // PINSR* works with constant index.
14340 if (EltVT == MVT::i32 || EltVT == MVT::i64)
14341 return Op;
14347 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
14348 SelectionDAG &DAG) {
14350 MVT OpVT = Op.getSimpleValueType();
14352 // It's always cheaper to replace an xor+movd with xorps, and it simplifies
14353 // further combining.
14354 if (X86::isZeroNode(Op.getOperand(0)))
14355 return getZeroVector(OpVT, Subtarget, DAG, dl);
14357 // If this is a 256-bit vector result, first insert into a 128-bit
14358 // vector and then insert into the 256-bit vector.
14359 if (!OpVT.is128BitVector()) {
14360 // Insert into a 128-bit vector.
14361 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
14362 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
14363 OpVT.getVectorNumElements() / SizeFactor);
14365 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
14367 // Insert the 128-bit vector.
14368 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
14370 assert(OpVT.is128BitVector() && "Expected an SSE type!");
14372 // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
14373 if (OpVT == MVT::v4i32)
14374 return Op;
14376 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
14377 return DAG.getBitcast(
14378 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
14381 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
14382 // a simple subregister reference or explicit instructions to grab
14383 // upper bits of a vector.
14384 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
14385 SelectionDAG &DAG) {
14386 assert(Subtarget.hasAVX() && "EXTRACT_SUBVECTOR requires AVX");
14389 SDValue In = Op.getOperand(0);
14390 SDValue Idx = Op.getOperand(1);
14391 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
14392 MVT ResVT = Op.getSimpleValueType();
14394 assert((In.getSimpleValueType().is256BitVector() ||
14395 In.getSimpleValueType().is512BitVector()) &&
14396 "Can only extract from 256-bit or 512-bit vectors");
14398 // If the input is a buildvector just emit a smaller one.
14399 unsigned ElemsPerChunk = ResVT.getVectorNumElements();
14400 if (In.getOpcode() == ISD::BUILD_VECTOR)
14401 return DAG.getBuildVector(
14402 ResVT, dl, makeArrayRef(In->op_begin() + IdxVal, ElemsPerChunk));
14404 // Everything else is legal.
14405 return Op;
14408 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
14409 // simple superregister reference or explicit instructions to insert
14410 // the upper bits of a vector.
14411 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
14412 SelectionDAG &DAG) {
14413 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
14415 return insert1BitVector(Op, DAG, Subtarget);
14418 // Returns the appropriate wrapper opcode for a global reference.
14419 unsigned X86TargetLowering::getGlobalWrapperKind(const GlobalValue *GV) const {
14420 // References to absolute symbols are never PC-relative.
14421 if (GV && GV->isAbsoluteSymbolRef())
14422 return X86ISD::Wrapper;
14424 CodeModel::Model M = getTargetMachine().getCodeModel();
14425 if (Subtarget.isPICStyleRIPRel() &&
14426 (M == CodeModel::Small || M == CodeModel::Kernel))
14427 return X86ISD::WrapperRIP;
14429 return X86ISD::Wrapper;
14432 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
14433 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
14434 // one of the above-mentioned nodes. It has to be wrapped because otherwise
14435 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
14436 // be used to form addressing modes. These wrapped nodes will be selected
14437 // during instruction selection.
14439 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
14440 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
14442 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
14443 // global base reg.
14444 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
14446 auto PtrVT = getPointerTy(DAG.getDataLayout());
14447 SDValue Result = DAG.getTargetConstantPool(
14448 CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
14450 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
14451 // With PIC, the address is actually $g + Offset.
14454 DAG.getNode(ISD::ADD, DL, PtrVT,
14455 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
14461 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
14462 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
14464 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
14465 // global base reg.
14466 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
14468 auto PtrVT = getPointerTy(DAG.getDataLayout());
14469 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
14471 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
14473 // With PIC, the address is actually $g + Offset.
14476 DAG.getNode(ISD::ADD, DL, PtrVT,
14477 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
14483 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
14484 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
14486 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
14487 // global base reg.
14488 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
14489 unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
14491 auto PtrVT = getPointerTy(DAG.getDataLayout());
14492 SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
14495 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
14497 // With PIC, the address is actually $g + Offset.
14498 if (isPositionIndependent() && !Subtarget.is64Bit()) {
14500 DAG.getNode(ISD::ADD, DL, PtrVT,
14501 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
14504 // For symbols that require a load from a stub to get the address, emit the
14505 // load.
14506 if (isGlobalStubReference(OpFlag))
14507 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
14508 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
14514 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
14515 // Create the TargetBlockAddressAddress node.
14516 unsigned char OpFlags =
14517 Subtarget.classifyBlockAddressReference();
14518 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
14519 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
14521 auto PtrVT = getPointerTy(DAG.getDataLayout());
14522 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
14523 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
14525 // With PIC, the address is actually $g + Offset.
14526 if (isGlobalRelativeToPICBase(OpFlags)) {
14527 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
14528 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
14534 SDValue X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
14535 const SDLoc &dl, int64_t Offset,
14536 SelectionDAG &DAG) const {
14537 // Create the TargetGlobalAddress node, folding in the constant
14538 // offset if it is legal.
14539 unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
14540 CodeModel::Model M = DAG.getTarget().getCodeModel();
14541 auto PtrVT = getPointerTy(DAG.getDataLayout());
14543 if (OpFlags == X86II::MO_NO_FLAG &&
14544 X86::isOffsetSuitableForCodeModel(Offset, M)) {
14545 // A direct static reference to a global.
14546 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
14549 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, OpFlags);
14552 Result = DAG.getNode(getGlobalWrapperKind(GV), dl, PtrVT, Result);
14554 // With PIC, the address is actually $g + Offset.
14555 if (isGlobalRelativeToPICBase(OpFlags)) {
14556 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
14557 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
14560 // For globals that require a load from a stub to get the address, emit the
14561 // load.
14562 if (isGlobalStubReference(OpFlags))
14563 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
14564 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
14566 // If there was a non-zero offset that we didn't fold, create an explicit
14567 // addition for it.
14569 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
14570 DAG.getConstant(Offset, dl, PtrVT));
14576 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
14577 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
14578 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
14579 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
14583 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
14584 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
14585 unsigned char OperandFlags, bool LocalDynamic = false) {
14586 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
14587 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
14589 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14590 GA->getValueType(0),
14594 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
14598 SDValue Ops[] = { Chain, TGA, *InFlag };
14599 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
14601 SDValue Ops[] = { Chain, TGA };
14602 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
14605 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
14606 MFI.setAdjustsStack(true);
14607 MFI.setHasCalls(true);
14609 SDValue Flag = Chain.getValue(1);
14610 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
14613 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
14615 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
14618 SDLoc dl(GA); // ? function entry point might be better
14619 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
14620 DAG.getNode(X86ISD::GlobalBaseReg,
14621 SDLoc(), PtrVT), InFlag);
14622 InFlag = Chain.getValue(1);
14624 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
14627 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
14629 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
14631 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
14632 X86::RAX, X86II::MO_TLSGD);
14635 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
14641 // Get the start address of the TLS block for this module.
14642 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
14643 .getInfo<X86MachineFunctionInfo>();
14644 MFI->incNumLocalDynamicTLSAccesses();
14648 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
14649 X86II::MO_TLSLD, /*LocalDynamic=*/true);
14652 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
14653 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
14654 InFlag = Chain.getValue(1);
14655 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
14656 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
14659 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
14663 unsigned char OperandFlags = X86II::MO_DTPOFF;
14664 unsigned WrapperKind = X86ISD::Wrapper;
14665 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14666 GA->getValueType(0),
14667 GA->getOffset(), OperandFlags);
14668 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
14670 // Add x@dtpoff with the base.
14671 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
14674 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
14675 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
14676 const EVT PtrVT, TLSModel::Model model,
14677 bool is64Bit, bool isPIC) {
14680 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
14681 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
14682 is64Bit ? 257 : 256));
14684 SDValue ThreadPointer =
14685 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
14686 MachinePointerInfo(Ptr));
14688 unsigned char OperandFlags = 0;
14689 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
14690 // the initial-exec model.
14691 unsigned WrapperKind = X86ISD::Wrapper;
14692 if (model == TLSModel::LocalExec) {
14693 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
14694 } else if (model == TLSModel::InitialExec) {
14696 OperandFlags = X86II::MO_GOTTPOFF;
14697 WrapperKind = X86ISD::WrapperRIP;
14699 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
14702 llvm_unreachable("Unexpected model");
14705 // emit "addl x@ntpoff,%eax" (local exec)
14706 // or "addl x@indntpoff,%eax" (initial exec)
14707 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
14709 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
14710 GA->getOffset(), OperandFlags);
14711 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
14713 if (model == TLSModel::InitialExec) {
14714 if (isPIC && !is64Bit) {
14715 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
14716 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
14720 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
14721 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
14724 // The address of the thread local variable is the add of the thread
14725 // pointer with the offset of the variable.
14726 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
14730 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
14732 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
14734 if (DAG.getTarget().Options.EmulatedTLS)
14735 return LowerToTLSEmulatedModel(GA, DAG);
14737 const GlobalValue *GV = GA->getGlobal();
14738 auto PtrVT = getPointerTy(DAG.getDataLayout());
14739 bool PositionIndependent = isPositionIndependent();
14741 if (Subtarget.isTargetELF()) {
14742 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
14744 case TLSModel::GeneralDynamic:
14745 if (Subtarget.is64Bit())
14746 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
14747 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
14748 case TLSModel::LocalDynamic:
14749 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
14750 Subtarget.is64Bit());
14751 case TLSModel::InitialExec:
14752 case TLSModel::LocalExec:
14753 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
14754 PositionIndependent);
14756 llvm_unreachable("Unknown TLS model.");
14759 if (Subtarget.isTargetDarwin()) {
14760 // Darwin only has one model of TLS. Lower to that.
14761 unsigned char OpFlag = 0;
14762 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
14763 X86ISD::WrapperRIP : X86ISD::Wrapper;
14765 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
14766 // global base reg.
14767 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
14769 OpFlag = X86II::MO_TLVP_PIC_BASE;
14771 OpFlag = X86II::MO_TLVP;
14773 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
14774 GA->getValueType(0),
14775 GA->getOffset(), OpFlag);
14776 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
14778 // With PIC32, the address is actually $g + Offset.
14780 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
14781 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
14784 // Lowering the machine ISD will make sure everything is in the right
14785 // location.
14786 SDValue Chain = DAG.getEntryNode();
14787 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
14788 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
14789 SDValue Args[] = { Chain, Offset };
14790 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
14791 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
14792 DAG.getIntPtrConstant(0, DL, true),
14793 Chain.getValue(1), DL);
14795 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
14796 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
14797 MFI.setAdjustsStack(true);
14799 // And our return value (tls address) is in the standard call return value
14800 // register.
14801 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
14802 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
14805 if (Subtarget.isTargetKnownWindowsMSVC() ||
14806 Subtarget.isTargetWindowsItanium() ||
14807 Subtarget.isTargetWindowsGNU()) {
14808 // Just use the implicit TLS architecture
14809 // Need to generate something similar to:
14810 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
14812 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
14813 // mov rcx, qword [rdx+rcx*8]
14814 // mov eax, .tls$:tlsvar
14815 // [rax+rcx] contains the address
14816 // Windows 64bit: gs:0x58
14817 // Windows 32bit: fs:__tls_array
14820 SDValue Chain = DAG.getEntryNode();
14822 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
14823 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
14824 // use its literal value of 0x2C.
14825 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
14826 ? Type::getInt8PtrTy(*DAG.getContext(),
14828 : Type::getInt32PtrTy(*DAG.getContext(),
14831 SDValue TlsArray = Subtarget.is64Bit()
14832 ? DAG.getIntPtrConstant(0x58, dl)
14833 : (Subtarget.isTargetWindowsGNU()
14834 ? DAG.getIntPtrConstant(0x2C, dl)
14835 : DAG.getExternalSymbol("_tls_array", PtrVT));
14837 SDValue ThreadPointer =
14838 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
14841 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
14842 res = ThreadPointer;
14844 // Load the _tls_index variable
14845 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
14846 if (Subtarget.is64Bit())
14847 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
14848 MachinePointerInfo(), MVT::i32);
14849 else
14850 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
14852 auto &DL = DAG.getDataLayout();
14854 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, PtrVT);
14855 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
14857 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
14860 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
14862 // Get the offset of the start of the .tls section.
14863 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14864 GA->getValueType(0),
14865 GA->getOffset(), X86II::MO_SECREL);
14866 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
14868 // The address of the thread local variable is the add of the thread
14869 // pointer with the offset of the variable.
14870 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
14873 llvm_unreachable("TLS not implemented for this target.");
14876 /// Lower SRA_PARTS and friends, which return two i32 values
14877 /// and take a 2 x i32 value to shift plus a shift amount.
14878 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14879 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14880 MVT VT = Op.getSimpleValueType();
14881 unsigned VTBits = VT.getSizeInBits();
14883 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14884 SDValue ShOpLo = Op.getOperand(0);
14885 SDValue ShOpHi = Op.getOperand(1);
14886 SDValue ShAmt = Op.getOperand(2);
14887 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
14888 // generic ISD nodes haven't. Insert an AND to be safe; it's optimized away
14889 // anyway.
14890 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14891 DAG.getConstant(VTBits - 1, dl, MVT::i8));
14892 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14893 DAG.getConstant(VTBits - 1, dl, MVT::i8))
14894 : DAG.getConstant(0, dl, VT);
14896 SDValue Tmp2, Tmp3;
14897 if (Op.getOpcode() == ISD::SHL_PARTS) {
14898 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14899 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14901 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14902 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
14905 // If the shift amount is larger than or equal to the width of a part, we
14906 // can't rely on the results of shld/shrd. Insert a test and select the
14907 // appropriate values for large shift amounts.
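// For illustration (not in the original source): an i64 SHL_PARTS by 40 on a
// 32-bit target has (ShAmt & 32) != 0, so the CMOVs below pick
// Hi = Lo << (40 & 31) == Lo << 8 and Lo = 0 instead of the SHLD/SHL results.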
14908 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14909 DAG.getConstant(VTBits, dl, MVT::i8));
14910 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14911 AndNode, DAG.getConstant(0, dl, MVT::i8));
14914 SDValue CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
14915 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14916 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14918 if (Op.getOpcode() == ISD::SHL_PARTS) {
14919 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14920 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14922 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14923 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14926 SDValue Ops[2] = { Lo, Hi };
14927 return DAG.getMergeValues(Ops, dl);
14930 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14931 SelectionDAG &DAG) const {
14932 SDValue Src = Op.getOperand(0);
14933 MVT SrcVT = Src.getSimpleValueType();
14934 MVT VT = Op.getSimpleValueType();
14937 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14938 if (SrcVT.isVector()) {
14939 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
14940 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
14941 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
14942 DAG.getUNDEF(SrcVT)));
14944 if (SrcVT.getVectorElementType() == MVT::i1) {
14945 if (SrcVT == MVT::v2i1 && TLI.isTypeLegal(SrcVT))
14946 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14947 DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v2i64, Src));
14948 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14949 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14950 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT, Src));
14955 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14956 "Unknown SINT_TO_FP to lower!");
14958 // These are really Legal; return the operand so the caller accepts it as
14959 // Legal.
14960 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14961 return Op;
14962 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14963 Subtarget.is64Bit()) {
14964 return Op;
14967 SDValue ValueToStore = Op.getOperand(0);
14968 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14969 !Subtarget.is64Bit())
14970 // Bitcasting to f64 here allows us to do a single 64-bit store from
14971 // an SSE register, avoiding the store forwarding penalty that would come
14972 // with two 32-bit stores.
14973 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
14975 unsigned Size = SrcVT.getSizeInBits()/8;
14976 MachineFunction &MF = DAG.getMachineFunction();
14977 auto PtrVT = getPointerTy(MF.getDataLayout());
14978 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
14979 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
14980 SDValue Chain = DAG.getStore(
14981 DAG.getEntryNode(), dl, ValueToStore, StackSlot,
14982 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
14983 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14986 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14987 SDValue StackSlot,
14988 SelectionDAG &DAG) const {
14991 SDVTList Tys;
14992 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14993 if (useSSE)
14994 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14995 else
14996 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14998 unsigned ByteSize = SrcVT.getSizeInBits()/8;
15000 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
15001 MachineMemOperand *MMO;
15003 int SSFI = FI->getIndex();
15004 MMO = DAG.getMachineFunction().getMachineMemOperand(
15005 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
15006 MachineMemOperand::MOLoad, ByteSize, ByteSize);
15008 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
15009 StackSlot = StackSlot.getOperand(1);
15011 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
15012 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
15014 Tys, Ops, SrcVT, MMO);
15017 Chain = Result.getValue(1);
15018 SDValue InFlag = Result.getValue(2);
15020 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
15021 // shouldn't be necessary except that RFP cannot be live across
15022 // multiple blocks. When stackifier is fixed, they can be uncoupled.
15023 MachineFunction &MF = DAG.getMachineFunction();
15024 unsigned SSFISize = Op.getValueSizeInBits()/8;
15025 int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
15026 auto PtrVT = getPointerTy(MF.getDataLayout());
15027 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
15028 Tys = DAG.getVTList(MVT::Other);
15030 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
15032 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
15033 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
15034 MachineMemOperand::MOStore, SSFISize, SSFISize);
15036 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
15037 Ops, Op.getValueType(), MMO);
15038 Result = DAG.getLoad(
15039 Op.getValueType(), DL, Chain, StackSlot,
15040 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
15046 /// 64-bit unsigned integer to double expansion.
15047 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
15048 SelectionDAG &DAG) const {
15049 // This algorithm is not obvious. Here is what we're trying to output:
15050 //
15051 //   movq       %rax,  %xmm0
15052 //   punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
15053 //   subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
15054 //   #ifdef __SSE3__
15055 //   haddpd     %xmm0, %xmm0
15056 //   #else
15057 //   pshufd     $0x4e, %xmm0, %xmm1
15058 //   addpd      %xmm1, %xmm0
15059 //   #endif
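// A sketch of why the constants work (illustration, not in the original
// source): the double with bit pattern 0x43300000_xxxxxxxx equals 2^52 + lo,
// and 0x45300000_yyyyyyyy equals 2^84 + hi * 2^32, so after subtracting
// { 2^52, 2^84 } the two lanes hold (double)lo and (double)(hi * 2^32), and
// the horizontal add produces (double)(hi * 2^32 + lo).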
15063 LLVMContext *Context = DAG.getContext();
15065 // Build some magic constants.
15066 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
15067 Constant *C0 = ConstantDataVector::get(*Context, CV0);
15068 auto PtrVT = getPointerTy(DAG.getDataLayout());
15069 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
15071 SmallVector<Constant*,2> CV1;
15072 CV1.push_back(
15073 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
15074 APInt(64, 0x4330000000000000ULL))));
15075 CV1.push_back(
15076 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
15077 APInt(64, 0x4530000000000000ULL))));
15078 Constant *C1 = ConstantVector::get(CV1);
15079 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
15081 // Load the 64-bit value into an XMM register.
15082 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
15085 DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
15086 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
15087 /* Alignment = */ 16);
15089 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
15092 DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
15093 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
15094 /* Alignment = */ 16);
15095 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
15096 // TODO: Are there any fast-math-flags to propagate here?
15097 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
15100 if (Subtarget.hasSSE3()) {
15101 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
15102 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
15104 SDValue S2F = DAG.getBitcast(MVT::v4i32, Sub);
15105 SDValue Shuffle = DAG.getVectorShuffle(MVT::v4i32, dl, S2F, S2F, {2,3,0,1});
15106 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
15107 DAG.getBitcast(MVT::v2f64, Shuffle), Sub);
15110 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
15111 DAG.getIntPtrConstant(0, dl));
15114 /// 32-bit unsigned integer to float expansion.
15115 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
15116 SelectionDAG &DAG) const {
15118 // FP constant to bias correct the final result.
15119 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
15120 MVT::f64);
15122 // Load the 32-bit value into an XMM register.
15123 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
15124 Op.getOperand(0));
15126 // Zero out the upper parts of the register.
15127 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
15129 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
15130 DAG.getBitcast(MVT::v2f64, Load),
15131 DAG.getIntPtrConstant(0, dl));
15133 // Or the load with the bias.
15134 SDValue Or = DAG.getNode(
15135 ISD::OR, dl, MVT::v2i64,
15136 DAG.getBitcast(MVT::v2i64,
15137 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
15138 DAG.getBitcast(MVT::v2i64,
15139 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
15141 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
15142 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
15144 // Subtract the bias.
15145 // TODO: Are there any fast-math-flags to propagate here?
15146 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
15148 // Handle final rounding.
15149 MVT DestVT = Op.getSimpleValueType();
15151 if (DestVT.bitsLT(MVT::f64))
15152 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
15153 DAG.getIntPtrConstant(0, dl));
15154 if (DestVT.bitsGT(MVT::f64))
15155 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
15157 // Handle final rounding.
15158 return Sub;
15161 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
15162 const X86Subtarget &Subtarget, SDLoc &DL) {
15163 if (Op.getSimpleValueType() != MVT::v2f64)
15164 return SDValue();
15166 SDValue N0 = Op.getOperand(0);
15167 assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
15169 // Legalize to v4i32 type.
15170 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
15171 DAG.getUNDEF(MVT::v2i32));
15173 if (Subtarget.hasAVX512())
15174 return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
15176 // Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT,
15177 // but using v2i32 to v2f64 with X86ISD::CVTSI2P.
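// A sketch of the identity used here (illustration, not in the original
// source): for an unsigned 32-bit u,
//   (double)u == (double)(u >> 16) * 65536.0 + (double)(u & 0xffff),
// and both halves fit in [0, 65535], so the signed CVTSI2P conversions below
// are exact.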
15178 SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32);
15179 SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32);
15181 // Two to the power of half-word-size.
15182 SDValue TWOHW = DAG.getConstantFP(1 << 16, DL, MVT::v2f64);
15184 // Clear upper part of LO, lower HI.
15185 SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord);
15186 SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask);
15188 SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI);
15189 fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW);
15190 SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO);
15192 // Add the two halves.
15193 return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO);
15196 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
15197 const X86Subtarget &Subtarget) {
15198 // The algorithm is the following:
15199 // #ifdef __SSE4_1__
15200 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
15201 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
15202 // (uint4) 0x53000000, 0xaa);
15204 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
15205 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
15207 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
15208 // return (float4) lo + fhi;
15210 // We shouldn't use it when unsafe-fp-math is enabled though: we might later
15211 // reassociate the two FADDs, and if we do that, the algorithm fails
15212 // spectacularly (PR24512).
15213 // FIXME: If we ever have some kind of Machine FMF, this should be marked
15214 // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
15215 // there's also the MachineCombiner reassociations happening on Machine IR.
15216 if (DAG.getTarget().Options.UnsafeFPMath)
15217 return SDValue();
15220 SDValue V = Op->getOperand(0);
15221 MVT VecIntVT = V.getSimpleValueType();
15222 bool Is128 = VecIntVT == MVT::v4i32;
15223 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
15224 // If we convert to something other than the supported type, e.g., to v4f64,
15225 // abort early.
15226 if (VecFloatVT != Op->getSimpleValueType(0))
15227 return SDValue();
15229 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
15230 "Unsupported custom type");
15232 // In the #ifdef/#else code, we have in common:
15233 // - The vector of constants:
15239 // Create the splat vector for 0x4b000000.
15240 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
15241 // Create the splat vector for 0x53000000.
15242 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
15244 // Create the right shift.
15245 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
15246 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
15249 if (Subtarget.hasSSE41()) {
15250 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
15251 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
15252 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
15253 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
15254 // Low will be bitcasted right away, so do not bother bitcasting back to its
15255 // original type.
15256 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
15257 VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
15258 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
15259 // (uint4) 0x53000000, 0xaa);
15260 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
15261 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
15262 // High will be bitcasted right away, so do not bother bitcasting back to
15263 // its original type.
15264 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
15265 VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
15267 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
15268 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
15269 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
15270 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
15272 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
15273 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
15276 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
15277 SDValue VecCstFAdd = DAG.getConstantFP(
15278 APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);
15280 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
15281 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
15282 // TODO: Are there any fast-math-flags to propagate here?
15284 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
15285 // return (float4) lo + fhi;
15286 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
15287 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
15290 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
15291 SelectionDAG &DAG) const {
15292 SDValue N0 = Op.getOperand(0);
15293 MVT SrcVT = N0.getSimpleValueType();
15296 if (SrcVT.getVectorElementType() == MVT::i1) {
15297 if (SrcVT == MVT::v2i1)
15298 return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
15299 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, N0));
15300 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
15301 return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
15302 DAG.getNode(ISD::ZERO_EXTEND, dl, IntegerVT, N0));
15305 switch (SrcVT.SimpleTy) {
15307 llvm_unreachable("Custom UINT_TO_FP is not supported!");
15312 MVT NVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
15313 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
15314 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
15317 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
15320 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
15323 assert(Subtarget.hasAVX512());
15324 return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
15325 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, N0));
15329 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
15330 SelectionDAG &DAG) const {
15331 SDValue N0 = Op.getOperand(0);
15333 auto PtrVT = getPointerTy(DAG.getDataLayout());
15335 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
15336 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
15337 // the optimization here.
15338 if (DAG.SignBitIsZero(N0))
15339 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
15341 if (Op.getSimpleValueType().isVector())
15342 return lowerUINT_TO_FP_vec(Op, DAG);
15344 MVT SrcVT = N0.getSimpleValueType();
15345 MVT DstVT = Op.getSimpleValueType();
15347 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
15348 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
15349 // Conversions from unsigned i32 to f32/f64 are legal,
15350 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
15351 return Op;
15354 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
15355 return LowerUINT_TO_FP_i64(Op, DAG);
15356 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
15357 return LowerUINT_TO_FP_i32(Op, DAG);
15358 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
15361 // Make a 64-bit buffer, and use it to build an FILD.
15362 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
15363 if (SrcVT == MVT::i32) {
15364 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
15365 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
15366 StackSlot, MachinePointerInfo());
15367 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
15368 OffsetSlot, MachinePointerInfo());
15369 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
15370 return Fild;
15373 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
15374 SDValue ValueToStore = Op.getOperand(0);
15375 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
15376 // Bitcasting to f64 here allows us to do a single 64-bit store from
15377 // an SSE register, avoiding the store forwarding penalty that would come
15378 // with two 32-bit stores.
15379 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
15380 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
15381 MachinePointerInfo());
15382 // For i64 source, we need to add the appropriate power of 2 if the input
15383 // was negative. This is the same as the optimization in
15384 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
15385 // we must be careful to do the computation in x87 extended precision, not
15386 // in SSE. (The generic code can't know it's OK to do this, or how to.)
15387 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
15388 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
15389 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
15390 MachineMemOperand::MOLoad, 8, 8);
15392 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
15393 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
15394 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
15395 MVT::i64, MMO);
15397 APInt FF(32, 0x5F800000ULL);
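// Note: 0x5F800000 is the IEEE-754 single-precision encoding of 2^64. FILD
// interprets the stored i64 as signed, so when the original value had its
// sign bit set the loaded result is off by exactly 2^64; the code below adds
// this fudge factor back in that case (and adds 0.0 otherwise).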
15399 // Check whether the sign bit is set.
15400 SDValue SignSet = DAG.getSetCC(
15401 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
15402 Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
15404 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
15405 SDValue FudgePtr = DAG.getConstantPool(
15406 ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);
15408 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
15409 SDValue Zero = DAG.getIntPtrConstant(0, dl);
15410 SDValue Four = DAG.getIntPtrConstant(4, dl);
15411 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
15412 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
15414 // Load the value out, extending it from f32 to f80.
15415 // FIXME: Avoid the extend by constructing the right constant pool?
15416 SDValue Fudge = DAG.getExtLoad(
15417 ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
15418 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
15419 /* Alignment = */ 4);
15420 // Extend everything to 80 bits to force it to be done on x87.
15421 // TODO: Are there any fast-math-flags to propagate here?
15422 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
15423 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
15424 DAG.getIntPtrConstant(0, dl));
15427 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
15428 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
15429 // just return an <SDValue(), SDValue()> pair.
15430 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
15431 // to i16, i32 or i64, and we lower it to a legal sequence.
15432 // If lowered to the final integer result we return a <result, SDValue()> pair.
15433 // Otherwise we lower it to a sequence ending with a FIST, return a
15434 // <FIST, StackSlot> pair, and the caller is responsible for loading
15435 // the final integer result from StackSlot.
15436 std::pair<SDValue,SDValue>
15437 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
15438 bool IsSigned, bool IsReplace) const {
15441 EVT DstTy = Op.getValueType();
15442 EVT TheVT = Op.getOperand(0).getValueType();
15443 auto PtrVT = getPointerTy(DAG.getDataLayout());
15445 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
15446 // f16 must be promoted before using the lowering in this routine.
15447 // fp128 does not use this lowering.
15448 return std::make_pair(SDValue(), SDValue());
15451 // If using FIST to compute an unsigned i64, we'll need some fixup
15452 // to handle values above the maximum signed i64. A FIST is always
15453 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
15454 bool UnsignedFixup = !IsSigned &&
15455 DstTy == MVT::i64 &&
15456 (!Subtarget.is64Bit() ||
15457 !isScalarFPTypeInSSEReg(TheVT));
15459 if (!IsSigned && DstTy != MVT::i64 && !Subtarget.hasAVX512()) {
15460 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
15461 // The low 32 bits of the fist result will have the correct uint32 result.
15462 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
15466 assert(DstTy.getSimpleVT() <= MVT::i64 &&
15467 DstTy.getSimpleVT() >= MVT::i16 &&
15468 "Unknown FP_TO_INT to lower!");
15470 // These are really Legal.
15471 if (DstTy == MVT::i32 &&
15472 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
15473 return std::make_pair(SDValue(), SDValue());
15474 if (Subtarget.is64Bit() &&
15475 DstTy == MVT::i64 &&
15476 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
15477 return std::make_pair(SDValue(), SDValue());
15479 // We lower FP->int64 into FISTP64 followed by a load from a temporary
15480 // stack slot.
15481 MachineFunction &MF = DAG.getMachineFunction();
15482 unsigned MemSize = DstTy.getSizeInBits()/8;
15483 int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
15484 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
15486 unsigned Opc;
15487 switch (DstTy.getSimpleVT().SimpleTy) {
15488 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
15489 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
15490 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
15491 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
15494 SDValue Chain = DAG.getEntryNode();
15495 SDValue Value = Op.getOperand(0);
15496 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
15498 if (UnsignedFixup) {
15500 // Conversion to unsigned i64 is implemented with a select,
15501 // depending on whether the source value fits in the range
15502 // of a signed i64. Let Thresh be the FP equivalent of
15503 // 0x8000000000000000ULL.
15505 // Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
15506 // FistSrc = (Value < Thresh) ? Value : (Value - Thresh);
15507 // Fist-to-mem64 FistSrc
15508 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
15509 // to XOR'ing the high 32 bits with Adjust.
15511 // Being a power of 2, Thresh is exactly representable in all FP formats.
15512 // For X87 we'd like to use the smallest FP type for this constant, but
15513 // for DAG type consistency we have to match the FP operand type.
15515 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
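// Note: 0x5f000000 is the single-precision encoding of 2^63, i.e. the FP
// value of the 0x8000000000000000ULL threshold mentioned above.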
15516 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
15517 bool LosesInfo = false;
15518 if (TheVT == MVT::f64)
15519 // The rounding mode is irrelevant as the conversion should be exact.
15520 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
15521 &LosesInfo);
15522 else if (TheVT == MVT::f80)
15523 Status = Thresh.convert(APFloat::x87DoubleExtended(),
15524 APFloat::rmNearestTiesToEven, &LosesInfo);
15526 assert(Status == APFloat::opOK && !LosesInfo &&
15527 "FP conversion should have been exact");
15529 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
15531 SDValue Cmp = DAG.getSetCC(DL,
15532 getSetCCResultType(DAG.getDataLayout(),
15533 *DAG.getContext(), TheVT),
15534 Value, ThreshVal, ISD::SETLT);
15535 Adjust = DAG.getSelect(DL, MVT::i32, Cmp,
15536 DAG.getConstant(0, DL, MVT::i32),
15537 DAG.getConstant(0x80000000, DL, MVT::i32));
15538 SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
15539 Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
15540 *DAG.getContext(), TheVT),
15541 Value, ThreshVal, ISD::SETLT);
15542 Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
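// For example, for an f80 input equal to 2^63 + 42, Adjust is 0x80000000 and
// FistSrc is 42; the FIST stores 42, and XOR'ing the high 32 bits of the
// result with Adjust sets bit 63, giving the expected unsigned value
// 2^63 + 42.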
15545 // FIXME: This causes a redundant load/store if the SSE-class value is already
15546 // in memory, such as if it is on the call stack.
15547 if (isScalarFPTypeInSSEReg(TheVT)) {
15548 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
15549 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
15550 MachinePointerInfo::getFixedStack(MF, SSFI));
15551 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
15552 SDValue Ops[] = {
15553 Chain, StackSlot, DAG.getValueType(TheVT)
15554 };
15556 MachineMemOperand *MMO =
15557 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
15558 MachineMemOperand::MOLoad, MemSize, MemSize);
15559 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
15560 Chain = Value.getValue(1);
15561 SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
15562 StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
15565 MachineMemOperand *MMO =
15566 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
15567 MachineMemOperand::MOStore, MemSize, MemSize);
15569 if (UnsignedFixup) {
15571 // Insert the FIST, load its result as two i32's,
15572 // and XOR the high i32 with Adjust.
15574 SDValue FistOps[] = { Chain, Value, StackSlot };
15575 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
15576 FistOps, DstTy, MMO);
15578 SDValue Low32 =
15579 DAG.getLoad(MVT::i32, DL, FIST, StackSlot, MachinePointerInfo());
15580 SDValue HighAddr = DAG.getMemBasePlusOffset(StackSlot, 4, DL);
15582 SDValue High32 =
15583 DAG.getLoad(MVT::i32, DL, FIST, HighAddr, MachinePointerInfo());
15584 High32 = DAG.getNode(ISD::XOR, DL, MVT::i32, High32, Adjust);
15586 if (Subtarget.is64Bit()) {
15587 // Join High32 and Low32 into a 64-bit result.
15588 // (High32 << 32) | Low32
15589 Low32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Low32);
15590 High32 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, High32);
15591 High32 = DAG.getNode(ISD::SHL, DL, MVT::i64, High32,
15592 DAG.getConstant(32, DL, MVT::i8));
15593 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i64, High32, Low32);
15594 return std::make_pair(Result, SDValue());
15597 SDValue ResultOps[] = { Low32, High32 };
15599 SDValue pair = IsReplace
15600 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResultOps)
15601 : DAG.getMergeValues(ResultOps, DL);
15602 return std::make_pair(pair, SDValue());
15604 // Build the FP_TO_INT*_IN_MEM
15605 SDValue Ops[] = { Chain, Value, StackSlot };
15606 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
15607 Ops, DstTy, MMO);
15608 return std::make_pair(FIST, StackSlot);
15612 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
15613 const X86Subtarget &Subtarget) {
15614 MVT VT = Op->getSimpleValueType(0);
15615 SDValue In = Op->getOperand(0);
15616 MVT InVT = In.getSimpleValueType();
15619 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
15620 return DAG.getNode(ISD::ZERO_EXTEND, dl, VT, In);
15622 // Optimize vectors in AVX mode:
15625 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
15626 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
15627 // Concat upper and lower parts.
15630 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
15631 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
15632 // Concat upper and lower parts.
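// For example, zero-extending v8i16 -> v8i32: interleaving the input with a
// zero vector via vpunpcklwd/vpunpckhwd places a zero word above each source
// word, so every i32 lane holds the zero-extended i16 element; the two
// halves are then concatenated. The code below reuses the same path for
// ANY_EXTEND by interleaving with undef instead of zero.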
15635 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
15636 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
15637 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
15638 return SDValue();
15640 if (Subtarget.hasInt256())
15641 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
15643 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
15644 SDValue Undef = DAG.getUNDEF(InVT);
15645 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
15646 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
15647 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
15649 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
15650 VT.getVectorNumElements()/2);
15652 OpLo = DAG.getBitcast(HVT, OpLo);
15653 OpHi = DAG.getBitcast(HVT, OpHi);
15655 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
15658 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
15659 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15660 MVT VT = Op->getSimpleValueType(0);
15661 SDValue In = Op->getOperand(0);
15662 MVT InVT = In.getSimpleValueType();
15664 unsigned NumElts = VT.getVectorNumElements();
15666 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1 &&
15667 (NumElts == 8 || NumElts == 16 || Subtarget.hasBWI()))
15668 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
15670 if (InVT.getVectorElementType() != MVT::i1)
15673 // Extend VT if the target is a 256- or 128-bit vector and VLX is not supported.
15675 if (!VT.is512BitVector() && !Subtarget.hasVLX())
15676 ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
15678 SDValue One =
15679 DAG.getConstant(APInt(ExtVT.getScalarSizeInBits(), 1), DL, ExtVT);
15680 SDValue Zero =
15681 DAG.getConstant(APInt::getNullValue(ExtVT.getScalarSizeInBits()), DL, ExtVT);
15683 SDValue SelectedVal = DAG.getSelect(DL, ExtVT, In, One, Zero);
15684 if (VT == ExtVT)
15685 return SelectedVal;
15686 return DAG.getNode(X86ISD::VTRUNC, DL, VT, SelectedVal);
15689 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
15690 SelectionDAG &DAG) {
15691 if (Subtarget.hasFp256())
15692 if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
15693 return Res;
15698 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
15699 SelectionDAG &DAG) {
15701 MVT VT = Op.getSimpleValueType();
15702 SDValue In = Op.getOperand(0);
15703 MVT SVT = In.getSimpleValueType();
15705 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
15706 return LowerZERO_EXTEND_AVX512(Op, Subtarget, DAG);
15708 if (Subtarget.hasFp256())
15709 if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
15710 return Res;
15712 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
15713 VT.getVectorNumElements() != SVT.getVectorNumElements());
15717 /// Helper to recursively truncate vector elements in half with PACKSS.
15718 /// It makes use of the fact that vector comparison results will be all-zeros
15719 /// or all-ones to use (vXi8 PACKSS(vYi16, vYi16)) instead of matching types.
15720 /// AVX2 (Int256) sub-targets require extra shuffling as the PACKSS operates
15721 /// within each 128-bit lane.
15722 static SDValue truncateVectorCompareWithPACKSS(EVT DstVT, SDValue In,
15725 const X86Subtarget &Subtarget) {
15726 // Requires SSE2 but AVX512 has fast truncate.
15727 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
15728 return SDValue();
15730 EVT SrcVT = In.getValueType();
15732 // No truncation required, we might get here due to recursive calls.
15733 if (SrcVT == DstVT)
15734 return In;
15736 // We only support vector truncation to 128 bits or greater from a
15737 // source of 256 bits or greater.
15738 if ((DstVT.getSizeInBits() % 128) != 0)
15739 return SDValue();
15740 if ((SrcVT.getSizeInBits() % 256) != 0)
15741 return SDValue();
15743 unsigned NumElems = SrcVT.getVectorNumElements();
15744 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
15745 assert(SrcVT.getSizeInBits() > DstVT.getSizeInBits() && "Illegal truncation");
15748 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getScalarSizeInBits() / 2);
15750 // Extract lower/upper subvectors.
15751 unsigned NumSubElts = NumElems / 2;
15752 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
15753 SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
15754 SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
15756 // 256bit -> 128bit truncate - PACKSS lower/upper 128-bit subvectors.
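// PACKSS narrows with signed saturation, which is only lossless here because
// every element is known to be all-zeros or all-ones (0 or -1), and those
// values survive the saturating pack unchanged.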
15757 if (SrcVT.is256BitVector()) {
15758 Lo = DAG.getBitcast(MVT::v8i16, Lo);
15759 Hi = DAG.getBitcast(MVT::v8i16, Hi);
15760 SDValue Res = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, Lo, Hi);
15761 return DAG.getBitcast(DstVT, Res);
15764 // AVX2: 512bit -> 256bit truncate - PACKSS lower/upper 256-bit subvectors.
15765 // AVX2: 512bit -> 128bit truncate - PACKSS(PACKSS, PACKSS).
15766 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
15767 Lo = DAG.getBitcast(MVT::v16i16, Lo);
15768 Hi = DAG.getBitcast(MVT::v16i16, Hi);
15769 SDValue Res = DAG.getNode(X86ISD::PACKSS, DL, MVT::v32i8, Lo, Hi);
15771 // 256-bit PACKSS(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
15772 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
15773 Res = DAG.getBitcast(MVT::v4i64, Res);
15774 Res = DAG.getVectorShuffle(MVT::v4i64, DL, Res, Res, {0, 2, 1, 3});
15776 if (DstVT.is256BitVector())
15777 return DAG.getBitcast(DstVT, Res);
15779 // If this is a 512-bit -> 128-bit truncation, truncate another stage.
15780 EVT PackedVT = EVT::getVectorVT(*DAG.getContext(), PackedSVT, NumElems);
15781 Res = DAG.getBitcast(PackedVT, Res);
15782 return truncateVectorCompareWithPACKSS(DstVT, Res, DL, DAG, Subtarget);
15785 // Recursively pack lower/upper subvectors, concat result and pack again.
15786 assert(SrcVT.getSizeInBits() >= 512 && "Expected 512-bit vector or greater");
15787 EVT PackedVT = EVT::getVectorVT(*DAG.getContext(), PackedSVT, NumElems / 2);
15788 Lo = truncateVectorCompareWithPACKSS(PackedVT, Lo, DL, DAG, Subtarget);
15789 Hi = truncateVectorCompareWithPACKSS(PackedVT, Hi, DL, DAG, Subtarget);
15791 PackedVT = EVT::getVectorVT(*DAG.getContext(), PackedSVT, NumElems);
15792 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
15793 return truncateVectorCompareWithPACKSS(DstVT, Res, DL, DAG, Subtarget);
15796 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
15797 const X86Subtarget &Subtarget) {
15800 MVT VT = Op.getSimpleValueType();
15801 SDValue In = Op.getOperand(0);
15802 MVT InVT = In.getSimpleValueType();
15804 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
15806 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
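// The boolean lives in bit 0 of each element; shifting it into the sign bit
// lets VPMOVB2M/VPMOVW2M (which collect the per-element sign bits) or TESTM
// (which checks each element for a non-zero AND) produce the mask register.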
15807 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
15808 if (InVT.getScalarSizeInBits() <= 16) {
15809 if (Subtarget.hasBWI()) {
15810 // Legal; this will be selected to VPMOVB2M/VPMOVW2M.
15811 // Shifting packed bytes is not supported natively, so bitcast to words.
15812 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
15813 SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, ExtVT,
15814 DAG.getBitcast(ExtVT, In),
15815 DAG.getConstant(ShiftInx, DL, ExtVT));
15816 ShiftNode = DAG.getBitcast(InVT, ShiftNode);
15817 return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
15819 // Use TESTD/Q; extend the vector to packed dwords/qwords.
15820 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
15821 "Unexpected vector type.");
15822 unsigned NumElts = InVT.getVectorNumElements();
15823 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
15824 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
15825 InVT = ExtVT;
15826 ShiftInx = InVT.getScalarSizeInBits() - 1;
15829 SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, InVT, In,
15830 DAG.getConstant(ShiftInx, DL, InVT));
15831 return DAG.getNode(X86ISD::TESTM, DL, VT, ShiftNode, ShiftNode);
15834 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
15836 MVT VT = Op.getSimpleValueType();
15837 SDValue In = Op.getOperand(0);
15838 MVT InVT = In.getSimpleValueType();
15840 if (VT == MVT::i1) {
15841 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
15842 "Invalid scalar TRUNCATE operation");
15843 if (InVT.getSizeInBits() >= 32)
15844 return SDValue();
15845 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
15846 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
15848 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
15849 "Invalid TRUNCATE operation");
15851 if (VT.getVectorElementType() == MVT::i1)
15852 return LowerTruncateVecI1(Op, DAG, Subtarget);
15854 // vpmovqb/w/d, vpmovdb/w, vpmovwb
15855 if (Subtarget.hasAVX512()) {
15856 // word to byte only under BWI
15857 if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) // v16i16 -> v16i8
15858 return DAG.getNode(X86ISD::VTRUNC, DL, VT,
15859 getExtendInVec(X86ISD::VSEXT, DL, MVT::v16i32, In, DAG));
15860 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
15863 // Truncate with PACKSS if we are truncating a vector zero/all-bits result.
15864 if (InVT.getScalarSizeInBits() == DAG.ComputeNumSignBits(In))
15865 if (SDValue V = truncateVectorCompareWithPACKSS(VT, In, DL, DAG, Subtarget))
15866 return V;
15868 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
15869 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
15870 if (Subtarget.hasInt256()) {
15871 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
15872 In = DAG.getBitcast(MVT::v8i32, In);
15873 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
15874 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
15875 DAG.getIntPtrConstant(0, DL));
15878 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
15879 DAG.getIntPtrConstant(0, DL));
15880 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
15881 DAG.getIntPtrConstant(2, DL));
15882 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
15883 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
15884 static const int ShufMask[] = {0, 2, 4, 6};
15885 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
15888 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
15889 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
15890 if (Subtarget.hasInt256()) {
15891 In = DAG.getBitcast(MVT::v32i8, In);
15893 // The PSHUFB mask:
15894 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
15895 -1, -1, -1, -1, -1, -1, -1, -1,
15896 16, 17, 20, 21, 24, 25, 28, 29,
15897 -1, -1, -1, -1, -1, -1, -1, -1 };
15898 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
15899 In = DAG.getBitcast(MVT::v4i64, In);
15901 static const int ShufMask2[] = {0, 2, -1, -1};
15902 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
15903 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
15904 DAG.getIntPtrConstant(0, DL));
15905 return DAG.getBitcast(VT, In);
15908 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
15909 DAG.getIntPtrConstant(0, DL));
15911 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
15912 DAG.getIntPtrConstant(4, DL));
15914 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
15915 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
15917 // The PSHUFB mask:
15918 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
15919 -1, -1, -1, -1, -1, -1, -1, -1};
15921 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
15922 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
15924 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
15925 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
15927 // The MOVLHPS Mask:
15928 static const int ShufMask2[] = {0, 1, 4, 5};
15929 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
15930 return DAG.getBitcast(MVT::v8i16, res);
15933 // Handle truncation of V256 to V128 using shuffles.
15934 if (!VT.is128BitVector() || !InVT.is256BitVector())
15937 assert(Subtarget.hasFp256() && "256-bit vector without AVX!");
15939 unsigned NumElems = VT.getVectorNumElements();
15940 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
15942 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
15943 // Prepare truncation shuffle mask
15944 for (unsigned i = 0; i != NumElems; ++i)
15945 MaskVec[i] = i * 2;
15946 In = DAG.getBitcast(NVT, In);
15947 SDValue V = DAG.getVectorShuffle(NVT, DL, In, In, MaskVec);
15948 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
15949 DAG.getIntPtrConstant(0, DL));
15952 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
15953 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
15954 MVT VT = Op.getSimpleValueType();
15956 if (VT.isVector()) {
15957 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
15958 SDValue Src = Op.getOperand(0);
15960 if (VT == MVT::v2i64 && Src.getSimpleValueType() == MVT::v2f32) {
15961 return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
15962 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
15963 DAG.getUNDEF(MVT::v2f32)));
15969 assert(!VT.isVector());
15971 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
15972 IsSigned, /*IsReplace=*/ false);
15973 SDValue FIST = Vals.first, StackSlot = Vals.second;
15974 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
15975 if (!FIST.getNode())
15976 return Op;
15978 if (StackSlot.getNode())
15979 // Load the result.
15980 return DAG.getLoad(VT, SDLoc(Op), FIST, StackSlot, MachinePointerInfo());
15982 // The node is the result.
15983 return FIST;
15986 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
15988 MVT VT = Op.getSimpleValueType();
15989 SDValue In = Op.getOperand(0);
15990 MVT SVT = In.getSimpleValueType();
15992 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
15994 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
15995 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
15996 In, DAG.getUNDEF(SVT)));
15999 /// The only differences between FABS and FNEG are the mask and the logic op.
16000 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
16001 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
16002 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
16003 "Wrong opcode for lowering FABS or FNEG.");
16005 bool IsFABS = (Op.getOpcode() == ISD::FABS);
16007 // If this is a FABS and it has an FNEG user, bail out to fold the combination
16008 // into an FNABS. We'll lower the FABS after that if it is still in use.
16010 for (SDNode *User : Op->uses())
16011 if (User->getOpcode() == ISD::FNEG)
16015 MVT VT = Op.getSimpleValueType();
16017 bool IsF128 = (VT == MVT::f128);
16019 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
16020 // decide if we should generate a 16-byte constant mask when we only need 4 or
16021 // 8 bytes for the scalar case.
16024 MVT LogicVT, EltVT;
16026 if (VT.isVector()) {
16027 LogicVT = VT;
16028 EltVT = VT.getVectorElementType();
16029 } else if (IsF128) {
16030 // SSE instructions are used for optimized f128 logical operations.
16031 LogicVT = MVT::f128;
16034 // There are no scalar bitwise logical SSE/AVX instructions, so we
16035 // generate a 16-byte vector constant and logic op even for the scalar case.
16036 // Using a 16-byte mask allows folding the load of the mask with
16037 // the logic op, so it can save (~4 bytes) on code size.
16038 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
16042 unsigned EltBits = EltVT.getSizeInBits();
16043 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
16044 APInt MaskElt =
16045 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignMask(EltBits);
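// For f32, for example, the mask is 0x7fffffff for FABS (clear the sign bit)
// and 0x80000000 for FNEG (flip the sign bit).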
16046 const fltSemantics &Sem =
16047 EltVT == MVT::f64 ? APFloat::IEEEdouble() :
16048 (IsF128 ? APFloat::IEEEquad() : APFloat::IEEEsingle());
16049 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
16051 SDValue Op0 = Op.getOperand(0);
16052 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
16053 unsigned LogicOp =
16054 IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
16055 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
16057 if (VT.isVector() || IsF128)
16058 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
16060 // For the scalar case extend to a 128-bit vector, perform the logic op,
16061 // and extract the scalar result back out.
16062 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
16063 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
16064 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
16065 DAG.getIntPtrConstant(0, dl));
16068 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
16069 SDValue Mag = Op.getOperand(0);
16070 SDValue Sign = Op.getOperand(1);
16073 // If the sign operand is smaller, extend it first.
16074 MVT VT = Op.getSimpleValueType();
16075 if (Sign.getSimpleValueType().bitsLT(VT))
16076 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
16078 // And if it is bigger, shrink it first.
16079 if (Sign.getSimpleValueType().bitsGT(VT))
16080 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
16082 // At this point the operands and the result should have the same
16083 // type, and that won't be f80 since that is not custom lowered.
16084 bool IsF128 = (VT == MVT::f128);
16085 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
16086 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
16087 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
16088 "Unexpected type in LowerFCOPYSIGN");
16090 MVT EltVT = VT.getScalarType();
16091 const fltSemantics &Sem =
16092 EltVT == MVT::f64 ? APFloat::IEEEdouble()
16093 : (IsF128 ? APFloat::IEEEquad() : APFloat::IEEEsingle());
16095 // Perform all scalar logic operations as 16-byte vectors because there are no
16096 // scalar FP logic instructions in SSE.
16097 // TODO: This isn't necessary. If we used scalar types, we might avoid some
16098 // unnecessary splats, but we might miss load folding opportunities. Should
16099 // this decision be based on OptimizeForSize?
16100 bool IsFakeVector = !VT.isVector() && !IsF128;
16101 MVT LogicVT = VT;
16102 if (IsFakeVector)
16103 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
16105 // The mask constants are automatically splatted for vector types.
16106 unsigned EltSizeInBits = VT.getScalarSizeInBits();
16107 SDValue SignMask = DAG.getConstantFP(
16108 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
16109 SDValue MagMask = DAG.getConstantFP(
16110 APFloat(Sem, ~APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
16112 // First, clear all bits but the sign bit from the second operand (sign).
16113 if (IsFakeVector)
16114 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
16115 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
16117 // Next, clear the sign bit from the first operand (magnitude).
16118 // TODO: If we had general constant folding for FP logic ops, this check
16119 // wouldn't be necessary.
16120 SDValue MagBits;
16121 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Mag)) {
16122 APFloat APF = Op0CN->getValueAPF();
16123 APF.clearSign();
16124 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
16126 // If the magnitude operand wasn't a constant, we need to AND out the sign.
16127 if (IsFakeVector)
16128 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
16129 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
16132 // OR the magnitude value with the sign bit.
16133 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
16134 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
16135 DAG.getIntPtrConstant(0, dl));
16138 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
16139 SDValue N0 = Op.getOperand(0);
16141 MVT VT = Op.getSimpleValueType();
16143 MVT OpVT = N0.getSimpleValueType();
16144 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
16145 "Unexpected type for FGETSIGN");
16147 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
16148 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
16149 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
16150 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
16151 Res = DAG.getZExtOrTrunc(Res, dl, VT);
16152 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
16156 // Check whether an OR'd tree is PTEST-able.
16157 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget &Subtarget,
16158 SelectionDAG &DAG) {
16159 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
16161 if (!Subtarget.hasSSE41())
16164 if (!Op->hasOneUse())
16167 SDNode *N = Op.getNode();
16170 SmallVector<SDValue, 8> Opnds;
16171 DenseMap<SDValue, unsigned> VecInMap;
16172 SmallVector<SDValue, 8> VecIns;
16173 EVT VT = MVT::Other;
16175 // Recognize a special case where a vector is casted into wide integer to
16176 // test all 0s.
16177 Opnds.push_back(N->getOperand(0));
16178 Opnds.push_back(N->getOperand(1));
16180 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
16181 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
16182 // BFS traverse all OR'd operands.
16183 if (I->getOpcode() == ISD::OR) {
16184 Opnds.push_back(I->getOperand(0));
16185 Opnds.push_back(I->getOperand(1));
16186 // Re-evaluate the number of nodes to be traversed.
16187 e += 2; // 2 more nodes (LHS and RHS) are pushed.
16191 // Quit if this is not an EXTRACT_VECTOR_ELT.
16192 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
16195 // Quit if without a constant index.
16196 SDValue Idx = I->getOperand(1);
16197 if (!isa<ConstantSDNode>(Idx))
16200 SDValue ExtractedFromVec = I->getOperand(0);
16201 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
16202 if (M == VecInMap.end()) {
16203 VT = ExtractedFromVec.getValueType();
16204 // Quit if not 128/256-bit vector.
16205 if (!VT.is128BitVector() && !VT.is256BitVector())
16207 // Quit if not the same type.
16208 if (VecInMap.begin() != VecInMap.end() &&
16209 VT != VecInMap.begin()->first.getValueType())
16211 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
16212 VecIns.push_back(ExtractedFromVec);
16214 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
16217 assert((VT.is128BitVector() || VT.is256BitVector()) &&
16218 "Not extracted from 128-/256-bit vector.");
16220 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
16222 for (DenseMap<SDValue, unsigned>::const_iterator
16223 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
16224 // Quit if not all elements are used.
16225 if (I->second != FullMask)
16229 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
16231 // Cast all vectors into TestVT for PTEST.
16232 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
16233 VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
16235 // If more than one full vector is evaluated, OR them first before PTEST.
16236 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
16237 // Each iteration will OR 2 nodes and append the result until there is only
16238 // 1 node left, i.e. the final OR'd value of all vectors.
16239 SDValue LHS = VecIns[Slot];
16240 SDValue RHS = VecIns[Slot + 1];
16241 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
16244 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
16247 /// \brief return true if \c Op has a use that doesn't just read flags.
16248 static bool hasNonFlagsUse(SDValue Op) {
16249 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
16251 SDNode *User = *UI;
16252 unsigned UOpNo = UI.getOperandNo();
16253 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
16254 // Look past the truncate.
16255 UOpNo = User->use_begin().getOperandNo();
16256 User = *User->use_begin();
16259 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
16260 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
16266 // Emit KTEST instruction for bit vectors on AVX-512
16267 static SDValue EmitKTEST(SDValue Op, SelectionDAG &DAG,
16268 const X86Subtarget &Subtarget) {
16269 if (Op.getOpcode() == ISD::BITCAST) {
16270 auto hasKTEST = [&](MVT VT) {
16271 unsigned SizeInBits = VT.getSizeInBits();
16272 return (Subtarget.hasDQI() && (SizeInBits == 8 || SizeInBits == 16)) ||
16273 (Subtarget.hasBWI() && (SizeInBits == 32 || SizeInBits == 64));
16275 SDValue Op0 = Op.getOperand(0);
16276 MVT Op0VT = Op0.getValueType().getSimpleVT();
16277 if (Op0VT.isVector() && Op0VT.getVectorElementType() == MVT::i1 &&
16279 return DAG.getNode(X86ISD::KTEST, SDLoc(Op), Op0VT, Op0, Op0);
16284 /// Emit nodes that will be selected as "test Op0,Op0", or something
16285 /// equivalent.
16286 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
16287 SelectionDAG &DAG) const {
16288 if (Op.getValueType() == MVT::i1) {
16289 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
16290 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
16291 DAG.getConstant(0, dl, MVT::i8));
16293 // CF and OF aren't always set the way we want. Determine which
16294 // of these we need.
16295 bool NeedCF = false;
16296 bool NeedOF = false;
16299 case X86::COND_A: case X86::COND_AE:
16300 case X86::COND_B: case X86::COND_BE:
16303 case X86::COND_G: case X86::COND_GE:
16304 case X86::COND_L: case X86::COND_LE:
16305 case X86::COND_O: case X86::COND_NO: {
16306 // Check if we really need to set the
16307 // Overflow flag. If NoSignedWrap is present
16308 // that is not actually needed.
16309 switch (Op->getOpcode()) {
16314 if (Op.getNode()->getFlags().hasNoSignedWrap())
16324 // See if we can use the EFLAGS value from the operand instead of
16325 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
16326 // we prove that the arithmetic won't overflow, we can't use OF or CF.
16327 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
16328 // Emit KTEST for bit vectors
16329 if (auto Node = EmitKTEST(Op, DAG, Subtarget))
16330 return Node;
16331 // Emit a CMP with 0, which is the TEST pattern.
16332 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
16333 DAG.getConstant(0, dl, Op.getValueType()));
16335 unsigned Opcode = 0;
16336 unsigned NumOperands = 0;
16338 // Truncate operations may prevent the merge of the SETCC instruction
16339 // and the arithmetic instruction before it. Attempt to truncate the operands
16340 // of the arithmetic instruction and use a reduced bit-width instruction.
16341 bool NeedTruncation = false;
16342 SDValue ArithOp = Op;
16343 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
16344 SDValue Arith = Op->getOperand(0);
16345 // Both the trunc and the arithmetic op need to have one user each.
16346 if (Arith->hasOneUse())
16347 switch (Arith.getOpcode()) {
16354 NeedTruncation = true;
16360 // Sometimes flags can be set either with an AND or with an SRL/SHL
16361 // instruction. The SRL/SHL variant should be preferred for masks longer than
16362 // this number of bits.
16363 const int ShiftToAndMaxMaskWidth = 32;
16364 const bool ZeroCheck = (X86CC == X86::COND_E || X86CC == X86::COND_NE);
16366 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
16367 // which may be the result of a CAST. We use the variable 'Op', which is the
16368 // non-casted variable when we check for possible users.
16369 switch (ArithOp.getOpcode()) {
16371 // Due to an isel shortcoming, be conservative if this add is likely to be
16372 // selected as part of a load-modify-store instruction. When the root node
16373 // in a match is a store, isel doesn't know how to remap non-chain non-flag
16374 // uses of other nodes in the match, such as the ADD in this case. This
16375 // leads to the ADD being left around and reselected, with the result being
16376 // two adds in the output. Alas, even if none of our users are stores, that
16377 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
16378 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
16379 // climbing the DAG back to the root, and it doesn't seem to be worth the
16380 // effort.
16381 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
16382 UE = Op.getNode()->use_end(); UI != UE; ++UI)
16383 if (UI->getOpcode() != ISD::CopyToReg &&
16384 UI->getOpcode() != ISD::SETCC &&
16385 UI->getOpcode() != ISD::STORE)
16388 if (ConstantSDNode *C =
16389 dyn_cast<ConstantSDNode>(ArithOp.getOperand(1))) {
16390 // An add of one will be selected as an INC.
16391 if (C->isOne() && !Subtarget.slowIncDec()) {
16392 Opcode = X86ISD::INC;
16397 // An add of negative one (subtract of one) will be selected as a DEC.
16398 if (C->isAllOnesValue() && !Subtarget.slowIncDec()) {
16399 Opcode = X86ISD::DEC;
16405 // Otherwise use a regular EFLAGS-setting add.
16406 Opcode = X86ISD::ADD;
16411 // If we have a constant logical shift that's only used in a comparison
16412 // against zero turn it into an equivalent AND. This allows turning it into
16413 // a TEST instruction later.
16414 if (ZeroCheck && Op->hasOneUse() &&
16415 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
16416 EVT VT = Op.getValueType();
16417 unsigned BitWidth = VT.getSizeInBits();
16418 unsigned ShAmt = Op->getConstantOperandVal(1);
16419 if (ShAmt >= BitWidth) // Avoid undefined shifts.
16421 APInt Mask = ArithOp.getOpcode() == ISD::SRL
16422 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
16423 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
16424 if (!Mask.isSignedIntN(ShiftToAndMaxMaskWidth))
16426 Op = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
16427 DAG.getConstant(Mask, dl, VT));
16432 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
16433 // because a TEST instruction will be better. However, AND should be
16434 // preferred if the instruction can be combined into ANDN.
16435 if (!hasNonFlagsUse(Op)) {
16436 SDValue Op0 = ArithOp->getOperand(0);
16437 SDValue Op1 = ArithOp->getOperand(1);
16438 EVT VT = ArithOp.getValueType();
16439 bool isAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
16440 bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
16441 bool isProperAndn = isAndn && isLegalAndnType && Subtarget.hasBMI();
16443 // If we cannot select an ANDN instruction, check if we can replace
16444 // AND+IMM64 with a shift before giving up. This is possible for masks
16445 // like 0xFF000000 or 0x00FFFFFF and if we care only about the zero flag.
16446 if (!isProperAndn) {
16450 assert(!isa<ConstantSDNode>(Op0) && "AND node isn't canonicalized");
16451 auto *CN = dyn_cast<ConstantSDNode>(Op1);
16455 const APInt &Mask = CN->getAPIntValue();
16456 if (Mask.isSignedIntN(ShiftToAndMaxMaskWidth))
16457 break; // Prefer TEST instruction.
16459 unsigned BitWidth = Mask.getBitWidth();
16460 unsigned LeadingOnes = Mask.countLeadingOnes();
16461 unsigned TrailingZeros = Mask.countTrailingZeros();
16463 if (LeadingOnes + TrailingZeros == BitWidth) {
16464 assert(TrailingZeros < VT.getSizeInBits() &&
16465 "Shift amount should be less than the type width");
16466 MVT ShTy = getScalarShiftAmountTy(DAG.getDataLayout(), VT);
16467 SDValue ShAmt = DAG.getConstant(TrailingZeros, dl, ShTy);
16468 Op = DAG.getNode(ISD::SRL, dl, VT, Op0, ShAmt);
16472 unsigned LeadingZeros = Mask.countLeadingZeros();
16473 unsigned TrailingOnes = Mask.countTrailingOnes();
16475 if (LeadingZeros + TrailingOnes == BitWidth) {
16476 assert(LeadingZeros < VT.getSizeInBits() &&
16477 "Shift amount should be less than the type width");
16478 MVT ShTy = getScalarShiftAmountTy(DAG.getDataLayout(), VT);
16479 SDValue ShAmt = DAG.getConstant(LeadingZeros, dl, ShTy);
16480 Op = DAG.getNode(ISD::SHL, dl, VT, Op0, ShAmt);
16491 // Due to the ISEL shortcoming noted above, be conservative if this op is
16492 // likely to be selected as part of a load-modify-store instruction.
16493 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
16494 UE = Op.getNode()->use_end(); UI != UE; ++UI)
16495 if (UI->getOpcode() == ISD::STORE)
16498 // Otherwise use a regular EFLAGS-setting instruction.
16499 switch (ArithOp.getOpcode()) {
16500 default: llvm_unreachable("unexpected operator!");
16501 case ISD::SUB: Opcode = X86ISD::SUB; break;
16502 case ISD::XOR: Opcode = X86ISD::XOR; break;
16503 case ISD::AND: Opcode = X86ISD::AND; break;
16505 if (!NeedTruncation && ZeroCheck) {
16506 if (SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG))
16507 return EFLAGS;
16509 Opcode = X86ISD::OR;
16523 return SDValue(Op.getNode(), 1);
16529 // If we found that truncation is beneficial, perform the truncation and
16530 // update 'Op'.
16531 if (NeedTruncation) {
16532 EVT VT = Op.getValueType();
16533 SDValue WideVal = Op->getOperand(0);
16534 EVT WideVT = WideVal.getValueType();
16535 unsigned ConvertedOp = 0;
16536 // Use a target machine opcode to prevent further DAGCombine
16537 // optimizations that may separate the arithmetic operations
16538 // from the setcc node.
16539 switch (WideVal.getOpcode()) {
16541 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
16542 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
16543 case ISD::AND: ConvertedOp = X86ISD::AND; break;
16544 case ISD::OR: ConvertedOp = X86ISD::OR; break;
16545 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
16549 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16550 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
16551 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
16552 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
16553 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
16559 // Emit KTEST for bit vectors
16560 if (auto Node = EmitKTEST(Op, DAG, Subtarget))
16561 return Node;
16563 // Emit a CMP with 0, which is the TEST pattern.
16564 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
16565 DAG.getConstant(0, dl, Op.getValueType()));
16567 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
16568 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
16570 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
16571 DAG.ReplaceAllUsesWith(Op, New);
16572 return SDValue(New.getNode(), 1);
16575 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
16576 /// equivalent.
16577 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
16578 const SDLoc &dl, SelectionDAG &DAG) const {
16579 if (isNullConstant(Op1))
16580 return EmitTest(Op0, X86CC, dl, DAG);
16582 assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
16583 "Unexpected comparison operation for MVT::i1 operands");
16585 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
16586 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
16587 // Only promote the compare up to i32 if it is a 16-bit operation
16588 // with an immediate. 16-bit immediates are to be avoided.
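// (A 16-bit immediate forces an operand-size prefix that changes the
// instruction length, a pattern that can stall the pre-decoders on many
// x86 CPUs.)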
16589 if ((Op0.getValueType() == MVT::i16 &&
16590 (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
16591 !DAG.getMachineFunction().getFunction()->optForMinSize() &&
16592 !Subtarget.isAtom()) {
16593 unsigned ExtendOp =
16594 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
16595 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
16596 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
16598 // Use SUB instead of CMP to enable CSE between SUB and CMP.
16599 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
16600 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
16602 return SDValue(Sub.getNode(), 1);
16604 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
16607 /// Convert a comparison if required by the subtarget.
16608 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
16609 SelectionDAG &DAG) const {
16610 // If the subtarget does not support the FUCOMI instruction, floating-point
16611 // comparisons have to be converted.
16612 if (Subtarget.hasCMov() ||
16613 Cmp.getOpcode() != X86ISD::CMP ||
16614 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
16615 !Cmp.getOperand(1).getValueType().isFloatingPoint())
16618 // The instruction selector will select an FUCOM instruction instead of
16619 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
16620 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
16621 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
16623 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
16624 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
16625 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
16626 DAG.getConstant(8, dl, MVT::i8));
16627 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
16629 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
16630 assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
16631 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
16634 /// Check if replacement of SQRT with RSQRT should be disabled.
16635 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
16636 EVT VT = Op.getValueType();
16638 // We never want to use both SQRT and RSQRT instructions for the same input.
16639 if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
16643 return Subtarget.hasFastVectorFSQRT();
16644 return Subtarget.hasFastScalarFSQRT();
16647 /// The minimum architected relative accuracy is 2^-12. We need one
16648 /// Newton-Raphson step to have a good float result (24 bits of precision).
16649 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
16650 SelectionDAG &DAG, int Enabled,
16651 int &RefinementSteps,
16652 bool &UseOneConstNR,
16653 bool Reciprocal) const {
16654 EVT VT = Op.getValueType();
16656 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
16657 // TODO: Add support for AVX512 (v16f32).
16658 // It is likely not profitable to do this for f64 because a double-precision
16659 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
16660 // instructions: convert to single, rsqrtss, convert back to double, refine
16661 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
16662 // along with FMA, this could be a throughput win.
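// A single Newton-Raphson step refines an rsqrt estimate E of 1/sqrt(X) as
// E' = E * (1.5 - 0.5 * X * E * E); the DAG combiner that calls this hook
// emits that refinement according to the RefinementSteps value set below.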
16663 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
16664 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
16665 (VT == MVT::v8f32 && Subtarget.hasAVX())) {
16666 if (RefinementSteps == ReciprocalEstimate::Unspecified)
16667 RefinementSteps = 1;
16669 UseOneConstNR = false;
16670 return DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
16675 /// The minimum architected relative accuracy is 2^-12. We need one
16676 /// Newton-Raphson step to have a good float result (24 bits of precision).
16677 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
16679 int &RefinementSteps) const {
16680 EVT VT = Op.getValueType();
16682 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
16683 // TODO: Add support for AVX512 (v16f32).
16684 // It is likely not profitable to do this for f64 because a double-precision
16685 // reciprocal estimate with refinement on x86 prior to FMA requires
16686 // 15 instructions: convert to single, rcpss, convert back to double, refine
16687 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
16688 // along with FMA, this could be a throughput win.
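// The corresponding Newton-Raphson step for a reciprocal estimate E of 1/X
// is E' = E * (2 - X * E), again emitted by the caller according to
// RefinementSteps.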
16690 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
16691 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
16692 (VT == MVT::v8f32 && Subtarget.hasAVX())) {
16693 // Enable estimate codegen with 1 refinement step for vector division.
16694 // Scalar division estimates are disabled because they break too much
16695 // real-world code. These defaults are intended to match GCC behavior.
16696 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
16697 return SDValue();
16699 if (RefinementSteps == ReciprocalEstimate::Unspecified)
16700 RefinementSteps = 1;
16702 return DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
16707 /// If we have at least two divisions that use the same divisor, convert to
16708 /// multiplication by a reciprocal. This may need to be adjusted for a given
16709 /// CPU if a division's cost is not at least twice the cost of a multiplication.
16710 /// This is because we still need one division to calculate the reciprocal and
16711 /// then we need two multiplies by that reciprocal as replacements for the
16712 /// original divisions.
16713 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
16717 /// Helper for creating a X86ISD::SETCC node.
16718 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
16719 SelectionDAG &DAG) {
16720 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16721 DAG.getConstant(Cond, dl, MVT::i8), EFLAGS);
16724 /// Create a BT (Bit Test) node - Test bit \p BitNo in \p Src and set condition
16725 /// according to equal/not-equal condition code \p CC.
16726 static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC,
16727 const SDLoc &dl, SelectionDAG &DAG) {
16728 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
16729 // instruction. Since the shift amount is in-range-or-undefined, we know
16730 // that doing a bittest on the i32 value is ok. We extend to i32 because
16731 // the encoding for the i16 version is larger than the i32 version.
16732 // Also promote i16 to i32 for performance / code size reason.
16733 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
16734 Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
16736 // See if we can use the 32-bit instruction instead of the 64-bit one for a
16737 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
16738 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
16739 // known to be zero.
16740 if (Src.getValueType() == MVT::i64 &&
16741 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
16742 Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
16744 // If the operand types disagree, extend the shift amount to match. Since
16745 // BT ignores high bits (like shifts) we can use anyextend.
16746 if (Src.getValueType() != BitNo.getValueType())
16747 BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
16749 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
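// BT copies the tested bit into CF, so a clear bit corresponds to COND_AE
// (CF == 0) and a set bit to COND_B (CF == 1); hence the SETEQ
// (compares-equal-to-zero) case maps to AE below.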
16750 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
16751 return getSETCC(Cond, BT, dl , DAG);
16754 /// Result of 'and' is compared against zero. Change to a BT node if possible.
16755 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
16756 const SDLoc &dl, SelectionDAG &DAG) {
16757 SDValue Op0 = And.getOperand(0);
16758 SDValue Op1 = And.getOperand(1);
16759 if (Op0.getOpcode() == ISD::TRUNCATE)
16760 Op0 = Op0.getOperand(0);
16761 if (Op1.getOpcode() == ISD::TRUNCATE)
16762 Op1 = Op1.getOperand(0);
16764 SDValue LHS, RHS;
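// Patterns handled below include, for example:
//   ((1 << n) & x) != 0   ->  BT x, n
//   ((x >> n) & 1) != 0   ->  BT x, n
//   (x & (1 << 40)) != 0  ->  BT x, 40  (the immediate cannot be encoded in TEST)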
16765 if (Op1.getOpcode() == ISD::SHL)
16766 std::swap(Op0, Op1);
16767 if (Op0.getOpcode() == ISD::SHL) {
16768 if (isOneConstant(Op0.getOperand(0))) {
16769 // If we looked past a truncate, check that it's only truncating away
16770 // sign bits.
16771 unsigned BitWidth = Op0.getValueSizeInBits();
16772 unsigned AndBitWidth = And.getValueSizeInBits();
16773 if (BitWidth > AndBitWidth) {
16774 KnownBits Known;
16775 DAG.computeKnownBits(Op0, Known);
16776 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
16779 LHS = Op1;
16780 RHS = Op0.getOperand(1);
16782 } else if (Op1.getOpcode() == ISD::Constant) {
16783 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
16784 uint64_t AndRHSVal = AndRHS->getZExtValue();
16785 SDValue AndLHS = Op0;
16787 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
16788 LHS = AndLHS.getOperand(0);
16789 RHS = AndLHS.getOperand(1);
16792 // Use BT if the immediate can't be encoded in a TEST instruction.
16793 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
16794 LHS = AndLHS;
16795 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl, LHS.getValueType());
16800 return getBitTestCondition(LHS, RHS, CC, dl, DAG);
16805 // Convert (truncate (srl X, N) to i1) to (bt X, N)
16806 static SDValue LowerTruncateToBT(SDValue Op, ISD::CondCode CC,
16807 const SDLoc &dl, SelectionDAG &DAG) {
16809 assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&
16810 "Expected TRUNCATE to i1 node");
16812 if (Op.getOperand(0).getOpcode() != ISD::SRL)
16815 SDValue ShiftRight = Op.getOperand(0);
16816 return getBitTestCondition(ShiftRight.getOperand(0), ShiftRight.getOperand(1),
16820 /// Result of 'and' or 'trunc to i1' is compared against zero.
16821 /// Change to a BT node if possible.
16822 SDValue X86TargetLowering::LowerToBT(SDValue Op, ISD::CondCode CC,
16823 const SDLoc &dl, SelectionDAG &DAG) const {
16824 if (Op.getOpcode() == ISD::AND)
16825 return LowerAndToBT(Op, CC, dl, DAG);
16826 if (Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
16827 return LowerTruncateToBT(Op, CC, dl, DAG);
16831 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
16832 /// CMPs.
16833 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
16834 SDValue &Op1) {
16835 unsigned SSECC;
16836 bool Swap = false;
16838 // SSE Condition code mapping:
//   0 - EQ,  1 - LT,  2 - LE,  3 - UNORD,
//   4 - NEQ, 5 - NLT, 6 - NLE, 7 - ORD
16847 switch (SetCCOpcode) {
16848 default: llvm_unreachable("Unexpected SETCC condition");
16850 case ISD::SETEQ: SSECC = 0; break;
16852 case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
16854 case ISD::SETOLT: SSECC = 1; break;
16856 case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
16858 case ISD::SETOLE: SSECC = 2; break;
16859 case ISD::SETUO: SSECC = 3; break;
16861 case ISD::SETNE: SSECC = 4; break;
16862 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
16863 case ISD::SETUGE: SSECC = 5; break;
16864 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
16865 case ISD::SETUGT: SSECC = 6; break;
16866 case ISD::SETO: SSECC = 7; break;
16868 case ISD::SETONE: SSECC = 8; break;
16870 if (Swap)
16871 std::swap(Op0, Op1);
16872 return SSECC;
16876 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
16877 /// concatenate the result back.
16878 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
16879 MVT VT = Op.getSimpleValueType();
16881 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
16882 "Unsupported value type for operation");
16884 unsigned NumElems = VT.getVectorNumElements();
16886 SDValue CC = Op.getOperand(2);
16888 // Extract the LHS vectors
16889 SDValue LHS = Op.getOperand(0);
16890 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
16891 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
16893 // Extract the RHS vectors
16894 SDValue RHS = Op.getOperand(1);
16895 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
16896 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
16898 // Issue the operation on the smaller types and concatenate the result back
16899 MVT EltVT = VT.getVectorElementType();
16900 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
16901 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
16902 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
16903 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
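// For example, on AVX1 (no AVX2 integer ops) a v8i32 SETGT is lowered as two
// v4i32 compares on the extracted 128-bit halves, glued back together with
// CONCAT_VECTORS.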
16906 static SDValue LowerBoolVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
16907 SDValue Op0 = Op.getOperand(0);
16908 SDValue Op1 = Op.getOperand(1);
16909 SDValue CC = Op.getOperand(2);
16910 MVT VT = Op.getSimpleValueType();
16913 assert(Op0.getSimpleValueType().getVectorElementType() == MVT::i1 &&
16914 "Unexpected type for boolean compare operation");
16915 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
16916 SDValue NotOp0 = DAG.getNode(ISD::XOR, dl, VT, Op0,
16917 DAG.getConstant(-1, dl, VT));
16918 SDValue NotOp1 = DAG.getNode(ISD::XOR, dl, VT, Op1,
16919 DAG.getConstant(-1, dl, VT));
16920 switch (SetCCOpcode) {
16921 default: llvm_unreachable("Unexpected SETCC condition");
16923 // (x == y) -> ~(x ^ y)
16924 return DAG.getNode(ISD::XOR, dl, VT,
16925 DAG.getNode(ISD::XOR, dl, VT, Op0, Op1),
16926 DAG.getConstant(-1, dl, VT));
16928 // (x != y) -> (x ^ y)
16929 return DAG.getNode(ISD::XOR, dl, VT, Op0, Op1);
16932 // (x > y) -> (x & ~y)
16933 return DAG.getNode(ISD::AND, dl, VT, Op0, NotOp1);
16936 // (x < y) -> (~x & y)
16937 return DAG.getNode(ISD::AND, dl, VT, NotOp0, Op1);
16940 // (x <= y) -> (~x | y)
16941 return DAG.getNode(ISD::OR, dl, VT, NotOp0, Op1);
16944 // (x >= y) -> (x | ~y)
16945 return DAG.getNode(ISD::OR, dl, VT, Op0, NotOp1);
16949 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
16951 SDValue Op0 = Op.getOperand(0);
16952 SDValue Op1 = Op.getOperand(1);
16953 SDValue CC = Op.getOperand(2);
16954 MVT VT = Op.getSimpleValueType();
16957 assert(VT.getVectorElementType() == MVT::i1 &&
16958 "Cannot set masked compare for this operation");
16960 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
16962 bool Unsigned = false;
16965 switch (SetCCOpcode) {
16966 default: llvm_unreachable("Unexpected SETCC condition");
16967 case ISD::SETNE: SSECC = 4; break;
16968 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
16969 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
16970 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
16971 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
16972 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
16973 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
16974 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
16975 case ISD::SETULE: Unsigned = true; LLVM_FALLTHROUGH;
16976 case ISD::SETLE: SSECC = 2; break;
16980 std::swap(Op0, Op1);
16982 return DAG.getNode(Opc, dl, VT, Op0, Op1);
16983 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
16984 return DAG.getNode(Opc, dl, VT, Op0, Op1,
16985 DAG.getConstant(SSECC, dl, MVT::i8));
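// The SSECC immediate above selects the AVX-512 VPCMP/VPCMPU predicate
// (0 = EQ, 1 = LT, 2 = LE, 4 = NE, 5 = NLT/GE, 6 = NLE/GT); EQ and signed GT
// instead use the dedicated PCMPEQM/PCMPGTM nodes chosen in the switch.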
16988 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
16989 /// operand \p Op1. If non-trivial (for example because it's not constant)
16990 /// return an empty value.
16991 static SDValue ChangeVSETULTtoVSETULE(const SDLoc &dl, SDValue Op1,
16992 SelectionDAG &DAG) {
16993 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
16997 MVT VT = Op1.getSimpleValueType();
16998 MVT EVT = VT.getVectorElementType();
16999 unsigned n = VT.getVectorNumElements();
17000 SmallVector<SDValue, 8> ULTOp1;
17002 for (unsigned i = 0; i < n; ++i) {
17003 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
17004 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EVT)
17007 // Avoid underflow.
17008 APInt Val = Elt->getAPIntValue();
17012 ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT));
17015 return DAG.getBuildVector(VT, dl, ULTOp1);
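// For example, (x <u <4,4,4,4>) becomes (x <=u <3,3,3,3>); the "avoid
// underflow" check above bails out on zero elements, so decrementing each
// constant by one is always safe.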
17018 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
17019 SelectionDAG &DAG) {
17020 SDValue Op0 = Op.getOperand(0);
17021 SDValue Op1 = Op.getOperand(1);
17022 SDValue CC = Op.getOperand(2);
17023 MVT VT = Op.getSimpleValueType();
17024 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
17025 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
17030 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
17031 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
17035 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
17036 assert(VT.getVectorNumElements() <= 16);
17037 Opc = X86ISD::CMPM;
17039 Opc = X86ISD::CMPP;
17040 // The SSE/AVX packed FP comparison nodes are defined with a
17041 // floating-point vector result that matches the operand type. This allows
17042 // them to work with an SSE1 target (integer vector types are not legal).
17043 VT = Op0.getSimpleValueType();
17046 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
17047 // emit two comparisons and a logic op to tie them together.
17048 // TODO: This can be avoided if Intel (and only Intel as of 2016) AVX is available.
17051 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
17053 // LLVM predicate is SETUEQ or SETONE.
17055 unsigned CombineOpc;
17056 if (Cond == ISD::SETUEQ) {
17059 CombineOpc = Opc == X86ISD::CMPP ? static_cast<unsigned>(X86ISD::FOR) :
17060 static_cast<unsigned>(ISD::OR);
17062 assert(Cond == ISD::SETONE);
17065 CombineOpc = Opc == X86ISD::CMPP ? static_cast<unsigned>(X86ISD::FAND) :
17066 static_cast<unsigned>(ISD::AND);
17069 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
17070 DAG.getConstant(CC0, dl, MVT::i8));
17071 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
17072 DAG.getConstant(CC1, dl, MVT::i8));
17073 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
17075 // Handle all other FP comparisons here.
17076 Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
17077 DAG.getConstant(SSECC, dl, MVT::i8));
17080 // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
17081 // result type of SETCC. The bitcast is expected to be optimized away
17082 // during combining/isel.
17083 if (Opc == X86ISD::CMPP)
17084 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
17089 MVT VTOp0 = Op0.getSimpleValueType();
17090 assert(VTOp0 == Op1.getSimpleValueType() &&
17091 "Expected operands with same type!");
17092 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
17093 "Invalid number of packed elements for source and destination!");
17095 if (VT.is128BitVector() && VTOp0.is256BitVector()) {
17096 // On non-AVX512 targets, a vector of MVT::i1 is promoted by the type
17097 // legalizer to a wider vector type. In the case of 'vsetcc' nodes, the
17098 // legalizer first checks whether the first input operand of the setcc has
17099 // a legal type. If so, then it promotes the return type to that same type.
17100 // Otherwise, the return type is promoted to the 'next legal type' which,
17101 // for a vector of MVT::i1 is always a 128-bit integer vector type.
17103 // We reach this code only if the following two conditions are met:
17104 // 1. Both return type and operand type have been promoted to wider types
17105 // by the type legalizer.
17106 // 2. The original operand type has been promoted to a 256-bit vector.
17108 // Note that condition 2. only applies for AVX targets.
17109 SDValue NewOp = DAG.getSetCC(dl, VTOp0, Op0, Op1, Cond);
17110 return DAG.getZExtOrTrunc(NewOp, dl, VT);
17113 // The non-AVX512 code below works under the assumption that source and
17114 // destination types are the same.
17115 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
17116 "Value types for source and destination must be the same!");
17118 // Break 256-bit integer vector compare into smaller ones.
17119 if (VT.is256BitVector() && !Subtarget.hasInt256())
17120 return Lower256IntVSETCC(Op, DAG);
17122 // Operands are boolean (vectors of i1)
17123 MVT OpVT = Op1.getSimpleValueType();
17124 if (OpVT.getVectorElementType() == MVT::i1)
17125 return LowerBoolVSETCC_AVX512(Op, DAG);
17127 // The result is boolean, but operands are int/float
17128 if (VT.getVectorElementType() == MVT::i1) {
17129 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
17130 // but there is no compare instruction for i8 and i16 elements on KNL.
17131 // In that case, use an SSE compare instead.
17132 bool UseAVX512Inst =
17133 (OpVT.is512BitVector() ||
17134 OpVT.getScalarSizeInBits() >= 32 ||
17135 (Subtarget.hasBWI() && Subtarget.hasVLX()));
17138 return LowerIntVSETCC_AVX512(Op, DAG);
17140 return DAG.getNode(ISD::TRUNCATE, dl, VT,
17141 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
17144 // Lower using XOP integer comparisons.
17145 if ((VT == MVT::v16i8 || VT == MVT::v8i16 ||
17146 VT == MVT::v4i32 || VT == MVT::v2i64) && Subtarget.hasXOP()) {
17147 // Translate compare code to XOP PCOM compare mode.
17148 unsigned CmpMode = 0;
17150 default: llvm_unreachable("Unexpected SETCC condition");
17152 case ISD::SETLT: CmpMode = 0x00; break;
17154 case ISD::SETLE: CmpMode = 0x01; break;
17156 case ISD::SETGT: CmpMode = 0x02; break;
17158 case ISD::SETGE: CmpMode = 0x03; break;
17159 case ISD::SETEQ: CmpMode = 0x04; break;
17160 case ISD::SETNE: CmpMode = 0x05; break;
17163 // Are we comparing unsigned or signed integers?
17165 ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
17167 return DAG.getNode(Opc, dl, VT, Op0, Op1,
17168 DAG.getConstant(CmpMode, dl, MVT::i8));
17171 // We are handling one of the integer comparisons here. Since SSE only has
17172 // GT and EQ comparisons for integers, swapping operands and multiple
17173 // operations may be required for some comparisons.
17174 unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
17176 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
17177 Cond == ISD::SETGE || Cond == ISD::SETUGE;
17178 bool Invert = Cond == ISD::SETNE ||
17179 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
17180 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond);
17182 // Special case: Use min/max operations for SETULE/SETUGE
17183 MVT VET = VT.getVectorElementType();
17185 (Subtarget.hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32)) ||
17186 (Subtarget.hasSSE2() && (VET == MVT::i8));
17187 bool MinMax = false;
17191 case ISD::SETULE: Opc = ISD::UMIN; MinMax = true; break;
17192 case ISD::SETUGE: Opc = ISD::UMAX; MinMax = true; break;
17196 Swap = Invert = FlipSigns = false;
17199 bool HasSubus = Subtarget.hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
17200 bool Subus = false;
17201 if (!MinMax && HasSubus) {
17202 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
17204 //   Op0 u<= Op1:  t = psubus Op0, Op1
17205 //                 pcmpeq  t, <0..0>
17208 case ISD::SETULT: {
17209 // If the comparison is against a constant we can turn this into a
17210 // setule. With psubus, setule does not require a swap. This is
17211 // beneficial because the constant in the register is no longer
17212 // clobbered as the destination operand, so it can be hoisted out of a loop.
17213 // Only do this pre-AVX since vpcmp* is no longer destructive.
17214 if (Subtarget.hasAVX())
17216 if (SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG)) {
17218 Subus = true; Invert = false; Swap = false;
17222 // Psubus is better than flip-sign because it requires no inversion.
17223 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
17224 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
17228 Opc = X86ISD::SUBUS;
17234 std::swap(Op0, Op1);
17236 // Check that the operation in question is available (most are plain SSE2,
17237 // but PCMPGTQ and PCMPEQQ have different requirements).
17238 if (VT == MVT::v2i64) {
17239 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
17240 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
17242 // First cast everything to the right type.
17243 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
17244 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
17246 // Since SSE has no unsigned integer comparisons, we need to flip the sign
17247 // bits of the inputs before performing those operations. The lower
17248 // compare is always unsigned.
17251 SB = DAG.getConstant(0x80000000U, dl, MVT::v4i32);
17253 SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32);
17254 SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32);
17255 SB = DAG.getBuildVector(MVT::v4i32, dl, {Sign, Zero, Sign, Zero});
17257 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
17258 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
17260 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
17261 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
17262 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
17264 // Create masks for only the low parts/high parts of the 64-bit integers.
17265 static const int MaskHi[] = { 1, 1, 3, 3 };
17266 static const int MaskLo[] = { 0, 0, 2, 2 };
17267 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
17268 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
17269 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
17271 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
17272 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
17275 Result = DAG.getNOT(dl, Result, MVT::v4i32);
17277 return DAG.getBitcast(VT, Result);
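// To summarize the emulation above: each 64-bit lane is compared as
//   (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
// using 32-bit PCMPGT/PCMPEQ plus lane shuffles. The XOR with SB flips sign
// bits so that the 32-bit halves compare with the right signedness, and the
// result is inverted afterwards when Invert is set.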
17280 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
17281 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
17282 // pcmpeqd + pshufd + pand.
17283 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
17285 // First cast everything to the right type.
17286 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
17287 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
17290 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
17292 // Make sure the lower and upper halves are both all-ones.
17293 static const int Mask[] = { 1, 0, 3, 2 };
17294 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
17295 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
17298 Result = DAG.getNOT(dl, Result, MVT::v4i32);
17300 return DAG.getBitcast(VT, Result);
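// PCMPEQD only produces per-32-bit-lane results, so the {1, 0, 3, 2} shuffle
// above swaps the two halves of each 64-bit lane and the AND leaves a lane
// all-ones only when both of its 32-bit halves compared equal.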
17304 // Since SSE has no unsigned integer comparisons, we need to flip the sign
17305 // bits of the inputs before performing those operations.
17307 MVT EltVT = VT.getVectorElementType();
17308 SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
17310 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
17311 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
17314 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
17316 // If the logical-not of the result is required, perform that now.
17318 Result = DAG.getNOT(dl, Result, VT);
17321 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
17324 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
17325 getZeroVector(VT, Subtarget, DAG, dl));
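// For the PSUBUS path chosen above: (psubus a, b) is the all-zero vector
// exactly when a <=u b holds element-wise, so comparing the subtraction
// result against zero reproduces the unsigned less-or-equal mask.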
17330 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
17332 MVT VT = Op.getSimpleValueType();
17334 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
17336 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
17337 SDValue Op0 = Op.getOperand(0);
17338 SDValue Op1 = Op.getOperand(1);
17340 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
17342 // Optimize to BT if possible.
17343 // Lower (X & (1 << N)) == 0 to BT(X, N).
17344 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
17345 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
17346 // Lower (trunc (X >> N) to i1) to BT(X, N).
17347 if (Op0.hasOneUse() && isNullConstant(Op1) &&
17348 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
17349 if (SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG)) {
17351 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
17356 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of these.
17358 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
17359 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
17361 // If the input is a setcc, then reuse the input setcc or use a new one with
17362 // the inverted condition.
17363 if (Op0.getOpcode() == X86ISD::SETCC) {
17364 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
17365 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
17369 CCode = X86::GetOppositeBranchCondition(CCode);
17370 SDValue SetCC = getSETCC(CCode, Op0.getOperand(1), dl, DAG);
17372 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
17376 if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
17377 if (isOneConstant(Op1)) {
17378 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
17379 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC);
17381 if (!isNullConstant(Op1)) {
17382 SDValue Xor = DAG.getNode(ISD::XOR, dl, MVT::i1, Op0, Op1);
17383 return DAG.getSetCC(dl, VT, Xor, DAG.getConstant(0, dl, MVT::i1), CC);
17387 bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
17388 X86::CondCode X86CC = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
17389 if (X86CC == X86::COND_INVALID)
17392 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
17393 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
17394 SDValue SetCC = getSETCC(X86CC, EFLAGS, dl, DAG);
17396 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
17400 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
17401 SDValue LHS = Op.getOperand(0);
17402 SDValue RHS = Op.getOperand(1);
17403 SDValue Carry = Op.getOperand(2);
17404 SDValue Cond = Op.getOperand(3);
17407 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
17408 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
17410 // Recreate the carry if needed.
17411 EVT CarryVT = Carry.getValueType();
17412 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
17413 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
17414 Carry, DAG.getConstant(NegOne, DL, CarryVT));
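// Adding all-ones to the incoming carry value sets CF exactly when that
// value is non-zero, so this rebuilds EFLAGS.CF for the SBB node below.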
17416 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
17417 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
17418 SDValue SetCC = getSETCC(CC, Cmp.getValue(1), DL, DAG);
17419 if (Op.getSimpleValueType() == MVT::i1)
17420 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
17424 /// Return true if the opcode is an X86 logical comparison.
17425 static bool isX86LogicalCmp(SDValue Op) {
17426 unsigned Opc = Op.getOpcode();
17427 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
17428 Opc == X86ISD::SAHF)
17430 if (Op.getResNo() == 1 &&
17431 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
17432 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
17433 Opc == X86ISD::INC || Opc == X86ISD::DEC || Opc == X86ISD::OR ||
17434 Opc == X86ISD::XOR || Opc == X86ISD::AND))
17437 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
17443 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
17444 if (V.getOpcode() != ISD::TRUNCATE)
17447 SDValue VOp0 = V.getOperand(0);
17448 unsigned InBits = VOp0.getValueSizeInBits();
17449 unsigned Bits = V.getValueSizeInBits();
17450 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
17453 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
17454 bool AddTest = true;
17455 SDValue Cond = Op.getOperand(0);
17456 SDValue Op1 = Op.getOperand(1);
17457 SDValue Op2 = Op.getOperand(2);
17459 MVT VT = Op1.getSimpleValueType();
17462 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
17463 // are available or VBLENDV if AVX is available.
17464 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
17465 if (Cond.getOpcode() == ISD::SETCC &&
17466 ((Subtarget.hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
17467 (Subtarget.hasSSE1() && VT == MVT::f32)) &&
17468 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
17469 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
17470 int SSECC = translateX86FSETCC(
17471 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
17474 if (Subtarget.hasAVX512()) {
17475 SDValue Cmp = DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0,
17476 CondOp1, DAG.getConstant(SSECC, DL, MVT::i8));
17477 return DAG.getNode(VT.isVector() ? X86ISD::SELECT : X86ISD::SELECTS,
17478 DL, VT, Cmp, Op1, Op2);
17481 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
17482 DAG.getConstant(SSECC, DL, MVT::i8));
17484 // If we have AVX, we can use a variable vector select (VBLENDV) instead
17485 // of 3 logic instructions for size savings and potentially speed.
17486 // Unfortunately, there is no scalar form of VBLENDV.
17488 // If either operand is a constant, don't try this. We can expect to
17489 // optimize away at least one of the logic instructions later in that
17490 // case, so that sequence would be faster than a variable blend.
17492 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
17493 // uses XMM0 as the selection register. That may need just as many
17494 // instructions as the AND/ANDN/OR sequence due to register moves, so don't bother.
17497 if (Subtarget.hasAVX() &&
17498 !isa<ConstantFPSDNode>(Op1) && !isa<ConstantFPSDNode>(Op2)) {
17500 // Convert to vectors, do a VSELECT, and convert back to scalar.
17501 // All of the conversions should be optimized away.
17503 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
17504 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
17505 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
17506 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
17508 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
17509 VCmp = DAG.getBitcast(VCmpVT, VCmp);
17511 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
17513 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
17514 VSel, DAG.getIntPtrConstant(0, DL));
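// In other words, the scalar operands and the FSETCC mask are each placed
// into a vector, a single VSELECT (VBLENDV) picks between them, and lane 0
// is extracted back to the scalar type; all of the conversions are expected
// to fold away during combining/isel.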
17516 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
17517 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
17518 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
17522 // AVX512 fallback is to lower selects of scalar floats to masked moves.
17523 if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
17524 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
17525 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
17528 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
17530 if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
17531 Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
17532 else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
17533 Op1Scalar = Op1.getOperand(0);
17535 if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
17536 Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
17537 else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
17538 Op2Scalar = Op2.getOperand(0);
17539 if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
17540 SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
17541 Op1Scalar, Op2Scalar);
17542 if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
17543 return DAG.getBitcast(VT, newSelect);
17544 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
17545 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
17546 DAG.getIntPtrConstant(0, DL));
17550 if (VT == MVT::v4i1 || VT == MVT::v2i1) {
17551 SDValue zeroConst = DAG.getIntPtrConstant(0, DL);
17552 Op1 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
17553 DAG.getUNDEF(MVT::v8i1), Op1, zeroConst);
17554 Op2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
17555 DAG.getUNDEF(MVT::v8i1), Op2, zeroConst);
17556 SDValue newSelect = DAG.getSelect(DL, MVT::v8i1, Cond, Op1, Op2);
17557 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, newSelect, zeroConst);
17560 if (Cond.getOpcode() == ISD::SETCC) {
17561 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
17563 // If the condition was updated, it's possible that the operands of the
17564 // select were also updated (for example, EmitTest has a RAUW). Refresh
17565 // the local references to the select operands in case they got stale.
17566 Op1 = Op.getOperand(1);
17567 Op2 = Op.getOperand(2);
17571 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
17572 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
17573 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
17574 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
17575 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
17576 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
17577 if (Cond.getOpcode() == X86ISD::SETCC &&
17578 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
17579 isNullConstant(Cond.getOperand(1).getOperand(1))) {
17580 SDValue Cmp = Cond.getOperand(1);
17581 unsigned CondCode =
17582 cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
17584 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
17585 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
17586 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
17588 SDValue CmpOp0 = Cmp.getOperand(0);
17589 // Apply further optimizations for special cases
17590 // (select (x != 0), -1, 0) -> neg & sbb
17591 // (select (x == 0), 0, -1) -> neg & sbb
17592 if (isNullConstant(Y) &&
17593 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
17594 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
17595 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
17596 DAG.getConstant(0, DL,
17597 CmpOp0.getValueType()),
17599 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
17600 DAG.getConstant(X86::COND_B, DL, MVT::i8),
17601 SDValue(Neg.getNode(), 1));
17605 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
17606 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
17607 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
17609 SDValue Res = // Res = 0 or -1.
17610 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
17611 DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp);
17613 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
17614 Res = DAG.getNOT(DL, Res, Res.getValueType());
17616 if (!isNullConstant(Op2))
17617 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
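// Roughly, (select (x != 0), -1, y) is built here as:
//   cmp x, 1 (or neg x in the special all-ones/zero case) to set the carry,
//   SETCC_CARRY to materialize 0 or -1 from the carry flag,
//   an optional NOT, then an OR with y when y is not the zero constant.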
17619 } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
17620 Cmp.getOperand(0).getOpcode() == ISD::AND &&
17621 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
17622 SDValue CmpOp0 = Cmp.getOperand(0);
17623 SDValue Src1, Src2;
17624 // true if Op2 is an XOR or OR operator and one of its operands
17626 // equals Op1, i.e. ( a , a op b ) || ( b , a op b )
17627 auto isOrXorPattern = [&]() {
17628 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
17629 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
17631 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
17638 if (isOrXorPattern()) {
17640 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
17641 // we need a mask of all zeros or all ones with the same size as the other operands.
17643 if (CmpSz > VT.getSizeInBits())
17644 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
17645 else if (CmpSz < VT.getSizeInBits())
17646 Neg = DAG.getNode(ISD::AND, DL, VT,
17647 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
17648 DAG.getConstant(1, DL, VT));
17651 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
17652 Neg); // -(and (x, 0x1))
17653 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
17654 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
17659 // Look past (and (setcc_carry (cmp ...)), 1).
17660 if (Cond.getOpcode() == ISD::AND &&
17661 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
17662 isOneConstant(Cond.getOperand(1)))
17663 Cond = Cond.getOperand(0);
17665 // If condition flag is set by a X86ISD::CMP, then use it as the condition
17666 // setting operand in place of the X86ISD::SETCC.
17667 unsigned CondOpcode = Cond.getOpcode();
17668 if (CondOpcode == X86ISD::SETCC ||
17669 CondOpcode == X86ISD::SETCC_CARRY) {
17670 CC = Cond.getOperand(0);
17672 SDValue Cmp = Cond.getOperand(1);
17673 unsigned Opc = Cmp.getOpcode();
17674 MVT VT = Op.getSimpleValueType();
17676 bool IllegalFPCMov = false;
17677 if (VT.isFloatingPoint() && !VT.isVector() &&
17678 !isScalarFPTypeInSSEReg(VT)) // FPStack?
17679 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
17681 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
17682 Opc == X86ISD::BT) { // FIXME
17686 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
17687 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
17688 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
17689 Cond.getOperand(0).getValueType() != MVT::i8)) {
17690 SDValue LHS = Cond.getOperand(0);
17691 SDValue RHS = Cond.getOperand(1);
17692 unsigned X86Opcode;
17695 switch (CondOpcode) {
17696 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
17697 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
17698 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
17699 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
17700 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
17701 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
17702 default: llvm_unreachable("unexpected overflowing operator");
17704 if (CondOpcode == ISD::UMULO)
17705 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
17708 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
17710 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
17712 if (CondOpcode == ISD::UMULO)
17713 Cond = X86Op.getValue(2);
17715 Cond = X86Op.getValue(1);
17717 CC = DAG.getConstant(X86Cond, DL, MVT::i8);
17722 // Look past the truncate if the high bits are known zero.
17723 if (isTruncWithZeroHighBitsInput(Cond, DAG))
17724 Cond = Cond.getOperand(0);
17726 // We know the result of AND is compared against zero. Try to match it as a BT node.
17728 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
17729 if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
17730 CC = NewSetCC.getOperand(0);
17731 Cond = NewSetCC.getOperand(1);
17738 CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
17739 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
17742 // a < b ? -1 : 0 -> RES = ~setcc_carry
17743 // a < b ? 0 : -1 -> RES = setcc_carry
17744 // a >= b ? -1 : 0 -> RES = setcc_carry
17745 // a >= b ? 0 : -1 -> RES = ~setcc_carry
17746 if (Cond.getOpcode() == X86ISD::SUB) {
17747 Cond = ConvertCmpIfNecessary(Cond, DAG);
17748 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
17750 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
17751 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
17752 (isNullConstant(Op1) || isNullConstant(Op2))) {
17753 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
17754 DAG.getConstant(X86::COND_B, DL, MVT::i8),
17756 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
17757 return DAG.getNOT(DL, Res, Res.getValueType());
17762 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
17763 // widen the cmov and push the truncate through. This avoids introducing a new
17764 // branch during isel and doesn't add any extensions.
17765 if (Op.getValueType() == MVT::i8 &&
17766 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
17767 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
17768 if (T1.getValueType() == T2.getValueType() &&
17769 // Blacklist CopyFromReg to avoid partial register stalls.
17770 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
17771 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
17772 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
17773 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
17777 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
17778 // the condition is true.
17779 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
17780 SDValue Ops[] = { Op2, Op1, CC, Cond };
17781 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
17784 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
17785 const X86Subtarget &Subtarget,
17786 SelectionDAG &DAG) {
17787 MVT VT = Op->getSimpleValueType(0);
17788 SDValue In = Op->getOperand(0);
17789 MVT InVT = In.getSimpleValueType();
17790 MVT VTElt = VT.getVectorElementType();
17791 MVT InVTElt = InVT.getVectorElementType();
17795 if ((InVTElt == MVT::i1) &&
17796 (((Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16)) ||
17798 ((Subtarget.hasDQI() && VTElt.getSizeInBits() >= 32))))
17800 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
17802 unsigned NumElts = VT.getVectorNumElements();
17804 if (VT.is512BitVector() && InVTElt != MVT::i1 &&
17805 (NumElts == 8 || NumElts == 16 || Subtarget.hasBWI())) {
17806 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
17807 return getExtendInVec(In.getOpcode(), dl, VT, In.getOperand(0), DAG);
17808 return getExtendInVec(X86ISD::VSEXT, dl, VT, In, DAG);
17811 if (InVTElt != MVT::i1)
17815 if (!VT.is512BitVector() && !Subtarget.hasVLX())
17816 ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
17819 if (Subtarget.hasDQI()) {
17820 V = getExtendInVec(X86ISD::VSEXT, dl, ExtVT, In, DAG);
17821 assert(!VT.is512BitVector() && "Unexpected vector type");
17823 SDValue NegOne = getOnesVector(ExtVT, DAG, dl);
17824 SDValue Zero = getZeroVector(ExtVT, Subtarget, DAG, dl);
17825 V = DAG.getSelect(dl, ExtVT, In, NegOne, Zero);
17830 return DAG.getNode(X86ISD::VTRUNC, dl, VT, V);
17833 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
17834 // For sign extend this needs to handle all vector sizes and SSE4.1 and
17835 // non-SSE4.1 targets. For zero extend this should only handle inputs of
17836 // MVT::v64i8 when BWI is not supported, but AVX512 is.
17837 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
17838 const X86Subtarget &Subtarget,
17839 SelectionDAG &DAG) {
17840 SDValue In = Op->getOperand(0);
17841 MVT VT = Op->getSimpleValueType(0);
17842 MVT InVT = In.getSimpleValueType();
17843 assert(VT.getSizeInBits() == InVT.getSizeInBits());
17845 MVT SVT = VT.getVectorElementType();
17846 MVT InSVT = InVT.getVectorElementType();
17847 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
17849 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
17851 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
17853 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
17854 !(VT.is256BitVector() && Subtarget.hasInt256()) &&
17855 !(VT.is512BitVector() && Subtarget.hasAVX512()))
17860 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
17861 // For 512-bit vectors, we need 128-bits or 256-bits.
17862 if (VT.getSizeInBits() > 128) {
17863 // Input needs to be at least the same number of elements as output, and
17864 // at least 128-bits.
17865 int InSize = InSVT.getSizeInBits() * VT.getVectorNumElements();
17866 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
17869 assert((Op.getOpcode() != ISD::ZERO_EXTEND_VECTOR_INREG ||
17870 InVT == MVT::v64i8) && "Zero extend only for v64i8 input!");
17872 // SSE41 targets can use the pmovsx* instructions directly for 128-bit results,
17873 // so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions still
17874 // need to be handled here for 256/512-bit results.
17875 if (Subtarget.hasInt256()) {
17876 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
17877 unsigned ExtOpc = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ?
17878 X86ISD::VSEXT : X86ISD::VZEXT;
17879 return DAG.getNode(ExtOpc, dl, VT, In);
17882 // We should only get here for sign extend.
17883 assert(Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
17884 "Unexpected opcode!");
17886 // Pre-SSE41 targets unpack the lower lanes and then sign-extend using SRAI.
17890 // As SRAI is only available on i16/i32 types, we expand only up to i32
17891 // and handle i64 separately.
17892 while (CurrVT != VT && CurrVT.getVectorElementType() != MVT::i32) {
17893 Curr = DAG.getNode(X86ISD::UNPCKL, dl, CurrVT, DAG.getUNDEF(CurrVT), Curr);
17894 MVT CurrSVT = MVT::getIntegerVT(CurrVT.getScalarSizeInBits() * 2);
17895 CurrVT = MVT::getVectorVT(CurrSVT, CurrVT.getVectorNumElements() / 2);
17896 Curr = DAG.getBitcast(CurrVT, Curr);
17899 SDValue SignExt = Curr;
17900 if (CurrVT != InVT) {
17901 unsigned SignExtShift =
17902 CurrVT.getScalarSizeInBits() - InSVT.getSizeInBits();
17903 SignExt = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
17904 DAG.getConstant(SignExtShift, dl, MVT::i8));
17910 if (VT == MVT::v2i64 && CurrVT == MVT::v4i32) {
17911 SDValue Sign = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
17912 DAG.getConstant(31, dl, MVT::i8));
17913 SDValue Ext = DAG.getVectorShuffle(CurrVT, dl, SignExt, Sign, {0, 4, 1, 5});
17914 return DAG.getBitcast(VT, Ext);
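// For the v2i64 result handled above, the extra VSRAI by 31 produces the
// sign word of each source element, and the {0, 4, 1, 5} shuffle interleaves
// value and sign words to form the two 64-bit sign-extended lanes.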
17920 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
17921 SelectionDAG &DAG) {
17922 MVT VT = Op->getSimpleValueType(0);
17923 SDValue In = Op->getOperand(0);
17924 MVT InVT = In.getSimpleValueType();
17927 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
17928 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
17930 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
17931 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
17932 (VT != MVT::v16i16 || InVT != MVT::v16i8))
17935 if (Subtarget.hasInt256())
17936 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
17938 // Optimize vectors in AVX mode:
17939 // sign extend v8i16 to v8i32 and v4i32 to v4i64.
17942 // Divide the input vector into two parts;
17943 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 },
17944 // then use the vpmovsx instruction to extend each half (v4i32 -> v2i64, v8i16 -> v4i32)
17945 // and concat the vectors back to the original VT.
17947 unsigned NumElems = InVT.getVectorNumElements();
17948 SDValue Undef = DAG.getUNDEF(InVT);
17950 SmallVector<int,8> ShufMask1(NumElems, -1);
17951 for (unsigned i = 0; i != NumElems/2; ++i)
17954 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask1);
17956 SmallVector<int,8> ShufMask2(NumElems, -1);
17957 for (unsigned i = 0; i != NumElems/2; ++i)
17958 ShufMask2[i] = i + NumElems/2;
17960 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask2);
17962 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
17963 VT.getVectorNumElements() / 2);
17965 OpLo = DAG.getSignExtendVectorInReg(OpLo, dl, HalfVT);
17966 OpHi = DAG.getSignExtendVectorInReg(OpHi, dl, HalfVT);
17968 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
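// E.g. sign extending v8i16 to v8i32 without AVX2: shuffle the low and high
// halves of the source into separate vectors, sign_extend_vector_inreg each
// of them to v4i32, and concatenate the two halves back into the v8i32 result.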
17971 // Lower a truncating store. We need special lowering for vXi1 vectors.
17972 static SDValue LowerTruncatingStore(SDValue StOp, const X86Subtarget &Subtarget,
17973 SelectionDAG &DAG) {
17974 StoreSDNode *St = cast<StoreSDNode>(StOp.getNode());
17976 EVT MemVT = St->getMemoryVT();
17977 assert(St->isTruncatingStore() && "We only custom lower truncating stores.");
17978 assert(MemVT.isVector() && MemVT.getVectorElementType() == MVT::i1 &&
17979 "Expected truncstore of i1 vector");
17981 SDValue Op = St->getValue();
17982 MVT OpVT = Op.getValueType().getSimpleVT();
17983 unsigned NumElts = OpVT.getVectorNumElements();
17984 if ((Subtarget.hasVLX() && Subtarget.hasBWI() && Subtarget.hasDQI()) ||
17986 // Truncate and store - everything is legal
17987 Op = DAG.getNode(ISD::TRUNCATE, dl, MemVT, Op);
17988 if (MemVT.getSizeInBits() < 8)
17989 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
17990 DAG.getUNDEF(MVT::v8i1), Op,
17991 DAG.getIntPtrConstant(0, dl));
17992 return DAG.getStore(St->getChain(), dl, Op, St->getBasePtr(),
17993 St->getMemOperand());
17996 // A subset, assume that we have only AVX-512F
17997 if (NumElts <= 8) {
17999 // Extend to an 8-element vector.
18000 MVT ExtVT = MVT::getVectorVT(OpVT.getScalarType(), 8);
18001 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ExtVT,
18002 DAG.getUNDEF(ExtVT), Op, DAG.getIntPtrConstant(0, dl));
18004 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i1, Op);
18005 return DAG.getStore(St->getChain(), dl, Op, St->getBasePtr(),
18006 St->getMemOperand());
18009 assert(OpVT == MVT::v32i8 && "Unexpected operand type");
18010 // Divide the vector into 2 parts and store each part separately
18011 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, Op,
18012 DAG.getIntPtrConstant(0, dl));
18013 Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::v16i1, Lo);
18014 SDValue BasePtr = St->getBasePtr();
18015 SDValue StLo = DAG.getStore(St->getChain(), dl, Lo, BasePtr,
18016 St->getMemOperand());
18017 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, Op,
18018 DAG.getIntPtrConstant(16, dl));
18019 Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::v16i1, Hi);
18021 SDValue BasePtrHi =
18022 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
18023 DAG.getConstant(2, dl, BasePtr.getValueType()));
18025 SDValue StHi = DAG.getStore(St->getChain(), dl, Hi,
18026 BasePtrHi, St->getMemOperand());
18027 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StLo, StHi);
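// Note on the split above: each v16i1 half occupies 16 bits in memory, which
// is why the pointer for the high half is advanced by 2 bytes.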
18030 static SDValue LowerExtended1BitVectorLoad(SDValue Op,
18031 const X86Subtarget &Subtarget,
18032 SelectionDAG &DAG) {
18034 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
18036 EVT MemVT = Ld->getMemoryVT();
18037 assert(MemVT.isVector() && MemVT.getScalarType() == MVT::i1 &&
18038 "Expected i1 vector load");
18039 unsigned ExtOpcode = Ld->getExtensionType() == ISD::ZEXTLOAD ?
18040 ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
18041 MVT VT = Op.getValueType().getSimpleVT();
18042 unsigned NumElts = VT.getVectorNumElements();
18044 if ((Subtarget.hasBWI() && NumElts >= 32) ||
18045 (Subtarget.hasDQI() && NumElts < 16) ||
18047 // Load and extend - everything is legal
18049 SDValue Load = DAG.getLoad(MVT::v8i1, dl, Ld->getChain(),
18051 Ld->getMemOperand());
18052 // Replace chain users with the new chain.
18053 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
18054 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
18055 MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8);
18056 SDValue ExtVec = DAG.getNode(ExtOpcode, dl, ExtVT, Load);
18058 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
18059 DAG.getIntPtrConstant(0, dl));
18061 SDValue Load = DAG.getLoad(MemVT, dl, Ld->getChain(),
18063 Ld->getMemOperand());
18064 // Replace chain users with the new chain.
18065 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
18066 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
18068 // Finally, do a normal sign-extend to the desired register.
18069 return DAG.getNode(ExtOpcode, dl, Op.getValueType(), Load);
18072 if (NumElts <= 8) {
18073 // A subset, assume that we have only AVX-512F
18074 unsigned NumBitsToLoad = 8;
18075 MVT TypeToLoad = MVT::getIntegerVT(NumBitsToLoad);
18076 SDValue Load = DAG.getLoad(TypeToLoad, dl, Ld->getChain(),
18078 Ld->getMemOperand());
18079 // Replace chain users with the new chain.
18080 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
18081 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
18083 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumBitsToLoad);
18084 SDValue BitVec = DAG.getBitcast(MaskVT, Load);
18087 return DAG.getNode(ExtOpcode, dl, VT, BitVec);
18089 // We should take care of v4i1 and v2i1 here.
18091 MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8);
18092 SDValue ExtVec = DAG.getNode(ExtOpcode, dl, ExtVT, BitVec);
18093 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
18094 DAG.getIntPtrConstant(0, dl));
18097 assert(VT == MVT::v32i8 && "Unexpected extload type");
18099 SmallVector<SDValue, 2> Chains;
18101 SDValue BasePtr = Ld->getBasePtr();
18102 SDValue LoadLo = DAG.getLoad(MVT::v16i1, dl, Ld->getChain(),
18104 Ld->getMemOperand());
18105 Chains.push_back(LoadLo.getValue(1));
18107 SDValue BasePtrHi =
18108 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
18109 DAG.getConstant(2, dl, BasePtr.getValueType()));
18111 SDValue LoadHi = DAG.getLoad(MVT::v16i1, dl, Ld->getChain(),
18113 Ld->getMemOperand());
18114 Chains.push_back(LoadHi.getValue(1));
18115 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
18116 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
18118 SDValue Lo = DAG.getNode(ExtOpcode, dl, MVT::v16i8, LoadLo);
18119 SDValue Hi = DAG.getNode(ExtOpcode, dl, MVT::v16i8, LoadHi);
18120 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v32i8, Lo, Hi);
18123 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
18124 // may emit an illegal shuffle but the expansion is still better than scalar
18125 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
18126 // we'll emit a shuffle and an arithmetic shift.
18127 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
18128 // TODO: It is possible to support ZExt by zeroing the undef values during
18129 // the shuffle phase or after the shuffle.
18130 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget &Subtarget,
18131 SelectionDAG &DAG) {
18132 MVT RegVT = Op.getSimpleValueType();
18133 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
18134 assert(RegVT.isInteger() &&
18135 "We only custom lower integer vector sext loads.");
18137 // Nothing useful we can do without SSE2 shuffles.
18138 assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2.");
18140 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
18142 EVT MemVT = Ld->getMemoryVT();
18143 if (MemVT.getScalarType() == MVT::i1)
18144 return LowerExtended1BitVectorLoad(Op, Subtarget, DAG);
18146 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
18147 unsigned RegSz = RegVT.getSizeInBits();
18149 ISD::LoadExtType Ext = Ld->getExtensionType();
18151 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
18152 && "Only anyext and sext are currently implemented.");
18153 assert(MemVT != RegVT && "Cannot extend to the same type");
18154 assert(MemVT.isVector() && "Must load a vector from memory");
18156 unsigned NumElems = RegVT.getVectorNumElements();
18157 unsigned MemSz = MemVT.getSizeInBits();
18158 assert(RegSz > MemSz && "Register size must be greater than the mem size");
18160 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) {
18161 // The only way in which we have a legal 256-bit vector result but not the
18162 // integer 256-bit operations needed to directly lower a sextload is if we
18163 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
18164 // a 128-bit vector and a normal sign_extend to 256-bits that should get
18165 // correctly legalized. We do this late to allow the canonical form of
18166 // sextload to persist throughout the rest of the DAG combiner -- it wants
18167 // to fold together any extensions it can, and so will fuse a sign_extend
18168 // of an sextload into a sextload targeting a wider value.
18170 if (MemSz == 128) {
18171 // Just switch this to a normal load.
18172 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
18173                                  "it must be a legal 128-bit vector type!");
18175 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
18176 Ld->getPointerInfo(), Ld->getAlignment(),
18177 Ld->getMemOperand()->getFlags());
18179 assert(MemSz < 128 &&
18180 "Can't extend a type wider than 128 bits to a 256 bit vector!");
18181 // Do an sext load to a 128-bit vector type. We want to use the same
18182 // number of elements, but elements half as wide. This will end up being
18183 // recursively lowered by this routine, but will succeed as we definitely
18184 // have all the necessary features if we're using AVX1.
18186 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
18187 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
18189 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
18190 Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
18191 Ld->getMemOperand()->getFlags());
18194 // Replace chain users with the new chain.
18195 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
18196 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
18198 // Finally, do a normal sign-extend to the desired register.
18199 return DAG.getSExtOrTrunc(Load, dl, RegVT);
18202 // All sizes must be a power of two.
18203 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
18204 "Non-power-of-two elements are not custom lowered!");
18206 // Attempt to load the original value using scalar loads.
18207 // Find the largest scalar type that divides the total loaded size.
18208 MVT SclrLoadTy = MVT::i8;
18209 for (MVT Tp : MVT::integer_valuetypes()) {
18210 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
18215 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
18216 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
18218 SclrLoadTy = MVT::f64;
18220 // Calculate the number of scalar loads that we need to perform
18221 // in order to load our vector from memory.
18222 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
18224 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
18225 "Can only lower sext loads with a single scalar load!");
18227 unsigned loadRegZize = RegSz;
18228 if (Ext == ISD::SEXTLOAD && RegSz >= 256)
18231 // Represent our vector as a sequence of elements which are the
18232 // largest scalar that we can load.
18233 EVT LoadUnitVecVT = EVT::getVectorVT(
18234 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
18236 // Represent the data using the same element type that is stored in
18237 // memory. In practice, we "widen" MemVT.
18239 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
18240 loadRegZize / MemVT.getScalarSizeInBits());
18242 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
18243 "Invalid vector type");
18245 // We can't shuffle using an illegal type.
18246 assert(TLI.isTypeLegal(WideVecVT) &&
18247 "We only lower types that form legal widened vector types");
18249 SmallVector<SDValue, 8> Chains;
18250 SDValue Ptr = Ld->getBasePtr();
18251 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, dl,
18252 TLI.getPointerTy(DAG.getDataLayout()));
18253 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
18255 for (unsigned i = 0; i < NumLoads; ++i) {
18256 // Perform a single load.
18257 SDValue ScalarLoad =
18258 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
18259 Ld->getAlignment(), Ld->getMemOperand()->getFlags());
18260 Chains.push_back(ScalarLoad.getValue(1));
18261 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
18262 // another round of DAGCombining.
18264 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
18266 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
18267 ScalarLoad, DAG.getIntPtrConstant(i, dl));
18269 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
18272 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
18274 // Bitcast the loaded value to a vector of the original element type, in
18275 // the size of the target vector type.
18276 SDValue SlicedVec = DAG.getBitcast(WideVecVT, Res);
18277 unsigned SizeRatio = RegSz / MemSz;
18279 if (Ext == ISD::SEXTLOAD) {
18280 // If we have SSE4.1, we can directly emit a VSEXT node.
18281 if (Subtarget.hasSSE41()) {
18282 SDValue Sext = getExtendInVec(X86ISD::VSEXT, dl, RegVT, SlicedVec, DAG);
18283 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
18287 // Otherwise we'll use SIGN_EXTEND_VECTOR_INREG to sign extend the lowest lanes.
18289 assert(TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND_VECTOR_INREG, RegVT) &&
18290 "We can't implement a sext load without SIGN_EXTEND_VECTOR_INREG!");
18292 SDValue Shuff = DAG.getSignExtendVectorInReg(SlicedVec, dl, RegVT);
18293 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
18297 // Redistribute the loaded elements into the different locations.
18298 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
18299 for (unsigned i = 0; i != NumElems; ++i)
18300 ShuffleVec[i * SizeRatio] = i;
18302 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
18303 DAG.getUNDEF(WideVecVT), ShuffleVec);
18305 // Bitcast to the requested type.
18306 Shuff = DAG.getBitcast(RegVT, Shuff);
18307 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
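// As an example of the shuffle-based expansion: an extending load of v4i8
// into v4i32 performs one i32 scalar load, places it in a vector, and then a
// shuffle with SizeRatio == 4 spreads the four source bytes to positions
// 0, 4, 8 and 12 of the wide vector, leaving the remaining lanes undefined
// (which is fine for an any-extending load).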
18311 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
18312 /// each of which has no other use apart from the AND / OR.
18313 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
18314 Opc = Op.getOpcode();
18315 if (Opc != ISD::OR && Opc != ISD::AND)
18317 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
18318 Op.getOperand(0).hasOneUse() &&
18319 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
18320 Op.getOperand(1).hasOneUse());
18323 /// Return true if the node is an ISD::XOR of an X86ISD::SETCC and 1, and the
18324 /// SETCC node has a single use.
18325 static bool isXor1OfSetCC(SDValue Op) {
18326 if (Op.getOpcode() != ISD::XOR)
18328 if (isOneConstant(Op.getOperand(1)))
18329 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
18330 Op.getOperand(0).hasOneUse();
18334 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
18335 bool addTest = true;
18336 SDValue Chain = Op.getOperand(0);
18337 SDValue Cond = Op.getOperand(1);
18338 SDValue Dest = Op.getOperand(2);
18341 bool Inverted = false;
18343 if (Cond.getOpcode() == ISD::SETCC) {
18344 // Check for setcc([su]{add,sub,mul}o == 0).
18345 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
18346 isNullConstant(Cond.getOperand(1)) &&
18347 Cond.getOperand(0).getResNo() == 1 &&
18348 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
18349 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
18350 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
18351 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
18352 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
18353 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
18355 Cond = Cond.getOperand(0);
18357 if (SDValue NewCond = LowerSETCC(Cond, DAG))
18362 // FIXME: LowerXALUO doesn't handle these!!
18363 else if (Cond.getOpcode() == X86ISD::ADD ||
18364 Cond.getOpcode() == X86ISD::SUB ||
18365 Cond.getOpcode() == X86ISD::SMUL ||
18366 Cond.getOpcode() == X86ISD::UMUL)
18367 Cond = LowerXALUO(Cond, DAG);
18370 // Look past (and (setcc_carry (cmp ...)), 1).
18371 if (Cond.getOpcode() == ISD::AND &&
18372 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
18373 isOneConstant(Cond.getOperand(1)))
18374 Cond = Cond.getOperand(0);
18376 // If condition flag is set by a X86ISD::CMP, then use it as the condition
18377 // setting operand in place of the X86ISD::SETCC.
18378 unsigned CondOpcode = Cond.getOpcode();
18379 if (CondOpcode == X86ISD::SETCC ||
18380 CondOpcode == X86ISD::SETCC_CARRY) {
18381 CC = Cond.getOperand(0);
18383 SDValue Cmp = Cond.getOperand(1);
18384 unsigned Opc = Cmp.getOpcode();
18385 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
18386 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
18390 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
18394 // These can only come from an arithmetic instruction with overflow,
18395 // e.g. SADDO, UADDO.
18396 Cond = Cond.getOperand(1);
18402 CondOpcode = Cond.getOpcode();
18403 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
18404 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
18405 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
18406 Cond.getOperand(0).getValueType() != MVT::i8)) {
18407 SDValue LHS = Cond.getOperand(0);
18408 SDValue RHS = Cond.getOperand(1);
18409 unsigned X86Opcode;
18412 // Keep this in sync with LowerXALUO, otherwise we might create redundant
18413 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and X86ISD::SUB).
18415 switch (CondOpcode) {
18416 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
18418 if (isOneConstant(RHS)) {
18419 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
18422 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
18423 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
18424 case ISD::SSUBO:
18425 if (isOneConstant(RHS)) {
18426 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
18427 break;
18428 }
18429 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
18430 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
18431 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
18432 default: llvm_unreachable("unexpected overflowing operator");
18433 }
18434 if (Inverted)
18435 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
18436 if (CondOpcode == ISD::UMULO)
18437 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
18438 MVT::i32);
18439 else
18440 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
18442 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
18444 if (CondOpcode == ISD::UMULO)
18445 Cond = X86Op.getValue(2);
18446 else
18447 Cond = X86Op.getValue(1);
18449 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
18450 addTest = false;
18451 } else {
18452 unsigned CondOpc;
18453 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
18454 SDValue Cmp = Cond.getOperand(0).getOperand(1);
18455 if (CondOpc == ISD::OR) {
18456 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
18457 // two branches instead of an explicit OR instruction with a
18458 // separate test.
18459 if (Cmp == Cond.getOperand(1).getOperand(1) &&
18460 isX86LogicalCmp(Cmp)) {
18461 CC = Cond.getOperand(0).getOperand(0);
18462 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
18463 Chain, Dest, CC, Cmp);
18464 CC = Cond.getOperand(1).getOperand(0);
18468 } else { // ISD::AND
18469 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
18470 // two branches instead of an explicit AND instruction with a
18471 // separate test. However, we only do this if this block doesn't
18472 // have a fall-through edge, because this requires an explicit
18473 // jmp when the condition is false.
18474 if (Cmp == Cond.getOperand(1).getOperand(1) &&
18475 isX86LogicalCmp(Cmp) &&
18476 Op.getNode()->hasOneUse()) {
18477 X86::CondCode CCode =
18478 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
18479 CCode = X86::GetOppositeBranchCondition(CCode);
18480 CC = DAG.getConstant(CCode, dl, MVT::i8);
18481 SDNode *User = *Op.getNode()->use_begin();
18482 // Look for an unconditional branch following this conditional branch.
18483 // We need this because we need to reverse the successors in order
18484 // to implement FCMP_OEQ.
18485 if (User->getOpcode() == ISD::BR) {
18486 SDValue FalseBB = User->getOperand(1);
18487 SDNode *NewBR =
18488 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
18489 assert(NewBR == User);
18490 (void)NewBR;
18491 Dest = FalseBB;
18493 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
18494 Chain, Dest, CC, Cmp);
18495 X86::CondCode CCode =
18496 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
18497 CCode = X86::GetOppositeBranchCondition(CCode);
18498 CC = DAG.getConstant(CCode, dl, MVT::i8);
18504 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
18505 // Recognize the xorb (setcc), 1 pattern. The xor inverts the condition.
18506 // It should be transformed by the DAG combiner except when the condition
18507 // is set by an arithmetic-with-overflow node.
18508 X86::CondCode CCode =
18509 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
18510 CCode = X86::GetOppositeBranchCondition(CCode);
18511 CC = DAG.getConstant(CCode, dl, MVT::i8);
18512 Cond = Cond.getOperand(0).getOperand(1);
18514 } else if (Cond.getOpcode() == ISD::SETCC &&
18515 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
18516 // For FCMP_OEQ, we can emit
18517 // two branches instead of an explicit AND instruction with a
18518 // separate test. However, we only do this if this block doesn't
18519 // have a fall-through edge, because this requires an explicit
18520 // jmp when the condition is false.
18521 if (Op.getNode()->hasOneUse()) {
18522 SDNode *User = *Op.getNode()->use_begin();
18523 // Look for an unconditional branch following this conditional branch.
18524 // We need this because we need to reverse the successors in order
18525 // to implement FCMP_OEQ.
18526 if (User->getOpcode() == ISD::BR) {
18527 SDValue FalseBB = User->getOperand(1);
18528 SDNode *NewBR =
18529 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
18530 assert(NewBR == User);
18531 (void)NewBR;
18532 Dest = FalseBB;
18534 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
18535 Cond.getOperand(0), Cond.getOperand(1));
18536 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
18537 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
18538 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
18539 Chain, Dest, CC, Cmp);
18540 CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
18545 } else if (Cond.getOpcode() == ISD::SETCC &&
18546 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
18547 // For FCMP_UNE, we can emit
18548 // two branches instead of an explicit OR instruction with a
18549 // separate test. However, we only do this if this block doesn't
18550 // have a fall-through edge, because this requires an explicit
18551 // jmp when the condition is false.
18552 if (Op.getNode()->hasOneUse()) {
18553 SDNode *User = *Op.getNode()->use_begin();
18554 // Look for an unconditional branch following this conditional branch.
18555 // We need this because we need to reverse the successors in order
18556 // to implement FCMP_UNE.
18557 if (User->getOpcode() == ISD::BR) {
18558 SDValue FalseBB = User->getOperand(1);
18559 SDNode *NewBR =
18560 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
18561 assert(NewBR == User);
18562 (void)NewBR;
18564 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
18565 Cond.getOperand(0), Cond.getOperand(1));
18566 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
18567 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
18568 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
18569 Chain, Dest, CC, Cmp);
18570 CC = DAG.getConstant(X86::COND_NP, dl, MVT::i8);
18580 // Look past the truncate if the high bits are known zero.
18581 if (isTruncWithZeroHighBitsInput(Cond, DAG))
18582 Cond = Cond.getOperand(0);
18584 // We know the result is compared against zero. Try to match it to BT.
18585 if (Cond.hasOneUse()) {
18586 if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG)) {
18587 CC = NewSetCC.getOperand(0);
18588 Cond = NewSetCC.getOperand(1);
18595 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
18596 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
18597 Cond = EmitTest(Cond, X86Cond, dl, DAG);
18599 Cond = ConvertCmpIfNecessary(Cond, DAG);
18600 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
18601 Chain, Dest, CC, Cond);
18604 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
18605 // Calls to _alloca are needed to probe the stack when allocating more than 4k
18606 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
18607 // that the guard pages used by the OS virtual memory manager are allocated in
18608 // correct sequence.
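// Illustrative example (assumes a Windows-style target): IR such as
//   %buf = alloca i8, i64 %n        ; %n may well exceed 4096 bytes
// is lowered via X86ISD::WIN_ALLOCA, which is later expanded into a call to
// the stack-probing helper (e.g. _alloca / __chkstk, depending on the
// environment) so every 4K page between the old and new stack pointer gets
// touched in order.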
18610 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
18611 SelectionDAG &DAG) const {
18612 MachineFunction &MF = DAG.getMachineFunction();
18613 bool SplitStack = MF.shouldSplitStack();
18614 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
18619 SDNode *Node = Op.getNode();
18620 SDValue Chain = Op.getOperand(0);
18621 SDValue Size = Op.getOperand(1);
18622 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
18623 EVT VT = Node->getValueType(0);
18625 // Chain the dynamic stack allocation so that it doesn't modify the stack
18626 // pointer when other instructions are using the stack.
18627 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
18629 bool Is64Bit = Subtarget.is64Bit();
18630 MVT SPTy = getPointerTy(DAG.getDataLayout());
18632 SDValue Result;
18633 if (!Lower) {
18634 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
18635 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
18636 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
18637 " not tell us which reg is the stack pointer!");
18639 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
18640 Chain = SP.getValue(1);
18641 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
18642 unsigned StackAlign = TFI.getStackAlignment();
18643 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
18644 if (Align > StackAlign)
18645 Result = DAG.getNode(ISD::AND, dl, VT, Result,
18646 DAG.getConstant(-(uint64_t)Align, dl, VT));
18647 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
18648 } else if (SplitStack) {
18649 MachineRegisterInfo &MRI = MF.getRegInfo();
18652 // The 64-bit implementation of segmented stacks needs to clobber both r10
18653 // and r11. This makes it impossible to use it along with nested parameters.
18654 const Function *F = MF.getFunction();
18655 for (const auto &A : F->args()) {
18656 if (A.hasNestAttr())
18657 report_fatal_error("Cannot use segmented stacks with functions that "
18658 "have nested arguments.");
18662 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
18663 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
18664 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
18665 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
18666 DAG.getRegister(Vreg, SPTy));
18667 } else {
18668 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18669 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
18670 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
18672 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
18673 unsigned SPReg = RegInfo->getStackRegister();
18674 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
18675 Chain = SP.getValue(1);
18678 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
18679 DAG.getConstant(-(uint64_t)Align, dl, VT));
18680 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
18686 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
18687 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
18689 SDValue Ops[2] = {Result, Chain};
18690 return DAG.getMergeValues(Ops, dl);
18693 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
18694 MachineFunction &MF = DAG.getMachineFunction();
18695 auto PtrVT = getPointerTy(MF.getDataLayout());
18696 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18698 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
18699 SDLoc DL(Op);
18701 if (!Subtarget.is64Bit() ||
18702 Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv())) {
18703 // vastart just stores the address of the VarArgsFrameIndex slot into the
18704 // memory location argument.
18705 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
18706 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
18707 MachinePointerInfo(SV));
18708 }
18710 // __va_list_tag:
18711 // gp_offset (0 - 6 * 8)
18712 // fp_offset (48 - 48 + 8 * 16)
18713 // overflow_arg_area (points to parameters coming in memory).
18714 // reg_save_area
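// Illustrative layout of the __va_list_tag the stores below populate (byte
// offsets assume the LP64 data layout; see the SysV AMD64 psABI for the
// authoritative definition):
//
//   struct __va_list_tag {
//     unsigned int gp_offset;         // offset 0
//     unsigned int fp_offset;         // offset 4
//     void        *overflow_arg_area; // offset 8
//     void        *reg_save_area;     // offset 16
//   };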
18715 SmallVector<SDValue, 8> MemOps;
18716 SDValue FIN = Op.getOperand(1);
18718 SDValue Store = DAG.getStore(
18719 Op.getOperand(0), DL,
18720 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
18721 MachinePointerInfo(SV));
18722 MemOps.push_back(Store);
18725 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
18726 Store = DAG.getStore(
18727 Op.getOperand(0), DL,
18728 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
18729 MachinePointerInfo(SV, 4));
18730 MemOps.push_back(Store);
18732 // Store ptr to overflow_arg_area
18733 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
18734 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
18736 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
18737 MemOps.push_back(Store);
18739 // Store ptr to reg_save_area.
18740 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
18741 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
18742 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
18743 Store = DAG.getStore(
18744 Op.getOperand(0), DL, RSFIN, FIN,
18745 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
18746 MemOps.push_back(Store);
18747 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
18750 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
18751 assert(Subtarget.is64Bit() &&
18752 "LowerVAARG only handles 64-bit va_arg!");
18753 assert(Op.getNumOperands() == 4);
18755 MachineFunction &MF = DAG.getMachineFunction();
18756 if (Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()))
18757 // The Win64 ABI uses char* instead of a structure.
18758 return DAG.expandVAArg(Op.getNode());
18760 SDValue Chain = Op.getOperand(0);
18761 SDValue SrcPtr = Op.getOperand(1);
18762 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
18763 unsigned Align = Op.getConstantOperandVal(3);
18764 SDLoc dl(Op);
18766 EVT ArgVT = Op.getNode()->getValueType(0);
18767 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18768 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
18771 // Decide which area this value should be read from.
18772 // TODO: Implement the AMD64 ABI in its entirety. This simple
18773 // selection mechanism works only for the basic types.
18774 if (ArgVT == MVT::f80) {
18775 llvm_unreachable("va_arg for f80 not yet implemented");
18776 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
18777 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
18778 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
18779 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
18781 llvm_unreachable("Unhandled argument type in LowerVAARG");
18784 if (ArgMode == 2) {
18785 // Sanity Check: Make sure using fp_offset makes sense.
18786 assert(!Subtarget.useSoftFloat() &&
18787 !(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
18788 Subtarget.hasSSE1());
18791 // Insert VAARG_64 node into the DAG
18792 // VAARG_64 returns two values: Variable Argument Address, Chain
18793 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
18794 DAG.getConstant(ArgMode, dl, MVT::i8),
18795 DAG.getConstant(Align, dl, MVT::i32)};
18796 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
18797 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
18798 VTs, InstOps, MVT::i64,
18799 MachinePointerInfo(SV),
18800 /*Align=*/0,
18801 /*Volatile=*/false,
18802 /*ReadMem=*/true,
18803 /*WriteMem=*/true);
18804 Chain = VAARG.getValue(1);
18806 // Load the next argument and return it
18807 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
18810 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
18811 SelectionDAG &DAG) {
18812 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
18813 // where a va_list is still an i8*.
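// Hence the 24-byte memcpy below: 4 (gp_offset) + 4 (fp_offset) +
// 8 (overflow_arg_area) + 8 (reg_save_area) = 24, copied with 8-byte
// alignment.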
18814 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
18815 if (Subtarget.isCallingConvWin64(
18816 DAG.getMachineFunction().getFunction()->getCallingConv()))
18817 // Probably a Win64 va_copy.
18818 return DAG.expandVACopy(Op.getNode());
18820 SDValue Chain = Op.getOperand(0);
18821 SDValue DstPtr = Op.getOperand(1);
18822 SDValue SrcPtr = Op.getOperand(2);
18823 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
18824 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18826 SDLoc DL(Op);
18827 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
18828 DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
18829 false, false,
18830 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
18833 /// Handle vector element shifts where the shift amount is a constant.
18834 /// Takes immediate version of shift as input.
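/// Rough illustration of the constant-folding path below (assuming v4i32 and
/// Opc == X86ISD::VSHLI):
///   (VSHLI build_vector<1,2,3,4>, 2) --> build_vector<4,8,12,16>
/// Out-of-range shift amounts yield 0, except for VSRAI, which clamps the
/// amount to (element width - 1).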
18835 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
18836 SDValue SrcOp, uint64_t ShiftAmt,
18837 SelectionDAG &DAG) {
18838 MVT ElementType = VT.getVectorElementType();
18840 // Bitcast the source vector to the output type; this is mainly necessary for
18841 // vXi8/vXi64 shifts.
18842 if (VT != SrcOp.getSimpleValueType())
18843 SrcOp = DAG.getBitcast(VT, SrcOp);
18845 // Fold this packed shift into its first operand if ShiftAmt is 0.
18849 // Check for ShiftAmt >= element width
18850 if (ShiftAmt >= ElementType.getSizeInBits()) {
18851 if (Opc == X86ISD::VSRAI)
18852 ShiftAmt = ElementType.getSizeInBits() - 1;
18854 return DAG.getConstant(0, dl, VT);
18857 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
18858 && "Unknown target vector shift-by-constant node");
18860 // Fold this packed vector shift into a build vector if SrcOp is a
18861 // vector of Constants or UNDEFs.
18862 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
18863 SmallVector<SDValue, 8> Elts;
18864 unsigned NumElts = SrcOp->getNumOperands();
18865 ConstantSDNode *ND;
18868 default: llvm_unreachable("Unknown opcode!");
18869 case X86ISD::VSHLI:
18870 for (unsigned i=0; i!=NumElts; ++i) {
18871 SDValue CurrentOp = SrcOp->getOperand(i);
18872 if (CurrentOp->isUndef()) {
18873 Elts.push_back(CurrentOp);
18876 ND = cast<ConstantSDNode>(CurrentOp);
18877 const APInt &C = ND->getAPIntValue();
18878 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
18881 case X86ISD::VSRLI:
18882 for (unsigned i=0; i!=NumElts; ++i) {
18883 SDValue CurrentOp = SrcOp->getOperand(i);
18884 if (CurrentOp->isUndef()) {
18885 Elts.push_back(CurrentOp);
18888 ND = cast<ConstantSDNode>(CurrentOp);
18889 const APInt &C = ND->getAPIntValue();
18890 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
18893 case X86ISD::VSRAI:
18894 for (unsigned i=0; i!=NumElts; ++i) {
18895 SDValue CurrentOp = SrcOp->getOperand(i);
18896 if (CurrentOp->isUndef()) {
18897 Elts.push_back(CurrentOp);
18900 ND = cast<ConstantSDNode>(CurrentOp);
18901 const APInt &C = ND->getAPIntValue();
18902 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
18907 return DAG.getBuildVector(VT, dl, Elts);
18910 return DAG.getNode(Opc, dl, VT, SrcOp,
18911 DAG.getConstant(ShiftAmt, dl, MVT::i8));
18914 /// Handle vector element shifts where the shift amount may or may not be a
18915 /// constant. Takes immediate version of shift as input.
18916 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
18917 SDValue SrcOp, SDValue ShAmt,
18918 const X86Subtarget &Subtarget,
18919 SelectionDAG &DAG) {
18920 MVT SVT = ShAmt.getSimpleValueType();
18921 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
18923 // Catch shift-by-constant.
18924 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
18925 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
18926 CShAmt->getZExtValue(), DAG);
18928 // Change opcode to non-immediate version
18930 default: llvm_unreachable("Unknown target vector shift node");
18931 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
18932 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
18933 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
18936 // Need to build a vector containing shift amount.
18937 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
18938 // +=================+============+=======================================+
18939 // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
18940 // +=================+============+=======================================+
18941 // | i64 | Yes, No | Use ShAmt as lowest elt |
18942 // | i32 | Yes | zero-extend in-reg |
18943 // | (i32 zext(i16)) | Yes | zero-extend in-reg |
18944 // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
18945 // +=================+============+=======================================+
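// For instance, with SSE2 only and an i32 shift amount %s, the count is
// materialized as (v4i32 build_vector %s, 0, undef, undef) and then bitcast
// to the 128-bit shift-count type; the target shift reads only its low
// 64 bits, which the zero in lane 1 keeps well-defined.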
18947 if (SVT == MVT::i64)
18948 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
18949 else if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
18950 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
18951 ShAmt = ShAmt.getOperand(0);
18952 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v8i16, ShAmt);
18953 ShAmt = DAG.getZeroExtendVectorInReg(ShAmt, SDLoc(ShAmt), MVT::v2i64);
18954 } else if (Subtarget.hasSSE41() &&
18955 ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
18956 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
18957 ShAmt = DAG.getZeroExtendVectorInReg(ShAmt, SDLoc(ShAmt), MVT::v2i64);
18959 SmallVector<SDValue, 4> ShOps = {ShAmt, DAG.getConstant(0, dl, SVT),
18960 DAG.getUNDEF(SVT), DAG.getUNDEF(SVT)};
18961 ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
18964 // The return type has to be a 128-bit type with the same element
18965 // type as the input type.
18966 MVT EltVT = VT.getVectorElementType();
18967 MVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
18969 ShAmt = DAG.getBitcast(ShVT, ShAmt);
18970 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
18973 /// \brief Return Mask with the necessary casting or extending
18974 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
18975 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
18976 const X86Subtarget &Subtarget, SelectionDAG &DAG,
18977 const SDLoc &dl) {
18979 if (isAllOnesConstant(Mask))
18980 return DAG.getTargetConstant(1, dl, MaskVT);
18981 if (X86::isZeroNode(Mask))
18982 return DAG.getTargetConstant(0, dl, MaskVT);
18984 if (MaskVT.bitsGT(Mask.getSimpleValueType())) {
18985 // Mask should be extended
18986 Mask = DAG.getNode(ISD::ANY_EXTEND, dl,
18987 MVT::getIntegerVT(MaskVT.getSizeInBits()), Mask);
18990 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
18991 if (MaskVT == MVT::v64i1) {
18992 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
18993 // In 32-bit mode a bitcast of i64 is illegal; split it into two i32 halves.
18994 SDValue Lo, Hi;
18995 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
18996 DAG.getConstant(0, dl, MVT::i32));
18997 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
18998 DAG.getConstant(1, dl, MVT::i32));
19000 Lo = DAG.getBitcast(MVT::v32i1, Lo);
19001 Hi = DAG.getBitcast(MVT::v32i1, Hi);
19003 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
19004 }
19005 // MaskVT requires < 64 bits. Truncate the mask (should succeed in any case)
19006 // and bitcast it to the required VT.
19007 MVT TruncVT = MVT::getIntegerVT(MaskVT.getSizeInBits());
19008 return DAG.getBitcast(MaskVT,
19009 DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Mask));
19013 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
19014 Mask.getSimpleValueType().getSizeInBits());
19015 // In the case where MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
19016 // are extracted by EXTRACT_SUBVECTOR.
19017 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
19018 DAG.getBitcast(BitcastVT, Mask),
19019 DAG.getIntPtrConstant(0, dl));
19023 /// \brief Return (and \p Op, \p Mask) for compare instructions or
19024 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
19025 /// necessary casting or extending for \p Mask when lowering masking intrinsics
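/// Sketch (illustrative only): for a masked intrinsic such as
///   (v8f32 op, i8 mask, v8f32 passthru)
/// the i8 mask is turned into a v8i1 value and the result generally becomes
///   (vselect v8i1-mask, op, passthru)
/// while compare-style nodes are instead combined with the mask via AND/OR.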
19026 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
19027 SDValue PreservedSrc,
19028 const X86Subtarget &Subtarget,
19029 SelectionDAG &DAG) {
19030 MVT VT = Op.getSimpleValueType();
19031 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
19032 unsigned OpcodeSelect = ISD::VSELECT;
19033 SDLoc dl(Op);
19035 if (isAllOnesConstant(Mask))
19036 return Op;
19038 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
19040 switch (Op.getOpcode()) {
19042 case X86ISD::PCMPEQM:
19043 case X86ISD::PCMPGTM:
19045 case X86ISD::CMPMU:
19046 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
19047 case X86ISD::VFPCLASS:
19048 case X86ISD::VFPCLASSS:
19049 return DAG.getNode(ISD::OR, dl, VT, Op, VMask);
19050 case X86ISD::VTRUNC:
19051 case X86ISD::VTRUNCS:
19052 case X86ISD::VTRUNCUS:
19053 case X86ISD::CVTPS2PH:
19054 // We can't use ISD::VSELECT here because it is not always "Legal"
19055 // for the destination type. For example, vpmovqb requires only AVX512,
19056 // while a vselect that operates on byte elements requires BWI.
19057 OpcodeSelect = X86ISD::SELECT;
19060 if (PreservedSrc.isUndef())
19061 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
19062 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
19065 /// \brief Creates an SDNode for a predicated scalar operation.
19066 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
19067 /// The mask comes in as MVT::i8 and is transformed
19068 /// to MVT::v1i1 while lowering masking intrinsics.
19069 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
19070 /// "X86select" instead of "vselect". We just can't create the "vselect" node
19071 /// for a scalar instruction.
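/// Sketch (illustrative only): with an i8 mask %k, bit 0 of %k is moved into
/// a v1i1 value and the result becomes (X86ISD::SELECTS v1i1(%k & 1), op,
/// passthru); an all-ones constant mask simply returns op unchanged.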
19072 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
19073 SDValue PreservedSrc,
19074 const X86Subtarget &Subtarget,
19075 SelectionDAG &DAG) {
19077 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
19078 if (MaskConst->getZExtValue() & 0x1)
19079 return Op;
19081 MVT VT = Op.getSimpleValueType();
19082 SDLoc dl(Op);
19084 SDValue IMask = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Mask);
19085 if (Op.getOpcode() == X86ISD::FSETCCM ||
19086 Op.getOpcode() == X86ISD::FSETCCM_RND)
19087 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
19088 if (Op.getOpcode() == X86ISD::VFPCLASSS)
19089 return DAG.getNode(ISD::OR, dl, VT, Op, IMask);
19091 if (PreservedSrc.isUndef())
19092 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
19093 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
19096 static int getSEHRegistrationNodeSize(const Function *Fn) {
19097 if (!Fn->hasPersonalityFn())
19098 report_fatal_error(
19099 "querying registration node size for function without personality");
19100 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
19101 // WinEHStatePass for the full struct definition.
19102 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
19103 case EHPersonality::MSVC_X86SEH: return 24;
19104 case EHPersonality::MSVC_CXX: return 16;
19105 default: break;
19106 }
19107 report_fatal_error(
19108 "can only recover FP for 32-bit MSVC EH personality functions");
19111 /// When the MSVC runtime transfers control to us, either to an outlined
19112 /// function or when returning to a parent frame after catching an exception, we
19113 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
19114 /// Here's the math:
19115 /// RegNodeBase = EntryEBP - RegNodeSize
19116 /// ParentFP = RegNodeBase - ParentFrameOffset
19117 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
19118 /// subtracting the offset (negative on x86) takes us back to the parent FP.
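/// Worked example with made-up numbers: for a 32-bit MSVC C++ EH function
/// (RegNodeSize = 16), with EntryEBP = 0x1000 and ParentFrameOffset = -64:
///   RegNodeBase = 0x1000 - 16    = 0x0FF0
///   ParentFP    = 0x0FF0 - (-64) = 0x1030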
19119 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
19120 SDValue EntryEBP) {
19121 MachineFunction &MF = DAG.getMachineFunction();
19122 SDLoc dl;
19124 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19125 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
19127 // It's possible that the parent function no longer has a personality function
19128 // if the exceptional code was optimized away, in which case we just return
19129 // the incoming EBP.
19130 if (!Fn->hasPersonalityFn())
19131 return EntryEBP;
19133 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
19134 // registration, or the .set_setframe offset.
19135 MCSymbol *OffsetSym =
19136 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
19137 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
19138 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
19139 SDValue ParentFrameOffset =
19140 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
19142 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
19143 // prologue to RBP in the parent function.
19144 const X86Subtarget &Subtarget =
19145 static_cast<const X86Subtarget &>(DAG.getSubtarget());
19146 if (Subtarget.is64Bit())
19147 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
19149 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
19150 // RegNodeBase = EntryEBP - RegNodeSize
19151 // ParentFP = RegNodeBase - ParentFrameOffset
19152 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
19153 DAG.getConstant(RegNodeSize, dl, PtrVT));
19154 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
19157 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
19158 SelectionDAG &DAG) {
19159 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
19160 auto isRoundModeCurDirection = [](SDValue Rnd) {
19161 if (!isa<ConstantSDNode>(Rnd))
19162 return false;
19164 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
19165 return Round == X86::STATIC_ROUNDING::CUR_DIRECTION;
19166 };
19168 SDLoc dl(Op);
19169 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
19170 MVT VT = Op.getSimpleValueType();
19171 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
19172 if (IntrData) {
19173 switch(IntrData->Type) {
19174 case INTR_TYPE_1OP:
19175 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
19176 case INTR_TYPE_2OP:
19177 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
19179 case INTR_TYPE_3OP:
19180 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
19181 Op.getOperand(2), Op.getOperand(3));
19182 case INTR_TYPE_4OP:
19183 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
19184 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
19185 case INTR_TYPE_1OP_MASK_RM: {
19186 SDValue Src = Op.getOperand(1);
19187 SDValue PassThru = Op.getOperand(2);
19188 SDValue Mask = Op.getOperand(3);
19189 SDValue RoundingMode;
19190 // We always add rounding mode to the Node.
19191 // If the rounding mode is not specified, we add the
19192 // "current direction" mode.
19193 if (Op.getNumOperands() == 4)
19194 RoundingMode =
19195 DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
19196 else
19197 RoundingMode = Op.getOperand(4);
19198 assert(IntrData->Opc1 == 0 && "Unexpected second opcode!");
19199 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
19201 Mask, PassThru, Subtarget, DAG);
19203 case INTR_TYPE_1OP_MASK: {
19204 SDValue Src = Op.getOperand(1);
19205 SDValue PassThru = Op.getOperand(2);
19206 SDValue Mask = Op.getOperand(3);
19207 // We add rounding mode to the Node when
19208 // - RM Opcode is specified and
19209 // - RM is not "current direction".
19210 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
19211 if (IntrWithRoundingModeOpcode != 0) {
19212 SDValue Rnd = Op.getOperand(4);
19213 if (!isRoundModeCurDirection(Rnd)) {
19214 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
19215 dl, Op.getValueType(),
19217 Mask, PassThru, Subtarget, DAG);
19220 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
19221 Mask, PassThru, Subtarget, DAG);
19223 case INTR_TYPE_SCALAR_MASK: {
19224 SDValue Src1 = Op.getOperand(1);
19225 SDValue Src2 = Op.getOperand(2);
19226 SDValue passThru = Op.getOperand(3);
19227 SDValue Mask = Op.getOperand(4);
19228 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
19229 if (IntrWithRoundingModeOpcode != 0) {
19230 SDValue Rnd = Op.getOperand(5);
19231 if (!isRoundModeCurDirection(Rnd))
19232 return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
19233 dl, VT, Src1, Src2, Rnd),
19234 Mask, passThru, Subtarget, DAG);
19236 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2),
19237 Mask, passThru, Subtarget, DAG);
19239 case INTR_TYPE_SCALAR_MASK_RM: {
19240 SDValue Src1 = Op.getOperand(1);
19241 SDValue Src2 = Op.getOperand(2);
19242 SDValue Src0 = Op.getOperand(3);
19243 SDValue Mask = Op.getOperand(4);
19244 // There are 2 kinds of intrinsics in this group:
19245 // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
19246 // (2) With rounding mode and sae - 7 operands.
19247 if (Op.getNumOperands() == 6) {
19248 SDValue Sae = Op.getOperand(5);
19249 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
19251 Mask, Src0, Subtarget, DAG);
19253 assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form");
19254 SDValue RoundingMode = Op.getOperand(5);
19255 SDValue Sae = Op.getOperand(6);
19256 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
19257 RoundingMode, Sae),
19258 Mask, Src0, Subtarget, DAG);
19260 case INTR_TYPE_2OP_MASK:
19261 case INTR_TYPE_2OP_IMM8_MASK: {
19262 SDValue Src1 = Op.getOperand(1);
19263 SDValue Src2 = Op.getOperand(2);
19264 SDValue PassThru = Op.getOperand(3);
19265 SDValue Mask = Op.getOperand(4);
19267 if (IntrData->Type == INTR_TYPE_2OP_IMM8_MASK)
19268 Src2 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src2);
19270 // We specify 2 possible opcodes for intrinsics with rounding modes.
19271 // First, we check if the intrinsic may have non-default rounding mode,
19272 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
19273 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
19274 if (IntrWithRoundingModeOpcode != 0) {
19275 SDValue Rnd = Op.getOperand(5);
19276 if (!isRoundModeCurDirection(Rnd)) {
19277 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
19278 dl, Op.getValueType(),
19280 Mask, PassThru, Subtarget, DAG);
19283 // TODO: Intrinsics should have fast-math-flags to propagate.
19284 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,Src1,Src2),
19285 Mask, PassThru, Subtarget, DAG);
19287 case INTR_TYPE_2OP_MASK_RM: {
19288 SDValue Src1 = Op.getOperand(1);
19289 SDValue Src2 = Op.getOperand(2);
19290 SDValue PassThru = Op.getOperand(3);
19291 SDValue Mask = Op.getOperand(4);
19292 // We specify 2 possible modes for intrinsics, with/without rounding
19293 // mode.
19294 // First, we check if the intrinsic has a rounding mode (6 operands);
19295 // if not, we set the rounding mode to "current".
19296 SDValue Rnd;
19297 if (Op.getNumOperands() == 6)
19298 Rnd = Op.getOperand(5);
19299 else
19300 Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
19301 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
19303 Mask, PassThru, Subtarget, DAG);
19305 case INTR_TYPE_3OP_SCALAR_MASK_RM: {
19306 SDValue Src1 = Op.getOperand(1);
19307 SDValue Src2 = Op.getOperand(2);
19308 SDValue Src3 = Op.getOperand(3);
19309 SDValue PassThru = Op.getOperand(4);
19310 SDValue Mask = Op.getOperand(5);
19311 SDValue Sae = Op.getOperand(6);
19313 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
19315 Mask, PassThru, Subtarget, DAG);
19317 case INTR_TYPE_3OP_MASK_RM: {
19318 SDValue Src1 = Op.getOperand(1);
19319 SDValue Src2 = Op.getOperand(2);
19320 SDValue Imm = Op.getOperand(3);
19321 SDValue PassThru = Op.getOperand(4);
19322 SDValue Mask = Op.getOperand(5);
19323 // We specify 2 possible modes for intrinsics, with/without rounding
19324 // mode.
19325 // First, we check if the intrinsic has a rounding mode (7 operands);
19326 // if not, we set the rounding mode to "current".
19327 SDValue Rnd;
19328 if (Op.getNumOperands() == 7)
19329 Rnd = Op.getOperand(6);
19330 else
19331 Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
19332 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
19333 Src1, Src2, Imm, Rnd),
19334 Mask, PassThru, Subtarget, DAG);
19336 case INTR_TYPE_3OP_IMM8_MASK:
19337 case INTR_TYPE_3OP_MASK: {
19338 SDValue Src1 = Op.getOperand(1);
19339 SDValue Src2 = Op.getOperand(2);
19340 SDValue Src3 = Op.getOperand(3);
19341 SDValue PassThru = Op.getOperand(4);
19342 SDValue Mask = Op.getOperand(5);
19344 if (IntrData->Type == INTR_TYPE_3OP_IMM8_MASK)
19345 Src3 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src3);
19347 // We specify 2 possible opcodes for intrinsics with rounding modes.
19348 // First, we check if the intrinsic may have non-default rounding mode,
19349 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
19350 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
19351 if (IntrWithRoundingModeOpcode != 0) {
19352 SDValue Rnd = Op.getOperand(6);
19353 if (!isRoundModeCurDirection(Rnd)) {
19354 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
19355 dl, Op.getValueType(),
19356 Src1, Src2, Src3, Rnd),
19357 Mask, PassThru, Subtarget, DAG);
19360 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
19362 Mask, PassThru, Subtarget, DAG);
19364 case VPERM_2OP_MASK : {
19365 SDValue Src1 = Op.getOperand(1);
19366 SDValue Src2 = Op.getOperand(2);
19367 SDValue PassThru = Op.getOperand(3);
19368 SDValue Mask = Op.getOperand(4);
19370 // Swap Src1 and Src2 in the node creation
19371 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1),
19372 Mask, PassThru, Subtarget, DAG);
19374 case VPERM_3OP_MASKZ:
19375 case VPERM_3OP_MASK:{
19376 MVT VT = Op.getSimpleValueType();
19377 // Src2 is the PassThru
19378 SDValue Src1 = Op.getOperand(1);
19379 // PassThru needs to be the same type as the destination in order
19380 // to pattern match correctly.
19381 SDValue Src2 = DAG.getBitcast(VT, Op.getOperand(2));
19382 SDValue Src3 = Op.getOperand(3);
19383 SDValue Mask = Op.getOperand(4);
19384 SDValue PassThru = SDValue();
19386 // set PassThru element
19387 if (IntrData->Type == VPERM_3OP_MASKZ)
19388 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
19392 // Swap Src1 and Src2 in the node creation
19393 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
19394 dl, Op.getValueType(),
19396 Mask, PassThru, Subtarget, DAG);
19400 case FMA_OP_MASK: {
19401 SDValue Src1 = Op.getOperand(1);
19402 SDValue Src2 = Op.getOperand(2);
19403 SDValue Src3 = Op.getOperand(3);
19404 SDValue Mask = Op.getOperand(4);
19405 MVT VT = Op.getSimpleValueType();
19406 SDValue PassThru = SDValue();
19408 // set PassThru element
19409 if (IntrData->Type == FMA_OP_MASKZ)
19410 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
19411 else if (IntrData->Type == FMA_OP_MASK3)
19416 // We specify 2 possible opcodes for intrinsics with rounding modes.
19417 // First, we check if the intrinsic may have non-default rounding mode,
19418 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
19419 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
19420 if (IntrWithRoundingModeOpcode != 0) {
19421 SDValue Rnd = Op.getOperand(5);
19422 if (!isRoundModeCurDirection(Rnd))
19423 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
19424 dl, Op.getValueType(),
19425 Src1, Src2, Src3, Rnd),
19426 Mask, PassThru, Subtarget, DAG);
19428 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
19429 dl, Op.getValueType(),
19431 Mask, PassThru, Subtarget, DAG);
19433 case FMA_OP_SCALAR_MASK:
19434 case FMA_OP_SCALAR_MASK3:
19435 case FMA_OP_SCALAR_MASKZ: {
19436 SDValue Src1 = Op.getOperand(1);
19437 SDValue Src2 = Op.getOperand(2);
19438 SDValue Src3 = Op.getOperand(3);
19439 SDValue Mask = Op.getOperand(4);
19440 MVT VT = Op.getSimpleValueType();
19441 SDValue PassThru = SDValue();
19443 // set PassThru element
19444 if (IntrData->Type == FMA_OP_SCALAR_MASKZ)
19445 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
19446 else if (IntrData->Type == FMA_OP_SCALAR_MASK3)
19451 SDValue Rnd = Op.getOperand(5);
19452 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl,
19453 Op.getValueType(), Src1, Src2,
19455 Mask, PassThru, Subtarget, DAG);
19457 case TERLOG_OP_MASK:
19458 case TERLOG_OP_MASKZ: {
19459 SDValue Src1 = Op.getOperand(1);
19460 SDValue Src2 = Op.getOperand(2);
19461 SDValue Src3 = Op.getOperand(3);
19462 SDValue Src4 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(4));
19463 SDValue Mask = Op.getOperand(5);
19464 MVT VT = Op.getSimpleValueType();
19465 SDValue PassThru = Src1;
19466 // Set PassThru element.
19467 if (IntrData->Type == TERLOG_OP_MASKZ)
19468 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
19470 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
19471 Src1, Src2, Src3, Src4),
19472 Mask, PassThru, Subtarget, DAG);
19475 // ISD::FP_ROUND has a second argument that indicates if the truncation
19476 // does not change the value. Set it to 0 since it can change.
19477 return DAG.getNode(IntrData->Opc0, dl, VT, Op.getOperand(1),
19478 DAG.getIntPtrConstant(0, dl));
19479 case CVTPD2PS_MASK: {
19480 SDValue Src = Op.getOperand(1);
19481 SDValue PassThru = Op.getOperand(2);
19482 SDValue Mask = Op.getOperand(3);
19483 // We add rounding mode to the Node when
19484 // - RM Opcode is specified and
19485 // - RM is not "current direction".
19486 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
19487 if (IntrWithRoundingModeOpcode != 0) {
19488 SDValue Rnd = Op.getOperand(4);
19489 if (!isRoundModeCurDirection(Rnd)) {
19490 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
19491 dl, Op.getValueType(),
19493 Mask, PassThru, Subtarget, DAG);
19496 assert(IntrData->Opc0 == ISD::FP_ROUND && "Unexpected opcode!");
19497 // ISD::FP_ROUND has a second argument that indicates if the truncation
19498 // does not change the value. Set it to 0 since it can change.
19499 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
19500 DAG.getIntPtrConstant(0, dl)),
19501 Mask, PassThru, Subtarget, DAG);
19504 // FPclass intrinsics with mask
19505 SDValue Src1 = Op.getOperand(1);
19506 MVT VT = Src1.getSimpleValueType();
19507 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
19508 SDValue Imm = Op.getOperand(2);
19509 SDValue Mask = Op.getOperand(3);
19510 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
19511 Mask.getSimpleValueType().getSizeInBits());
19512 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MaskVT, Src1, Imm);
19513 SDValue FPclassMask = getVectorMaskingNode(FPclass, Mask,
19514 DAG.getTargetConstant(0, dl, MaskVT),
19516 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
19517 DAG.getUNDEF(BitcastVT), FPclassMask,
19518 DAG.getIntPtrConstant(0, dl));
19519 return DAG.getBitcast(Op.getValueType(), Res);
19522 SDValue Src1 = Op.getOperand(1);
19523 SDValue Imm = Op.getOperand(2);
19524 SDValue Mask = Op.getOperand(3);
19525 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
19526 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask,
19527 DAG.getTargetConstant(0, dl, MVT::i1), Subtarget, DAG);
19528 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i8, FPclassMask,
19529 DAG.getIntPtrConstant(0, dl));
19532 case CMP_MASK_CC: {
19533 // Comparison intrinsics with masks.
19534 // Example of transformation:
19535 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
19536 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
19538 // (v8i1 (insert_subvector undef,
19539 // (v2i1 (and (PCMPEQM %a, %b),
19540 // (extract_subvector
19541 // (v8i1 (bitcast %mask)), 0))), 0))))
19542 MVT VT = Op.getOperand(1).getSimpleValueType();
19543 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
19544 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
19545 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
19546 Mask.getSimpleValueType().getSizeInBits());
19548 if (IntrData->Type == CMP_MASK_CC) {
19549 SDValue CC = Op.getOperand(3);
19550 CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CC);
19551 // We specify 2 possible opcodes for intrinsics with rounding modes.
19552 // First, we check if the intrinsic may have non-default rounding mode,
19553 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
19554 if (IntrData->Opc1 != 0) {
19555 SDValue Rnd = Op.getOperand(5);
19556 if (!isRoundModeCurDirection(Rnd))
19557 Cmp = DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
19558 Op.getOperand(2), CC, Rnd);
19560 //default rounding mode
19562 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
19563 Op.getOperand(2), CC);
19566 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
19567 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
19570 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
19571 DAG.getTargetConstant(0, dl,
19574 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
19575 DAG.getUNDEF(BitcastVT), CmpMask,
19576 DAG.getIntPtrConstant(0, dl));
19577 return DAG.getBitcast(Op.getValueType(), Res);
19579 case CMP_MASK_SCALAR_CC: {
19580 SDValue Src1 = Op.getOperand(1);
19581 SDValue Src2 = Op.getOperand(2);
19582 SDValue CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(3));
19583 SDValue Mask = Op.getOperand(4);
19586 if (IntrData->Opc1 != 0) {
19587 SDValue Rnd = Op.getOperand(5);
19588 if (!isRoundModeCurDirection(Rnd))
19589 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Rnd);
19591 //default rounding mode
19593 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
19595 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask,
19596 DAG.getTargetConstant(0, dl,
19599 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i8, CmpMask,
19600 DAG.getIntPtrConstant(0, dl));
19602 case COMI: { // Comparison intrinsics
19603 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
19604 SDValue LHS = Op.getOperand(1);
19605 SDValue RHS = Op.getOperand(2);
19606 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
19607 SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
19610 case ISD::SETEQ: { // (ZF = 0 and PF = 0)
19611 SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
19612 SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
19613 SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
19616 case ISD::SETNE: { // (ZF = 1 or PF = 1)
19617 SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
19618 SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
19619 SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
19622 case ISD::SETGT: // (CF = 0 and ZF = 0)
19623 SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
19625 case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
19626 SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
19629 case ISD::SETGE: // CF = 0
19630 SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
19632 case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
19633 SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
19636 llvm_unreachable("Unexpected illegal condition!");
19638 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
19640 case COMI_RM: { // Comparison intrinsics with Sae
19641 SDValue LHS = Op.getOperand(1);
19642 SDValue RHS = Op.getOperand(2);
19643 unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
19644 SDValue Sae = Op.getOperand(4);
19647 if (isRoundModeCurDirection(Sae))
19648 FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
19649 DAG.getConstant(CondVal, dl, MVT::i8));
19651 FCmp = DAG.getNode(X86ISD::FSETCCM_RND, dl, MVT::v1i1, LHS, RHS,
19652 DAG.getConstant(CondVal, dl, MVT::i8), Sae);
19653 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i32, FCmp,
19654 DAG.getIntPtrConstant(0, dl));
19657 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
19658 Op.getOperand(1), Op.getOperand(2), Subtarget,
19660 case COMPRESS_EXPAND_IN_REG: {
19661 SDValue Mask = Op.getOperand(3);
19662 SDValue DataToCompress = Op.getOperand(1);
19663 SDValue PassThru = Op.getOperand(2);
19664 if (isAllOnesConstant(Mask)) // return data as is
19665 return Op.getOperand(1);
19667 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
19669 Mask, PassThru, Subtarget, DAG);
19672 SDValue Mask = Op.getOperand(1);
19673 MVT MaskVT = MVT::getVectorVT(MVT::i1,
19674 Mask.getSimpleValueType().getSizeInBits());
19675 Mask = DAG.getBitcast(MaskVT, Mask);
19676 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Mask);
19679 MVT VT = Op.getSimpleValueType();
19680 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits()/2);
19682 SDValue Src1 = getMaskNode(Op.getOperand(1), MaskVT, Subtarget, DAG, dl);
19683 SDValue Src2 = getMaskNode(Op.getOperand(2), MaskVT, Subtarget, DAG, dl);
19684 // Arguments should be swapped.
19685 SDValue Res = DAG.getNode(IntrData->Opc0, dl,
19686 MVT::getVectorVT(MVT::i1, VT.getSizeInBits()),
19688 return DAG.getBitcast(VT, Res);
19691 MVT VT = Op.getSimpleValueType();
19692 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits());
19694 SDValue Src1 = getMaskNode(Op.getOperand(1), MaskVT, Subtarget, DAG, dl);
19695 SDValue Src2 = getMaskNode(Op.getOperand(2), MaskVT, Subtarget, DAG, dl);
19696 SDValue Res = DAG.getNode(IntrData->Opc0, dl, MaskVT, Src1, Src2);
19697 return DAG.getBitcast(VT, Res);
19700 case FIXUPIMMS_MASKZ:
19702 case FIXUPIMM_MASKZ:{
19703 SDValue Src1 = Op.getOperand(1);
19704 SDValue Src2 = Op.getOperand(2);
19705 SDValue Src3 = Op.getOperand(3);
19706 SDValue Imm = Op.getOperand(4);
19707 SDValue Mask = Op.getOperand(5);
19708 SDValue Passthru = (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMMS ) ?
19709 Src1 : getZeroVector(VT, Subtarget, DAG, dl);
19710 // We specify 2 possible modes for intrinsics, with/without rounding
19711 // mode.
19712 // First, we check if the intrinsic has a rounding mode (7 operands);
19713 // if not, we set the rounding mode to "current".
19714 SDValue Rnd;
19715 if (Op.getNumOperands() == 7)
19716 Rnd = Op.getOperand(6);
19717 else
19718 Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
19719 if (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMM_MASKZ)
19720 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
19721 Src1, Src2, Src3, Imm, Rnd),
19722 Mask, Passthru, Subtarget, DAG);
19723 else // Scalar - FIXUPIMMS, FIXUPIMMS_MASKZ
19724 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
19725 Src1, Src2, Src3, Imm, Rnd),
19726 Mask, Passthru, Subtarget, DAG);
19728 case CONVERT_TO_MASK: {
19729 MVT SrcVT = Op.getOperand(1).getSimpleValueType();
19730 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
19731 MVT BitcastVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits());
19733 SDValue CvtMask = DAG.getNode(IntrData->Opc0, dl, MaskVT,
19735 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
19736 DAG.getUNDEF(BitcastVT), CvtMask,
19737 DAG.getIntPtrConstant(0, dl));
19738 return DAG.getBitcast(Op.getValueType(), Res);
19740 case BRCST_SUBVEC_TO_VEC: {
19741 SDValue Src = Op.getOperand(1);
19742 SDValue Passthru = Op.getOperand(2);
19743 SDValue Mask = Op.getOperand(3);
19744 EVT resVT = Passthru.getValueType();
19745 SDValue subVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, resVT,
19746 DAG.getUNDEF(resVT), Src,
19747 DAG.getIntPtrConstant(0, dl));
19749 if (Src.getSimpleValueType().is256BitVector() && resVT.is512BitVector())
19750 immVal = DAG.getConstant(0x44, dl, MVT::i8);
19752 immVal = DAG.getConstant(0, dl, MVT::i8);
19753 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
19754 subVec, subVec, immVal),
19755 Mask, Passthru, Subtarget, DAG);
19757 case BRCST32x2_TO_VEC: {
19758 SDValue Src = Op.getOperand(1);
19759 SDValue PassThru = Op.getOperand(2);
19760 SDValue Mask = Op.getOperand(3);
19762 assert((VT.getScalarType() == MVT::i32 ||
19763 VT.getScalarType() == MVT::f32) && "Unexpected type!");
19764 // Bitcast Src to a packed 64-bit element type.
19765 MVT ScalarVT = VT.getScalarType() == MVT::i32 ? MVT::i64 : MVT::f64;
19766 MVT BitcastVT = MVT::getVectorVT(ScalarVT, Src.getValueSizeInBits()/64);
19767 Src = DAG.getBitcast(BitcastVT, Src);
19769 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
19770 Mask, PassThru, Subtarget, DAG);
19778 default: return SDValue(); // Don't custom lower most intrinsics.
19780 case Intrinsic::x86_avx2_permd:
19781 case Intrinsic::x86_avx2_permps:
19782 // Operands intentionally swapped. Mask is last operand to intrinsic,
19783 // but second operand for node/instruction.
19784 return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
19785 Op.getOperand(2), Op.getOperand(1));
19787 // ptest and testp intrinsics. The intrinsics these come from are designed to
19788 // return an integer value, not just an instruction, so lower them to the
19789 // ptest or testp pattern and a setcc for the result.
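// E.g. a call to int_x86_sse41_ptestz(%a, %b) roughly becomes
//   (i32 zext (setcc COND_E, (X86ISD::PTEST %a, %b)))
// with the condition code chosen per intrinsic in the switch below.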
19790 case Intrinsic::x86_sse41_ptestz:
19791 case Intrinsic::x86_sse41_ptestc:
19792 case Intrinsic::x86_sse41_ptestnzc:
19793 case Intrinsic::x86_avx_ptestz_256:
19794 case Intrinsic::x86_avx_ptestc_256:
19795 case Intrinsic::x86_avx_ptestnzc_256:
19796 case Intrinsic::x86_avx_vtestz_ps:
19797 case Intrinsic::x86_avx_vtestc_ps:
19798 case Intrinsic::x86_avx_vtestnzc_ps:
19799 case Intrinsic::x86_avx_vtestz_pd:
19800 case Intrinsic::x86_avx_vtestc_pd:
19801 case Intrinsic::x86_avx_vtestnzc_pd:
19802 case Intrinsic::x86_avx_vtestz_ps_256:
19803 case Intrinsic::x86_avx_vtestc_ps_256:
19804 case Intrinsic::x86_avx_vtestnzc_ps_256:
19805 case Intrinsic::x86_avx_vtestz_pd_256:
19806 case Intrinsic::x86_avx_vtestc_pd_256:
19807 case Intrinsic::x86_avx_vtestnzc_pd_256: {
19808 bool IsTestPacked = false;
19809 X86::CondCode X86CC;
19811 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
19812 case Intrinsic::x86_avx_vtestz_ps:
19813 case Intrinsic::x86_avx_vtestz_pd:
19814 case Intrinsic::x86_avx_vtestz_ps_256:
19815 case Intrinsic::x86_avx_vtestz_pd_256:
19816 IsTestPacked = true;
19818 case Intrinsic::x86_sse41_ptestz:
19819 case Intrinsic::x86_avx_ptestz_256:
19821 X86CC = X86::COND_E;
19823 case Intrinsic::x86_avx_vtestc_ps:
19824 case Intrinsic::x86_avx_vtestc_pd:
19825 case Intrinsic::x86_avx_vtestc_ps_256:
19826 case Intrinsic::x86_avx_vtestc_pd_256:
19827 IsTestPacked = true;
19829 case Intrinsic::x86_sse41_ptestc:
19830 case Intrinsic::x86_avx_ptestc_256:
19832 X86CC = X86::COND_B;
19834 case Intrinsic::x86_avx_vtestnzc_ps:
19835 case Intrinsic::x86_avx_vtestnzc_pd:
19836 case Intrinsic::x86_avx_vtestnzc_ps_256:
19837 case Intrinsic::x86_avx_vtestnzc_pd_256:
19838 IsTestPacked = true;
19840 case Intrinsic::x86_sse41_ptestnzc:
19841 case Intrinsic::x86_avx_ptestnzc_256:
19843 X86CC = X86::COND_A;
19847 SDValue LHS = Op.getOperand(1);
19848 SDValue RHS = Op.getOperand(2);
19849 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
19850 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
19851 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
19852 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
19854 case Intrinsic::x86_avx512_kortestz_w:
19855 case Intrinsic::x86_avx512_kortestc_w: {
19856 X86::CondCode X86CC =
19857 (IntNo == Intrinsic::x86_avx512_kortestz_w) ? X86::COND_E : X86::COND_B;
19858 SDValue LHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(1));
19859 SDValue RHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(2));
19860 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
19861 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
19862 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
19865 case Intrinsic::x86_avx512_knot_w: {
19866 SDValue LHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(1));
19867 SDValue RHS = DAG.getConstant(1, dl, MVT::v16i1);
19868 SDValue Res = DAG.getNode(ISD::XOR, dl, MVT::v16i1, LHS, RHS);
19869 return DAG.getBitcast(MVT::i16, Res);
19872 case Intrinsic::x86_avx512_kandn_w: {
19873 SDValue LHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(1));
19874 // Invert LHS for the not.
19875 LHS = DAG.getNode(ISD::XOR, dl, MVT::v16i1, LHS,
19876 DAG.getConstant(1, dl, MVT::v16i1));
19877 SDValue RHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(2));
19878 SDValue Res = DAG.getNode(ISD::AND, dl, MVT::v16i1, LHS, RHS);
19879 return DAG.getBitcast(MVT::i16, Res);
19882 case Intrinsic::x86_avx512_kxnor_w: {
19883 SDValue LHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(1));
19884 SDValue RHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(2));
19885 SDValue Res = DAG.getNode(ISD::XOR, dl, MVT::v16i1, LHS, RHS);
19886 // Invert result for the not.
19887 Res = DAG.getNode(ISD::XOR, dl, MVT::v16i1, Res,
19888 DAG.getConstant(1, dl, MVT::v16i1));
19889 return DAG.getBitcast(MVT::i16, Res);
19892 case Intrinsic::x86_sse42_pcmpistria128:
19893 case Intrinsic::x86_sse42_pcmpestria128:
19894 case Intrinsic::x86_sse42_pcmpistric128:
19895 case Intrinsic::x86_sse42_pcmpestric128:
19896 case Intrinsic::x86_sse42_pcmpistrio128:
19897 case Intrinsic::x86_sse42_pcmpestrio128:
19898 case Intrinsic::x86_sse42_pcmpistris128:
19899 case Intrinsic::x86_sse42_pcmpestris128:
19900 case Intrinsic::x86_sse42_pcmpistriz128:
19901 case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_E;
      break;
    }
19947 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
19948 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
19949 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
19950 SDValue SetCC = getSETCC(X86CC, SDValue(PCMP.getNode(), 1), dl, DAG);
19951 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
19954 case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTRI;
    else
      Opcode = X86ISD::PCMPESTRI;
19962 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
19963 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
19964 return DAG.getNode(Opcode, dl, VTs, NewOps);
19967 case Intrinsic::eh_sjlj_lsda: {
19968 MachineFunction &MF = DAG.getMachineFunction();
19969 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19970 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
19971 auto &Context = MF.getMMI().getContext();
19972 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
19973 Twine(MF.getFunctionNumber()));
19974 return DAG.getNode(X86ISD::Wrapper, dl, VT, DAG.getMCSymbol(S, PtrVT));
19977 case Intrinsic::x86_seh_lsda: {
19978 // Compute the symbol for the LSDA. We know it'll get emitted later.
19979 MachineFunction &MF = DAG.getMachineFunction();
19980 SDValue Op1 = Op.getOperand(1);
19981 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
19982 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
19983 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
19985 // Generate a simple absolute symbol reference. This intrinsic is only
19986 // supported on 32-bit Windows, which isn't PIC.
19987 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
19988 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
19991 case Intrinsic::x86_seh_recoverfp: {
19992 SDValue FnOp = Op.getOperand(1);
19993 SDValue IncomingFPOp = Op.getOperand(2);
19994 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
          "llvm.x86.seh.recoverfp must take a function as the first argument");
    return recoverFramePointer(DAG, Fn, IncomingFPOp);
19999 return recoverFramePointer(DAG, Fn, IncomingFPOp);
20002 case Intrinsic::localaddress: {
20003 // Returns one of the stack, base, or frame pointer registers, depending on
20004 // which is used to reference local variables.
20005 MachineFunction &MF = DAG.getMachineFunction();
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    unsigned Reg;
    if (RegInfo->hasBasePointer(MF))
20009 Reg = RegInfo->getBaseRegister();
20010 else // This function handles the SP or FP case.
20011 Reg = RegInfo->getPtrSizedFrameRegister(MF);
20012 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
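// Helper for lowering AVX2 gather intrinsics. It builds the target gather
// machine node directly: the pass-through source is replaced with a zero
// vector when it is undef or the mask is known all-ones (so the source will
// never be used), which breaks a false register dependency. The node returns
// the gathered value plus the chain.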
20017 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
20018 SDValue Src, SDValue Mask, SDValue Base,
20019 SDValue Index, SDValue ScaleOp, SDValue Chain,
                                 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = cast<ConstantSDNode>(ScaleOp);
20023 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
20024 EVT MaskVT = Mask.getValueType();
20025 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
20026 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
20027 SDValue Segment = DAG.getRegister(0, MVT::i32);
20028 // If source is undef or we know it won't be used, use a zero vector
20029 // to break register dependency.
20030 // TODO: use undef instead and let ExecutionDepsFix deal with it?
20031 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
20032 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
20033 SDValue Ops[] = {Src, Base, Scale, Index, Disp, Segment, Mask, Chain};
20034 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
20035 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
20036 return DAG.getMergeValues(RetOps, dl);
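// AVX-512 form of the gather helper above: the incoming mask is first
// converted to a vXi1 mask value via getMaskNode before the machine gather
// node is created.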
20039 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
20040 SDValue Src, SDValue Mask, SDValue Base,
20041 SDValue Index, SDValue ScaleOp, SDValue Chain,
                             const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = cast<ConstantSDNode>(ScaleOp);
20045 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
20046 MVT MaskVT = MVT::getVectorVT(MVT::i1,
20047 Index.getSimpleValueType().getVectorNumElements());
20049 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
20050 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
20051 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
20052 SDValue Segment = DAG.getRegister(0, MVT::i32);
20053 // If source is undef or we know it won't be used, use a zero vector
20054 // to break register dependency.
20055 // TODO: use undef instead and let ExecutionDepsFix deal with it?
20056 if (Src.isUndef() || ISD::isBuildVectorAllOnes(VMask.getNode()))
20057 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
20058 SDValue Ops[] = {Src, VMask, Base, Scale, Index, Disp, Segment, Chain};
20059 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
20060 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
20061 return DAG.getMergeValues(RetOps, dl);
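// Helper for lowering AVX-512 scatter intrinsics; only the chain result of
// the machine scatter node is returned.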
20064 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
20065 SDValue Src, SDValue Mask, SDValue Base,
20066 SDValue Index, SDValue ScaleOp, SDValue Chain,
                              const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = cast<ConstantSDNode>(ScaleOp);
20070 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
20071 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
20072 SDValue Segment = DAG.getRegister(0, MVT::i32);
20073 MVT MaskVT = MVT::getVectorVT(MVT::i1,
20074 Index.getSimpleValueType().getVectorNumElements());
20076 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
20077 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
20078 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, VMask, Src, Chain};
20079 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
20080 return SDValue(Res, 1);
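// Helper for lowering gather/scatter prefetch intrinsics; a prefetch has no
// value result, so only the chain is returned.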
20083 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
20084 SDValue Mask, SDValue Base, SDValue Index,
20085 SDValue ScaleOp, SDValue Chain,
                               const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = cast<ConstantSDNode>(ScaleOp);
20089 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
20090 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
20091 SDValue Segment = DAG.getRegister(0, MVT::i32);
  MVT MaskVT =
    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
20094 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
20095 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
20096 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
20097 return SDValue(Res, 0);
/// Handles the lowering of builtin intrinsics that return the value
/// of the extended control register.
static void getExtendedControlRegister(SDNode *N, const SDLoc &DL,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget,
                                       SmallVectorImpl<SDValue> &Results) {
20106 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue LO, HI;

  // The ECX register is used to select the index of the XCR register to
  // return.
  SDValue Chain =
      DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX, N->getOperand(2));
  SDNode *N1 = DAG.getMachineNode(X86::XGETBV, DL, Tys, Chain);
  Chain = SDValue(N1, 0);

  // Reads the content of XCR and returns it in registers EDX:EAX.
  if (Subtarget.is64Bit()) {
    LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);
  if (Subtarget.is64Bit()) {
    // Merge the two 32-bit values into a 64-bit one.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, DL, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
20139 SDValue Ops[] = { LO, HI };
20140 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
20141 Results.push_back(Pair);
20142 Results.push_back(Chain);
20145 /// Handles the lowering of builtin intrinsics that read performance monitor
20146 /// counters (x86_rdpmc).
static void getReadPerformanceCounter(SDNode *N, const SDLoc &DL,
                                      SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget,
                                      SmallVectorImpl<SDValue> &Results) {
20151 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue LO, HI;

  // The ECX register is used to select the index of the performance counter
  // to read.
  SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
                                   N->getOperand(2));
  SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
20161 // Reads the content of a 64-bit performance counter and returns it in the
20162 // registers EDX:EAX.
20163 if (Subtarget.is64Bit()) {
20164 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
20165 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
20168 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
20169 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
20172 Chain = HI.getValue(1);
20174 if (Subtarget.is64Bit()) {
20175 // The EAX register is loaded with the low-order 32 bits. The EDX register
20176 // is loaded with the supported high-order bits of the counter.
20177 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
20178 DAG.getConstant(32, DL, MVT::i8));
20179 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
20180 Results.push_back(Chain);
20184 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
20185 SDValue Ops[] = { LO, HI };
20186 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
20187 Results.push_back(Pair);
20188 Results.push_back(Chain);
20191 /// Handles the lowering of builtin intrinsics that read the time stamp counter
20192 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
20193 /// READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget,
                                    SmallVectorImpl<SDValue> &Results) {
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
  SDValue LO, HI;

  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
20203 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
20204 // and the EAX register is loaded with the low-order 32 bits.
20205 if (Subtarget.is64Bit()) {
20206 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
20207 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
20210 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
20211 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
20214 SDValue Chain = HI.getValue(1);
20216 if (Opcode == X86ISD::RDTSCP_DAG) {
20217 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
20219 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
20220 // the ECX register. Add 'ecx' explicitly to the chain.
20221 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
20223 // Explicitly store the content of ECX at the location passed in input
20224 // to the 'rdtscp' intrinsic.
20225 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
20226 MachinePointerInfo());
20229 if (Subtarget.is64Bit()) {
20230 // The EDX register is loaded with the high-order 32 bits of the MSR, and
20231 // the EAX register is loaded with the low-order 32 bits.
20232 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
20233 DAG.getConstant(32, DL, MVT::i8));
20234 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
20235 Results.push_back(Chain);
20239 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
20240 SDValue Ops[] = { LO, HI };
20241 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
20242 Results.push_back(Pair);
20243 Results.push_back(Chain);
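// READCYCLECOUNTER is lowered through the same helper used for the rdtsc
// intrinsics, producing the 64-bit counter value and the chain.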
20246 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
20247 SelectionDAG &DAG) {
20248 SmallVector<SDValue, 2> Results;
20250 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
20252 return DAG.getMergeValues(Results, DL);
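// Records the frame index of the SEH registration node alloca in
// WinEHFuncInfo; no DAG nodes are created for llvm.x86.seh.ehregnode.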
20255 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
20256 MachineFunction &MF = DAG.getMachineFunction();
20257 SDValue Chain = Op.getOperand(0);
20258 SDValue RegNode = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EH registrations only live in functions using WinEH");
20263 // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
20267 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
  // Return the chain operand without making any DAG nodes.
  return Chain;
}
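// Same idea for the EH guard slot: remember its frame index in WinEHFuncInfo
// without emitting any code.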
20273 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
20274 MachineFunction &MF = DAG.getMachineFunction();
20275 SDValue Chain = Op.getOperand(0);
20276 SDValue EHGuard = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EHGuard only live in functions using WinEH");
20281 // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
20285 EHInfo->EHGuardFrameIndex = FINode->getIndex();
  // Return the chain operand without making any DAG nodes.
  return Chain;
}
/// Emit Truncating Store with signed or unsigned saturation.
static SDValue
EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
20294 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
20295 SelectionDAG &DAG) {
20297 SDVTList VTs = DAG.getVTList(MVT::Other);
20298 SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
20299 SDValue Ops[] = { Chain, Val, Ptr, Undef };
  return SignedSat ?
    DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
    DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}
/// Emit Masked Truncating Store with signed or unsigned saturation.
static SDValue
EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
20308 SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
20309 MachineMemOperand *MMO, SelectionDAG &DAG) {
20311 SDVTList VTs = DAG.getVTList(MVT::Other);
20312 SDValue Ops[] = { Chain, Ptr, Mask, Val };
  return SignedSat ?
    DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
    DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}
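// Lowering for chained X86 intrinsics (ISD::INTRINSIC_W_CHAIN). A few
// intrinsics are handled explicitly below; everything else is driven by the
// getIntrinsicWithChain() table and dispatched on the IntrinsicData type.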
20318 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
20319 SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

  const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData) {
    switch (IntNo) {
    case llvm::Intrinsic::x86_seh_ehregnode:
20326 return MarkEHRegistrationNode(Op, DAG);
20327 case llvm::Intrinsic::x86_seh_ehguard:
20328 return MarkEHGuard(Op, DAG);
20329 case llvm::Intrinsic::x86_flags_read_u32:
20330 case llvm::Intrinsic::x86_flags_read_u64:
20331 case llvm::Intrinsic::x86_flags_write_u32:
20332 case llvm::Intrinsic::x86_flags_write_u64: {
      // We need a frame pointer because this will get lowered to a PUSH/POP
      // sequence.
20335 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
20336 MFI.setHasCopyImplyingStackAdjustment(true);
20337 // Don't do anything here, we will expand these intrinsics out later
20338 // during ExpandISelPseudos in EmitInstrWithCustomInserter.
20341 case Intrinsic::x86_lwpins32:
20342 case Intrinsic::x86_lwpins64: {
      SDLoc dl(Op);
      SDValue Chain = Op->getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      SDValue LwpIns =
          DAG.getNode(X86ISD::LWPINS, dl, VTs, Chain, Op->getOperand(2),
                      Op->getOperand(3), Op->getOperand(4));
20349 SDValue SetCC = getSETCC(X86::COND_B, LwpIns.getValue(0), dl, DAG);
20350 SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, SetCC);
20351 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
                         LwpIns.getValue(1));
    }
    }
    return SDValue();
  }

  SDLoc dl(Op);
  switch(IntrData->Type) {
20360 default: llvm_unreachable("Unknown Intrinsic Type");
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
20364 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
20365 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
20367 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
20368 // Otherwise return the value from Rand, which is always 0, casted to i32.
20369 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
20370 DAG.getConstant(1, dl, Op->getValueType(1)),
20371 DAG.getConstant(X86::COND_B, dl, MVT::i32),
20372 SDValue(Result.getNode(), 1) };
20373 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
20374 DAG.getVTList(Op->getValueType(1), MVT::Glue),
20377 // Return { result, isValid, chain }.
20378 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
20379 SDValue(Result.getNode(), 2));
20381 case GATHER_AVX2: {
20382 SDValue Chain = Op.getOperand(0);
20383 SDValue Src = Op.getOperand(2);
20384 SDValue Base = Op.getOperand(3);
20385 SDValue Index = Op.getOperand(4);
20386 SDValue Mask = Op.getOperand(5);
20387 SDValue Scale = Op.getOperand(6);
20388 return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                             Scale, Chain, Subtarget);
  }
  case GATHER: {
    // gather(v1, mask, index, base, scale);
20393 SDValue Chain = Op.getOperand(0);
20394 SDValue Src = Op.getOperand(2);
20395 SDValue Base = Op.getOperand(3);
20396 SDValue Index = Op.getOperand(4);
20397 SDValue Mask = Op.getOperand(5);
20398 SDValue Scale = Op.getOperand(6);
    return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale,
                         Chain, Subtarget);
  }
  case SCATTER: {
    // scatter(base, mask, index, v1, scale);
20404 SDValue Chain = Op.getOperand(0);
20405 SDValue Base = Op.getOperand(2);
20406 SDValue Mask = Op.getOperand(3);
20407 SDValue Index = Op.getOperand(4);
20408 SDValue Src = Op.getOperand(5);
20409 SDValue Scale = Op.getOperand(6);
20410 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain, Subtarget);
  }
  case PREFETCH: {
    SDValue Hint = Op.getOperand(6);
20415 unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
20416 assert((HintVal == 2 || HintVal == 3) &&
20417 "Wrong prefetch hint in intrinsic: should be 2 or 3");
20418 unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
20419 SDValue Chain = Op.getOperand(0);
20420 SDValue Mask = Op.getOperand(2);
20421 SDValue Index = Op.getOperand(3);
20422 SDValue Base = Op.getOperand(4);
20423 SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
                           Subtarget);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
20430 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
20432 return DAG.getMergeValues(Results, dl);
20434 // Read Performance Monitoring Counters.
20436 SmallVector<SDValue, 2> Results;
20437 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
20438 return DAG.getMergeValues(Results, dl);
20440 // Get Extended Control Register.
20442 SmallVector<SDValue, 2> Results;
20443 getExtendedControlRegister(Op.getNode(), dl, DAG, Subtarget, Results);
20444 return DAG.getMergeValues(Results, dl);
20446 // XTEST intrinsics.
20448 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
20449 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
20451 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
20452 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
20453 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
20454 Ret, SDValue(InTrans.getNode(), 1));
20458 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
20459 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
20460 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
20461 DAG.getConstant(-1, dl, MVT::i8));
20462 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
20463 Op.getOperand(4), GenCF.getValue(1));
20464 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
20465 Op.getOperand(5), MachinePointerInfo());
20466 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
20467 SDValue Results[] = { SetCC, Store };
20468 return DAG.getMergeValues(Results, dl);
20470 case COMPRESS_TO_MEM: {
20471 SDValue Mask = Op.getOperand(4);
20472 SDValue DataToCompress = Op.getOperand(3);
20473 SDValue Addr = Op.getOperand(2);
20474 SDValue Chain = Op.getOperand(0);
20475 MVT VT = DataToCompress.getSimpleValueType();
20477 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
20478 assert(MemIntr && "Expected MemIntrinsicSDNode!");
20480 if (isAllOnesConstant(Mask)) // return just a store
20481 return DAG.getStore(Chain, dl, DataToCompress, Addr,
20482 MemIntr->getMemOperand());
20484 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
20485 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
20487 return DAG.getMaskedStore(Chain, dl, DataToCompress, Addr, VMask, VT,
20488 MemIntr->getMemOperand(),
20489 false /* truncating */, true /* compressing */);
20491 case TRUNCATE_TO_MEM_VI8:
20492 case TRUNCATE_TO_MEM_VI16:
20493 case TRUNCATE_TO_MEM_VI32: {
20494 SDValue Mask = Op.getOperand(4);
20495 SDValue DataToTruncate = Op.getOperand(3);
20496 SDValue Addr = Op.getOperand(2);
20497 SDValue Chain = Op.getOperand(0);
20499 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
20500 assert(MemIntr && "Expected MemIntrinsicSDNode!");
20502 EVT MemVT = MemIntr->getMemoryVT();
20504 uint16_t TruncationOp = IntrData->Opc0;
20505 switch (TruncationOp) {
20506 case X86ISD::VTRUNC: {
20507 if (isAllOnesConstant(Mask)) // return just a truncate store
20508 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
20509 MemIntr->getMemOperand());
20511 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
20512 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
20514 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT,
20515 MemIntr->getMemOperand(), true /* truncating */);
20517 case X86ISD::VTRUNCUS:
20518 case X86ISD::VTRUNCS: {
20519 bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
20520 if (isAllOnesConstant(Mask))
20521 return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
20522 MemIntr->getMemOperand(), DAG);
20524 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
20525 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
20527 return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
20528 VMask, MemVT, MemIntr->getMemOperand(), DAG);
20531 llvm_unreachable("Unsupported truncstore intrinsic");
20535 case EXPAND_FROM_MEM: {
20536 SDValue Mask = Op.getOperand(4);
20537 SDValue PassThru = Op.getOperand(3);
20538 SDValue Addr = Op.getOperand(2);
20539 SDValue Chain = Op.getOperand(0);
20540 MVT VT = Op.getSimpleValueType();
20542 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
20543 assert(MemIntr && "Expected MemIntrinsicSDNode!");
20545 if (isAllOnesConstant(Mask)) // Return a regular (unmasked) vector load.
20546 return DAG.getLoad(VT, dl, Chain, Addr, MemIntr->getMemOperand());
20547 if (X86::isZeroNode(Mask))
20548 return DAG.getUNDEF(VT);
20550 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
20551 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
20552 return DAG.getMaskedLoad(VT, dl, Chain, Addr, VMask, PassThru, VT,
20553 MemIntr->getMemOperand(), ISD::NON_EXTLOAD,
20554 true /* expanding */);
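// Lower llvm.returnaddress. A depth of zero loads the caller's return address
// from its frame-index slot; for larger depths the frame pointers are walked
// and the word just above the selected frame is loaded.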
20559 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
20560 SelectionDAG &DAG) const {
20561 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
20562 MFI.setReturnAddressIsTaken(true);
  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
20573 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
20574 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
20575 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
20576 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
20577 MachinePointerInfo());
  }

  // Just load the return address.
20581 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
20582 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
20583 MachinePointerInfo());
20586 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
20587 SelectionDAG &DAG) const {
20588 DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
20589 return getReturnAddressFrameIndex(DAG);
20592 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
20593 MachineFunction &MF = DAG.getMachineFunction();
20594 MachineFrameInfo &MFI = MF.getFrameInfo();
20595 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
20596 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
20597 EVT VT = Op.getValueType();
20599 MFI.setFrameAddressIsTaken(true);
20601 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
20602 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind codes
    // simultaneously.
20605 int FrameAddrIndex = FuncInfo->getFAIndex();
20606 if (!FrameAddrIndex) {
20607 // Set up a frame object for the return address.
20608 unsigned SlotSize = RegInfo->getSlotSize();
20609 FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
20610 SlotSize, /*Offset=*/0, /*IsImmutable=*/false);
20611 FuncInfo->setFAIndex(FrameAddrIndex);
20613 return DAG.getFrameIndex(FrameAddrIndex, VT);
20616 unsigned FrameReg =
20617 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
20618 SDLoc dl(Op); // FIXME probably not meaningful
20619 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
20620 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
20621 (FrameReg == X86::EBP && VT == MVT::i32)) &&
20622 "Invalid Frame Register!");
20623 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}
20630 // FIXME? Maybe this could be a TableGen attribute on some registers and
20631 // this table could be generated automatically from RegInfo.
20632 unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
20633 SelectionDAG &DAG) const {
20634 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
20635 const MachineFunction &MF = DAG.getMachineFunction();
20637 unsigned Reg = StringSwitch<unsigned>(RegName)
20638 .Case("esp", X86::ESP)
20639 .Case("rsp", X86::RSP)
20640 .Case("ebp", X86::EBP)
                       .Case("rbp", X86::RBP)
                       .Default(0);
20644 if (Reg == X86::EBP || Reg == X86::RBP) {
20645 if (!TFI.hasFP(MF))
20646 report_fatal_error("register " + StringRef(RegName) +
20647 " is allocatable: function has no frame pointer");
20650 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
20651 unsigned FrameReg =
20652 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
20653 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
20654 "Invalid Frame Register!");
20662 report_fatal_error("Invalid register name global variable");
20665 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
20666 SelectionDAG &DAG) const {
20667 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
20668 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
20671 unsigned X86TargetLowering::getExceptionPointerRegister(
20672 const Constant *PersonalityFn) const {
20673 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
20674 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
20676 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
20679 unsigned X86TargetLowering::getExceptionSelectorRegister(
20680 const Constant *PersonalityFn) const {
20681 // Funclet personalities don't use selectors (the runtime does the selection).
20682 assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
20683 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
20686 bool X86TargetLowering::needsFixedCatchObjects() const {
20687 return Subtarget.isTargetWin64();
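// Lower llvm.eh.return: the handler address is stored into the return-address
// slot (adjusted by the given offset), the slot address is placed in RCX/ECX,
// and an X86ISD::EH_RETURN node is emitted.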
20690 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
20691 SDValue Chain = Op.getOperand(0);
20692 SDValue Offset = Op.getOperand(1);
20693 SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
20697 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
20698 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
20699 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
20700 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
20701 "Invalid Frame Register!");
20702 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
20703 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
20705 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
20706 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
20708 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
20709 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
20710 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
20712 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
20713 DAG.getRegister(StoreAddrReg, PtrVT));
20716 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
20717 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If the subtarget is not 64bit, we may need the global base reg
20720 // after isel expand pseudo, i.e., after CGBR pass ran.
20721 // Therefore, ask for the GlobalBaseReg now, so that the pass
20722 // inserts the code for us in case we need it.
20723 // Otherwise, we will end up in a situation where we will
20724 // reference a virtual register that is not defined!
20725 if (!Subtarget.is64Bit()) {
20726 const X86InstrInfo *TII = Subtarget.getInstrInfo();
20727 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
20729 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
20730 DAG.getVTList(MVT::i32, MVT::Other),
20731 Op.getOperand(0), Op.getOperand(1));
20734 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
20735 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
20738 Op.getOperand(0), Op.getOperand(1));
20741 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                        SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
                     Op.getOperand(0));
}
20748 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
20749 return Op.getOperand(0);
20752 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
20753 SelectionDAG &DAG) const {
20754 SDValue Root = Op.getOperand(0);
20755 SDValue Trmp = Op.getOperand(1); // trampoline
20756 SDValue FPtr = Op.getOperand(2); // nested function
20757 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
20761 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
20763 if (Subtarget.is64Bit()) {
20764 SDValue OutChains[6];
20766 // Large code-model.
20767 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
20768 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
20770 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
20771 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
20773 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
20775 // Load the pointer to the nested function into R11.
20776 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
20777 SDValue Addr = Trmp;
20778 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
20779 Addr, MachinePointerInfo(TrmpAddr));
20781 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
20782 DAG.getConstant(2, dl, MVT::i64));
20784 DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
20785 /* Alignment = */ 2);
20787 // Load the 'nest' parameter value into R10.
20788 // R10 is specified in X86CallingConv.td
20789 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
20790 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
20791 DAG.getConstant(10, dl, MVT::i64));
20792 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
20793 Addr, MachinePointerInfo(TrmpAddr, 10));
20795 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
20796 DAG.getConstant(12, dl, MVT::i64));
20798 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
20799 /* Alignment = */ 2);
20801 // Jump to the nested function.
20802 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
20803 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
20804 DAG.getConstant(20, dl, MVT::i64));
20805 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
20806 Addr, MachinePointerInfo(TrmpAddr, 20));
20808 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
20809 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
20810 DAG.getConstant(22, dl, MVT::i64));
20811 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
20812 Addr, MachinePointerInfo(TrmpAddr, 22));
20814 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
    const Function *Func =
20817 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
20824 case CallingConv::C:
20825 case CallingConv::X86_StdCall: {
20826 // Pass 'nest' parameter in ECX.
20827 // Must be kept in sync with X86CallingConv.td
20828 NestReg = X86::ECX;
20830 // Check that ECX wasn't needed by an 'inreg' parameter.
20831 FunctionType *FTy = Func->getFunctionType();
20832 const AttributeList &Attrs = Func->getAttributes();
20834 if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;
20838 for (FunctionType::param_iterator I = FTy->param_begin(),
20839 E = FTy->param_end(); I != E; ++I, ++Idx)
20840 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
20841 auto &DL = DAG.getDataLayout();
20842 // FIXME: should only count parameters that are lowered to integers.
20843 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
20846 if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
20854 case CallingConv::X86_ThisCall:
20855 case CallingConv::Fast:
20856 // Pass 'nest' parameter in EAX.
20857 // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }
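    // The 32-bit trampoline emitted below is 10 bytes:
    //   [0]    0xB8+reg   mov $<nest>, %nestreg
    //   [1-4]  the 32-bit 'nest' value
    //   [5]    0xE9       jmp rel32
    //   [6-9]  displacement from the end of the trampoline to the nested
    //          function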
20862 SDValue OutChains[4];
20863 SDValue Addr, Disp;
20865 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
20866 DAG.getConstant(10, dl, MVT::i32));
20867 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
20869 // This is storing the opcode for MOV32ri.
20870 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
20871 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
20873 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
20874 Trmp, MachinePointerInfo(TrmpAddr));
20876 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
20877 DAG.getConstant(1, dl, MVT::i32));
20879 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
20880 /* Alignment = */ 1);
20882 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
20883 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
20884 DAG.getConstant(5, dl, MVT::i32));
20885 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
20886 Addr, MachinePointerInfo(TrmpAddr, 5),
20887 /* Alignment = */ 1);
20889 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
20890 DAG.getConstant(6, dl, MVT::i32));
20892 DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
20893 /* Alignment = */ 1);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}
20899 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
20900 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest,  01 Round to -inf,
     10 Round to +inf,     11 Round to zero.

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined,  0 Round to 0,  1 Round to nearest,
     2 Round to +inf,  3 Round to -inf.

   To perform the conversion, we do:
     (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
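  // For example, a hardware rounding mode of 01 (round toward -inf) has bit 10
  // set and bit 11 clear, so the expression evaluates to
  // ((0 | (1 << 1)) + 1) & 3 = 3, which is FLT_ROUNDS' "round to -inf".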
20920 MachineFunction &MF = DAG.getMachineFunction();
20921 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
20922 unsigned StackAlignment = TFI.getStackAlignment();
20923 MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);

  // Save FP Control Word to stack slot
20927 int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false);
20928 SDValue StackSlot =
20929 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
20931 MachineMemOperand *MMO =
20932 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
20933 MachineMemOperand::MOStore, 2, 2);
20935 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
20936 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
20937 DAG.getVTList(MVT::Other),
20938 Ops, MVT::i16, MMO);
20940 // Load FP Control Word from stack slot
  SDValue CWD =
      DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
20944 // Transform as necessary
  SDValue CWD1 =
      DAG.getNode(ISD::SRL, DL, MVT::i16,
20947 DAG.getNode(ISD::AND, DL, MVT::i16,
20948 CWD, DAG.getConstant(0x800, DL, MVT::i16)),
20949 DAG.getConstant(11, DL, MVT::i8));
  SDValue CWD2 =
      DAG.getNode(ISD::SRL, DL, MVT::i16,
20952 DAG.getNode(ISD::AND, DL, MVT::i16,
20953 CWD, DAG.getConstant(0x400, DL, MVT::i16)),
20954 DAG.getConstant(9, DL, MVT::i8));
  SDValue RetVal =
      DAG.getNode(ISD::AND, DL, MVT::i16,
20958 DAG.getNode(ISD::ADD, DL, MVT::i16,
20959 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
20960 DAG.getConstant(1, DL, MVT::i16)),
20961 DAG.getConstant(3, DL, MVT::i16));
20963 return DAG.getNode((VT.getSizeInBits() < 16 ?
20964 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
20967 // Split an unary integer op into 2 half sized ops.
20968 static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
20969 MVT VT = Op.getSimpleValueType();
20970 unsigned NumElems = VT.getVectorNumElements();
20971 unsigned SizeInBits = VT.getSizeInBits();
20973 // Extract the Lo/Hi vectors
  SDLoc dl(Op);
  SDValue Src = Op.getOperand(0);
20976 SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
20977 SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
20979 MVT EltVT = VT.getVectorElementType();
20980 MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
20981 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
20982 DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
20983 DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
20986 // Decompose 256-bit ops into smaller 128-bit ops.
20987 static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
20988 assert(Op.getSimpleValueType().is256BitVector() &&
20989 Op.getSimpleValueType().isInteger() &&
20990 "Only handle AVX 256-bit vector integer operation");
20991 return LowerVectorIntUnary(Op, DAG);
20994 // Decompose 512-bit ops into smaller 256-bit ops.
20995 static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
20996 assert(Op.getSimpleValueType().is512BitVector() &&
20997 Op.getSimpleValueType().isInteger() &&
20998 "Only handle AVX 512-bit vector integer operation");
20999 return LowerVectorIntUnary(Op, DAG);
21002 /// \brief Lower a vector CTLZ using native supported vector CTLZ instruction.
// i8/i16 vectors implemented using dword LZCNT vector instruction
// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
21008 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG) {
21009 assert(Op.getOpcode() == ISD::CTLZ);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
21012 MVT EltVT = VT.getVectorElementType();
21013 unsigned NumElems = VT.getVectorNumElements();
21015 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
21016 "Unsupported element type");
  // Split the vector; its Lo and Hi parts will be handled in the next iteration.
  if (16 < NumElems)
    return LowerVectorIntUnary(Op, DAG);
21022 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
21023 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
21024 "Unsupported value type for operation");
21026 // Use native supported vector instruction vplzcntd.
21027 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
21028 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
21029 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
21030 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
21032 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
21035 // Lower CTLZ using a PSHUFB lookup table implementation.
21036 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
21037 const X86Subtarget &Subtarget,
21038 SelectionDAG &DAG) {
21039 MVT VT = Op.getSimpleValueType();
21040 int NumElts = VT.getVectorNumElements();
21041 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
21042 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
21044 // Per-nibble leading zero PSHUFB lookup table.
21045 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
21046 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
21047 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
21048 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
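  // For example, the byte 0x1F has a high nibble of 0x1, so the LUT gives 3
  // leading zeros and the nonzero high nibble masks out the low-nibble
  // contribution; for 0x0B the high nibble is zero, giving
  // LUT[0] + LUT[0xb] = 4 + 0 = 4 leading zeros.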
21050 SmallVector<SDValue, 64> LUTVec;
21051 for (int i = 0; i < NumBytes; ++i)
21052 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
21053 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
21055 // Begin by bitcasting the input to byte vector, then split those bytes
  // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
  // If the hi input nibble is zero then we add both results together, otherwise
  // we just take the hi result (by masking the lo result to zero before the
  // add).
21060 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
21061 SDValue Zero = getZeroVector(CurrVT, Subtarget, DAG, DL);
21063 SDValue NibbleMask = DAG.getConstant(0xF, DL, CurrVT);
21064 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
21065 SDValue Lo = DAG.getNode(ISD::AND, DL, CurrVT, Op0, NibbleMask);
21066 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
21067 SDValue HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
21069 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
21070 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
21071 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
21072 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
21074 // Merge result back from vXi8 back to VT, working on the lo/hi halves
21075 // of the current vector width in the same way we did for the nibbles.
21076 // If the upper half of the input element is zero then add the halves'
21077 // leading zero counts together, otherwise just use the upper half's.
21078 // Double the width of the result until we are at target width.
21079 while (CurrVT != VT) {
21080 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
21081 int CurrNumElts = CurrVT.getVectorNumElements();
21082 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
21083 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
21084 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
21086 // Check if the upper half of the input element is zero.
21087 SDValue HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
21088 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
21089 HiZ = DAG.getBitcast(NextVT, HiZ);
21091 // Move the upper/lower halves to the lower bits as we'll be extending to
21092 // NextVT. Mask the lower result to zero if HiZ is true and add the results
21094 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
21095 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
21096 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
21097 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
21098 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
21105 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
21106 const X86Subtarget &Subtarget,
21107 SelectionDAG &DAG) {
21108 MVT VT = Op.getSimpleValueType();
21110 if (Subtarget.hasCDI())
21111 return LowerVectorCTLZ_AVX512CDI(Op, DAG);
21113 // Decompose 256-bit ops into smaller 128-bit ops.
21114 if (VT.is256BitVector() && !Subtarget.hasInt256())
21115 return Lower256IntUnary(Op, DAG);
21117 // Decompose 512-bit ops into smaller 256-bit ops.
21118 if (VT.is512BitVector() && !Subtarget.hasBWI())
21119 return Lower512IntUnary(Op, DAG);
21121 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
21122 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
21125 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
21126 SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();

  if (VT.isVector())
    return LowerVectorCTLZ(Op, dl, Subtarget, DAG);

  Op = Op.getOperand(0);
21137 if (VT == MVT::i8) {
21138 // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }
21143 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
21144 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
21145 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
21147 if (Opc == ISD::CTLZ) {
    // If src is zero (i.e. bsr sets ZF), returns NumBits.
    SDValue Ops[] = {
      Op,
      DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
      DAG.getConstant(X86::COND_E, dl, MVT::i8),
      Op.getValue(1)
    };
    Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
  }
21158 // Finally xor with NumBits-1.
21159 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
21160 DAG.getConstant(NumBits - 1, dl, OpVT));
  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}
21167 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
21168 MVT VT = Op.getSimpleValueType();
  unsigned NumBits = VT.getScalarSizeInBits();
  SDLoc dl(Op);
21172 if (VT.isVector()) {
21173 SDValue N0 = Op.getOperand(0);
21174 SDValue Zero = DAG.getConstant(0, dl, VT);
21176 // lsb(x) = (x & -x)
21177 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, N0,
21178 DAG.getNode(ISD::SUB, dl, VT, Zero, N0));
21180 // cttz_undef(x) = (width - 1) - ctlz(lsb)
21181 if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
21182 SDValue WidthMinusOne = DAG.getConstant(NumBits - 1, dl, VT);
21183 return DAG.getNode(ISD::SUB, dl, VT, WidthMinusOne,
21184 DAG.getNode(ISD::CTLZ, dl, VT, LSB));
21187 // cttz(x) = ctpop(lsb - 1)
21188 SDValue One = DAG.getConstant(1, dl, VT);
21189 return DAG.getNode(ISD::CTPOP, dl, VT,
                       DAG.getNode(ISD::SUB, dl, VT, LSB, One));
  }
21193 assert(Op.getOpcode() == ISD::CTTZ &&
21194 "Only scalar CTTZ requires custom lowering");
21196 // Issue a bsf (scan bits forward) which also sets EFLAGS.
21197 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
21198 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op.getOperand(0));
  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, dl, VT),
    DAG.getConstant(X86::COND_E, dl, MVT::i8),
    Op.getValue(1)
  };
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
21210 /// Break a 256-bit integer operation into two new 128-bit ones and then
21211 /// concatenate the result back.
21212 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
21213 MVT VT = Op.getSimpleValueType();
21215 assert(VT.is256BitVector() && VT.isInteger() &&
21216 "Unsupported value type for operation");
  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);
21221 // Extract the LHS vectors
21222 SDValue LHS = Op.getOperand(0);
21223 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
21224 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
21226 // Extract the RHS vectors
21227 SDValue RHS = Op.getOperand(1);
21228 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
21229 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
21231 MVT EltVT = VT.getVectorElementType();
21232 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
21234 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
21235 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
21236 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
21239 /// Break a 512-bit integer operation into two new 256-bit ones and then
21240 /// concatenate the result back.
21241 static SDValue Lower512IntArith(SDValue Op, SelectionDAG &DAG) {
21242 MVT VT = Op.getSimpleValueType();
21244 assert(VT.is512BitVector() && VT.isInteger() &&
21245 "Unsupported value type for operation");
  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);
21250 // Extract the LHS vectors
21251 SDValue LHS = Op.getOperand(0);
21252 SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
21253 SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
21255 // Extract the RHS vectors
21256 SDValue RHS = Op.getOperand(1);
21257 SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
21258 SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
21260 MVT EltVT = VT.getVectorElementType();
21261 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
21263 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
21264 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
21265 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
21268 static SDValue LowerADD_SUB(SDValue Op, SelectionDAG &DAG) {
21269 MVT VT = Op.getSimpleValueType();
21270 if (VT.getScalarType() == MVT::i1)
21271 return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
21272 Op.getOperand(0), Op.getOperand(1));
21273 assert(Op.getSimpleValueType().is256BitVector() &&
21274 Op.getSimpleValueType().isInteger() &&
21275 "Only handle AVX 256-bit vector integer operation");
21276 return Lower256IntArith(Op, DAG);
21279 static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
21280 assert(Op.getSimpleValueType().is256BitVector() &&
21281 Op.getSimpleValueType().isInteger() &&
21282 "Only handle AVX 256-bit vector integer operation");
21283 return Lower256IntUnary(Op, DAG);
21286 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
21287 assert(Op.getSimpleValueType().is256BitVector() &&
21288 Op.getSimpleValueType().isInteger() &&
21289 "Only handle AVX 256-bit vector integer operation");
21290 return Lower256IntArith(Op, DAG);
21293 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
21294 SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
21298 if (VT.getScalarType() == MVT::i1)
21299 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
21301 // Decompose 256-bit ops into smaller 128-bit ops.
21302 if (VT.is256BitVector() && !Subtarget.hasInt256())
21303 return Lower256IntArith(Op, DAG);
21305 SDValue A = Op.getOperand(0);
21306 SDValue B = Op.getOperand(1);
21308 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
21309 // vector pairs, multiply and truncate.
21310 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
21311 if (Subtarget.hasInt256()) {
21312 // For 512-bit vectors, split into 256-bit vectors to allow the
21313 // sign-extension to occur.
21314 if (VT == MVT::v64i8)
21315 return Lower512IntArith(Op, DAG);
21317 // For 256-bit vectors, split into 128-bit vectors to allow the
21318 // sign-extension to occur. We don't need this on AVX512BW as we can
21319 // safely sign-extend to v32i16.
21320 if (VT == MVT::v32i8 && !Subtarget.hasBWI())
21321 return Lower256IntArith(Op, DAG);
21323 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
21324 return DAG.getNode(
21325 ISD::TRUNCATE, dl, VT,
21326 DAG.getNode(ISD::MUL, dl, ExVT,
21327 DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, A),
21328 DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, B)));
21331 assert(VT == MVT::v16i8 &&
21332 "Pre-AVX2 support only supports v16i8 multiplication");
21333 MVT ExVT = MVT::v8i16;
21335 // Extract the lo parts and sign extend to i16
    SDValue ALo, BLo;
    if (Subtarget.hasSSE41()) {
21338 ALo = DAG.getSignExtendVectorInReg(A, dl, ExVT);
21339 BLo = DAG.getSignExtendVectorInReg(B, dl, ExVT);
    } else {
      const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3,
21342 -1, 4, -1, 5, -1, 6, -1, 7};
21343 ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
21344 BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
21345 ALo = DAG.getBitcast(ExVT, ALo);
21346 BLo = DAG.getBitcast(ExVT, BLo);
21347 ALo = DAG.getNode(ISD::SRA, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
21348 BLo = DAG.getNode(ISD::SRA, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
    }

    // Extract the hi parts and sign extend to i16
    SDValue AHi, BHi;
21353 if (Subtarget.hasSSE41()) {
21354 const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
21355 -1, -1, -1, -1, -1, -1, -1, -1};
21356 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
21357 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
21358 AHi = DAG.getSignExtendVectorInReg(AHi, dl, ExVT);
21359 BHi = DAG.getSignExtendVectorInReg(BHi, dl, ExVT);
    } else {
      const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11,
21362 -1, 12, -1, 13, -1, 14, -1, 15};
21363 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
21364 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
21365 AHi = DAG.getBitcast(ExVT, AHi);
21366 BHi = DAG.getBitcast(ExVT, BHi);
21367 AHi = DAG.getNode(ISD::SRA, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
21368 BHi = DAG.getNode(ISD::SRA, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
    }

    // Multiply, mask the lower 8 bits of the lo/hi results and pack
21372 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
21373 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
21374 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
21375 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
21376 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
21379 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
21380 if (VT == MVT::v4i32) {
21381 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
21382 "Should not custom lower when pmuldq is available!");
21384 // Extract the odd parts.
21385 static const int UnpackMask[] = { 1, -1, 3, -1 };
21386 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
21387 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
21389 // Multiply the even parts.
21390 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
21391 // Now multiply odd parts.
21392 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
21394 Evens = DAG.getBitcast(VT, Evens);
21395 Odds = DAG.getBitcast(VT, Odds);
21397 // Merge the two vectors back together with a shuffle. This expands into 2
21399 static const int ShufMask[] = { 0, 4, 2, 6 };
21400 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
21403 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
21404 "Only know how to lower V2I64/V4I64/V8I64 multiply");
21406 // 32-bit vector types used for MULDQ/MULUDQ.
21407 MVT MulVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
21409 // MULDQ returns the 64-bit result of the signed multiplication of the lower
21410 // 32-bits. We can lower with this if the sign bits stretch that far.
21411 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(A) > 32 &&
21412 DAG.ComputeNumSignBits(B) > 32) {
21413 return DAG.getNode(X86ISD::PMULDQ, dl, VT, DAG.getBitcast(MulVT, A),
21414 DAG.getBitcast(MulVT, B));
21417 // Ahi = psrlqi(a, 32);
21418 // Bhi = psrlqi(b, 32);
21420 // AloBlo = pmuludq(a, b);
21421 // AloBhi = pmuludq(a, Bhi);
21422 // AhiBlo = pmuludq(Ahi, b);
21424 // Hi = psllqi(AloBhi + AhiBlo, 32);
21425 // return AloBlo + Hi;
21426 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
21427 bool ALoIsZero = DAG.MaskedValueIsZero(A, LowerBitsMask);
21428 bool BLoIsZero = DAG.MaskedValueIsZero(B, LowerBitsMask);
21430 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
21431 bool AHiIsZero = DAG.MaskedValueIsZero(A, UpperBitsMask);
21432 bool BHiIsZero = DAG.MaskedValueIsZero(B, UpperBitsMask);
21434 // Bit cast to 32-bit vectors for MULUDQ.
21435 SDValue Alo = DAG.getBitcast(MulVT, A);
21436 SDValue Blo = DAG.getBitcast(MulVT, B);
21438 SDValue Zero = getZeroVector(VT, Subtarget, DAG, dl);
21440 // Only multiply lo/hi halves that aren't known to be zero.
21441 SDValue AloBlo = Zero;
21442 if (!ALoIsZero && !BLoIsZero)
21443 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Alo, Blo);
21445 SDValue AloBhi = Zero;
21446 if (!ALoIsZero && !BHiIsZero) {
21447 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
21448 Bhi = DAG.getBitcast(MulVT, Bhi);
21449 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Alo, Bhi);
21452 SDValue AhiBlo = Zero;
21453 if (!AHiIsZero && !BLoIsZero) {
21454 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
21455 Ahi = DAG.getBitcast(MulVT, Ahi);
21456 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, Blo);
21459 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
21460 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
21462 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
21463 }
21465 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
21466 SelectionDAG &DAG) {
21467 SDLoc dl(Op);
21468 MVT VT = Op.getSimpleValueType();
21470 // Decompose 256-bit ops into smaller 128-bit ops.
21471 if (VT.is256BitVector() && !Subtarget.hasInt256())
21472 return Lower256IntArith(Op, DAG);
21474 // Only i8 vectors should need custom lowering after this.
21475 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
21476 "Unsupported vector type");
21478 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
21479 // logical shift down the upper half and pack back to i8.
21480 SDValue A = Op.getOperand(0);
21481 SDValue B = Op.getOperand(1);
21483 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
21484 // and then ashr/lshr the upper bits down to the lower bits before multiply.
21485 unsigned Opcode = Op.getOpcode();
21486 unsigned ExShift = (ISD::MULHU == Opcode ? ISD::SRL : ISD::SRA);
21487 unsigned ExSSE41 = (ISD::MULHU == Opcode ? X86ISD::VZEXT : X86ISD::VSEXT);
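// For MULHU the inputs are zero extended and shifted logically, for MULHS they
// are sign extended and shifted arithmetically, so in both cases the top 8 bits
// of each widened 16-bit product are exactly the requested high half.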
21489 // AVX2 implementations - extend xmm subvectors to ymm.
21490 if (Subtarget.hasInt256()) {
21491 SDValue Lo = DAG.getIntPtrConstant(0, dl);
21492 SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
21494 if (VT == MVT::v32i8) {
21495 SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Lo);
21496 SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Lo);
21497 SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Hi);
21498 SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Hi);
21499 ALo = DAG.getNode(ExSSE41, dl, MVT::v16i16, ALo);
21500 BLo = DAG.getNode(ExSSE41, dl, MVT::v16i16, BLo);
21501 AHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, AHi);
21502 BHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, BHi);
21503 Lo = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
21504 DAG.getNode(ISD::MUL, dl, MVT::v16i16, ALo, BLo),
21505 DAG.getConstant(8, dl, MVT::v16i16));
21506 Hi = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
21507 DAG.getNode(ISD::MUL, dl, MVT::v16i16, AHi, BHi),
21508 DAG.getConstant(8, dl, MVT::v16i16));
21509 // The ymm variant of PACKUS treats the 128-bit lanes separately, so before
21510 // using PACKUS we need to permute the inputs to the correct lo/hi xmm lane.
21511 const int LoMask[] = {0, 1, 2, 3, 4, 5, 6, 7,
21512 16, 17, 18, 19, 20, 21, 22, 23};
21513 const int HiMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
21514 24, 25, 26, 27, 28, 29, 30, 31};
21515 return DAG.getNode(X86ISD::PACKUS, dl, VT,
21516 DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, LoMask),
21517 DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, HiMask));
21520 SDValue ExA = getExtendInVec(ExSSE41, dl, MVT::v16i16, A, DAG);
21521 SDValue ExB = getExtendInVec(ExSSE41, dl, MVT::v16i16, B, DAG);
21522 SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v16i16, ExA, ExB);
21523 SDValue MulH = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
21524 DAG.getConstant(8, dl, MVT::v16i16));
21525 Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Lo);
21526 Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Hi);
21527 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
21530 assert(VT == MVT::v16i8 &&
21531 "Pre-AVX2 support only supports v16i8 multiplication");
21532 MVT ExVT = MVT::v8i16;
21534 // Extract the lo parts and zero/sign extend to i16.
21535 SDValue ALo, BLo;
21536 if (Subtarget.hasSSE41()) {
21537 ALo = getExtendInVec(ExSSE41, dl, ExVT, A, DAG);
21538 BLo = getExtendInVec(ExSSE41, dl, ExVT, B, DAG);
21539 } else {
21540 const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3,
21541 -1, 4, -1, 5, -1, 6, -1, 7};
21542 ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
21543 BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
21544 ALo = DAG.getBitcast(ExVT, ALo);
21545 BLo = DAG.getBitcast(ExVT, BLo);
21546 ALo = DAG.getNode(ExShift, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
21547 BLo = DAG.getNode(ExShift, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
21548 }
21550 // Extract the hi parts and zero/sign extend to i16.
21551 SDValue AHi, BHi;
21552 if (Subtarget.hasSSE41()) {
21553 const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
21554 -1, -1, -1, -1, -1, -1, -1, -1};
21555 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
21556 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
21557 AHi = getExtendInVec(ExSSE41, dl, ExVT, AHi, DAG);
21558 BHi = getExtendInVec(ExSSE41, dl, ExVT, BHi, DAG);
21559 } else {
21560 const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11,
21561 -1, 12, -1, 13, -1, 14, -1, 15};
21562 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
21563 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
21564 AHi = DAG.getBitcast(ExVT, AHi);
21565 BHi = DAG.getBitcast(ExVT, BHi);
21566 AHi = DAG.getNode(ExShift, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
21567 BHi = DAG.getNode(ExShift, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
21570 // Multiply, lshr the upper 8bits to the lower 8bits of the lo/hi results and
21571 // pack back to v16i8.
21572 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
21573 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
21574 RLo = DAG.getNode(ISD::SRL, dl, ExVT, RLo, DAG.getConstant(8, dl, ExVT));
21575 RHi = DAG.getNode(ISD::SRL, dl, ExVT, RHi, DAG.getConstant(8, dl, ExVT));
21576 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
21579 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
21580 assert(Subtarget.isTargetWin64() && "Unexpected target");
21581 EVT VT = Op.getValueType();
21582 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
21583 "Unexpected return type for lowering");
21585 RTLIB::Libcall LC;
21586 bool isSigned;
21587 switch (Op->getOpcode()) {
21588 default: llvm_unreachable("Unexpected request for libcall!");
21589 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
21590 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
21591 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
21592 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
21593 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
21594 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
21595 }
21597 SDLoc dl(Op);
21598 SDValue InChain = DAG.getEntryNode();
21600 TargetLowering::ArgListTy Args;
21601 TargetLowering::ArgListEntry Entry;
21602 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
21603 EVT ArgVT = Op->getOperand(i).getValueType();
21604 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
21605 "Unexpected argument type for lowering");
21606 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
21607 Entry.Node = StackPtr;
21608 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
21609 MachinePointerInfo(), /* Alignment = */ 16);
21610 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
21611 Entry.Ty = PointerType::get(ArgTy,0);
21612 Entry.IsSExt = false;
21613 Entry.IsZExt = false;
21614 Args.push_back(Entry);
21615 }
21617 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
21618 getPointerTy(DAG.getDataLayout()));
21620 TargetLowering::CallLoweringInfo CLI(DAG);
21621 CLI.setDebugLoc(dl)
21622 .setChain(InChain)
21623 .setLibCallee(
21624 getLibcallCallingConv(LC),
21625 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
21626 std::move(Args))
21627 .setInRegister()
21628 .setSExtResult(isSigned)
21629 .setZExtResult(!isSigned);
21631 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
21632 return DAG.getBitcast(VT, CallInfo.first);
21635 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget &Subtarget,
21636 SelectionDAG &DAG) {
21637 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
21638 MVT VT = Op0.getSimpleValueType();
21639 SDLoc dl(Op);
21641 // Decompose 256-bit ops into smaller 128-bit ops.
21642 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
21643 unsigned Opcode = Op.getOpcode();
21644 unsigned NumElems = VT.getVectorNumElements();
21645 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), NumElems / 2);
21646 SDValue Lo0 = extract128BitVector(Op0, 0, DAG, dl);
21647 SDValue Lo1 = extract128BitVector(Op1, 0, DAG, dl);
21648 SDValue Hi0 = extract128BitVector(Op0, NumElems / 2, DAG, dl);
21649 SDValue Hi1 = extract128BitVector(Op1, NumElems / 2, DAG, dl);
21650 SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Lo0, Lo1);
21651 SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Hi0, Hi1);
21652 SDValue Ops[] = {
21653 DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(0), Hi.getValue(0)),
21654 DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(1), Hi.getValue(1))
21655 };
21656 return DAG.getMergeValues(Ops, dl);
21657 }
21659 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
21660 (VT == MVT::v8i32 && Subtarget.hasInt256()));
21662 // PMULxD operations multiply each even value (starting at 0) of LHS with
21663 // the related value of RHS and produce a widened result.
21664 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
21665 // => <2 x i64> <ae|cg>
21667 // In other words, to have all the results, we need to perform two PMULxD:
21668 // 1. one with the even values.
21669 // 2. one with the odd values.
21670 // To achieve #2, we need to place the odd values at an even position.
21672 // Place the odd value at an even position (basically, shift all values 1
21673 // step to the left):
21674 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
21675 // <a|b|c|d> => <b|undef|d|undef>
21676 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0,
21677 makeArrayRef(&Mask[0], VT.getVectorNumElements()));
21678 // <e|f|g|h> => <f|undef|h|undef>
21679 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1,
21680 makeArrayRef(&Mask[0], VT.getVectorNumElements()));
21682 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
21683 // ints.
21684 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
21685 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
21686 unsigned Opcode =
21687 (!IsSigned || !Subtarget.hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
21688 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
21689 // => <2 x i64> <ae|cg>
21690 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
21691 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
21692 // => <2 x i64> <bf|dh>
21693 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
21695 // Shuffle it back into the right order.
21696 SDValue Highs, Lows;
21697 if (VT == MVT::v8i32) {
21698 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
21699 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
21700 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
21701 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
21702 } else {
21703 const int HighMask[] = {1, 5, 3, 7};
21704 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
21705 const int LowMask[] = {0, 4, 2, 6};
21706 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
21709 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
21710 // unsigned multiply.
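// mulhs(a,b) == mulhu(a,b) - (a < 0 ? b : 0) - (b < 0 ? a : 0); the SRA-by-31
// masks below build exactly those two correction terms (T1 and T2).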
21711 if (IsSigned && !Subtarget.hasSSE41()) {
21712 SDValue ShAmt = DAG.getConstant(
21713 31, dl,
21714 DAG.getTargetLoweringInfo().getShiftAmountTy(VT, DAG.getDataLayout()));
21715 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
21716 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
21717 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
21718 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
21720 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
21721 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
21722 }
21724 // The first result of MUL_LOHI is actually the low value, followed by the
21725 // high value.
21726 SDValue Ops[] = {Lows, Highs};
21727 return DAG.getMergeValues(Ops, dl);
21728 }
21730 // Return true if the required (according to Opcode) shift-imm form is natively
21731 // supported by the Subtarget
21732 static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
21733 unsigned Opcode) {
21734 if (VT.getScalarSizeInBits() < 16)
21735 return false;
21737 if (VT.is512BitVector() && Subtarget.hasAVX512() &&
21738 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
21739 return true;
21741 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
21742 (VT.is256BitVector() && Subtarget.hasInt256());
21744 bool AShift = LShift && (Subtarget.hasAVX512() ||
21745 (VT != MVT::v2i64 && VT != MVT::v4i64));
21746 return (Opcode == ISD::SRA) ? AShift : LShift;
21747 }
21749 // The shift amount is a variable, but it is the same for all vector lanes.
21750 // These instructions are defined together with shift-immediate.
21751 static
21752 bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
21753 unsigned Opcode) {
21754 return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
21755 }
21757 // Return true if the required (according to Opcode) variable-shift form is
21758 // natively supported by the Subtarget
21759 static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
21760 unsigned Opcode) {
21762 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
21763 return false;
21765 // vXi16 supported only on AVX-512, BWI
21766 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
21767 return false;
21769 if (Subtarget.hasAVX512())
21770 return true;
21772 bool LShift = VT.is128BitVector() || VT.is256BitVector();
21773 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
21774 return (Opcode == ISD::SRA) ? AShift : LShift;
21777 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
21778 const X86Subtarget &Subtarget) {
21779 MVT VT = Op.getSimpleValueType();
21780 SDLoc dl(Op);
21781 SDValue R = Op.getOperand(0);
21782 SDValue Amt = Op.getOperand(1);
21784 unsigned X86Opc = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHLI :
21785 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
21787 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
21788 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
21789 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
21790 SDValue Ex = DAG.getBitcast(ExVT, R);
21792 // ashr(R, 63) === cmp_slt(R, 0)
21793 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
21794 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
21795 "Unsupported PCMPGT op");
21796 return DAG.getNode(X86ISD::PCMPGT, dl, VT,
21797 getZeroVector(VT, Subtarget, DAG, dl), R);
21798 }
21800 if (ShiftAmt >= 32) {
21801 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
21802 SDValue Upper =
21803 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
21804 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
21805 ShiftAmt - 32, DAG);
21806 if (VT == MVT::v2i64)
21807 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
21808 if (VT == MVT::v4i64)
21809 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
21810 {9, 1, 11, 3, 13, 5, 15, 7});
21811 } else {
21812 // SRA upper i32, SHL whole i64 and select lower i32.
21813 SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
21814 ShiftAmt, DAG);
21815 SDValue Lower =
21816 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
21817 Lower = DAG.getBitcast(ExVT, Lower);
21818 if (VT == MVT::v2i64)
21819 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
21820 if (VT == MVT::v4i64)
21821 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
21822 {8, 1, 10, 3, 12, 5, 14, 7});
21823 }
21824 return DAG.getBitcast(VT, Ex);
21825 };
21827 // Optimize shl/srl/sra with constant shift amount.
21828 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
21829 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
21830 uint64_t ShiftAmt = ShiftConst->getZExtValue();
21832 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
21833 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
21835 // i64 SRA needs to be performed as partial shifts.
21836 if ((VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
21837 Op.getOpcode() == ISD::SRA && !Subtarget.hasXOP())
21838 return ArithmeticShiftRight64(ShiftAmt);
21840 if (VT == MVT::v16i8 ||
21841 (Subtarget.hasInt256() && VT == MVT::v32i8) ||
21842 VT == MVT::v64i8) {
21843 unsigned NumElts = VT.getVectorNumElements();
21844 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
21846 // Simple i8 add case
21847 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
21848 return DAG.getNode(ISD::ADD, dl, VT, R, R);
21850 // ashr(R, 7) === cmp_slt(R, 0)
21851 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
21852 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
21853 if (VT.is512BitVector()) {
21854 assert(VT == MVT::v64i8 && "Unexpected element type!");
21855 SDValue CMP = DAG.getNode(X86ISD::PCMPGTM, dl, MVT::v64i1, Zeros, R);
21856 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
21858 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
21861 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
21862 if (VT == MVT::v16i8 && Subtarget.hasXOP())
21863 return SDValue();
21865 if (Op.getOpcode() == ISD::SHL) {
21866 // Make a large shift.
21867 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT,
21868 R, ShiftAmt, DAG);
21869 SHL = DAG.getBitcast(VT, SHL);
21870 // Zero out the rightmost bits.
21871 return DAG.getNode(ISD::AND, dl, VT, SHL,
21872 DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
21873 }
21874 if (Op.getOpcode() == ISD::SRL) {
21875 // Make a large shift.
21876 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT,
21877 R, ShiftAmt, DAG);
21878 SRL = DAG.getBitcast(VT, SRL);
21879 // Zero out the leftmost bits.
21880 return DAG.getNode(ISD::AND, dl, VT, SRL,
21881 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
21882 }
21883 if (Op.getOpcode() == ISD::SRA) {
21884 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
21885 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
21887 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
21888 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
21889 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
21890 return Res;
21891 }
21892 llvm_unreachable("Unknown shift opcode.");
21893 }
21894 }
21895 }
21897 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
21898 // TODO: Replace constant extraction with getTargetConstantBitsFromNode.
21899 if (!Subtarget.is64Bit() && !Subtarget.hasXOP() &&
21900 (VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64) ||
21901 (Subtarget.hasAVX512() && VT == MVT::v8i64))) {
21903 // AVX1 targets may be extracting a 128-bit vector from a 256-bit constant.
21904 unsigned SubVectorScale = 1;
21905 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
21906 SubVectorScale =
21907 Amt.getOperand(0).getValueSizeInBits() / Amt.getValueSizeInBits();
21908 Amt = Amt.getOperand(0);
21909 }
21911 // Peek through any splat that was introduced for i64 shift vectorization.
21912 int SplatIndex = -1;
21913 if (ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt.getNode()))
21914 if (SVN->isSplat()) {
21915 SplatIndex = SVN->getSplatIndex();
21916 Amt = Amt.getOperand(0);
21917 assert(SplatIndex < (int)VT.getVectorNumElements() &&
21918 "Splat shuffle referencing second operand");
21919 }
21921 if (Amt.getOpcode() != ISD::BITCAST ||
21922 Amt.getOperand(0).getOpcode() != ISD::BUILD_VECTOR)
21923 return SDValue();
21925 Amt = Amt.getOperand(0);
21926 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
21927 (SubVectorScale * VT.getVectorNumElements());
21928 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
21929 uint64_t ShiftAmt = 0;
21930 unsigned BaseOp = (SplatIndex < 0 ? 0 : SplatIndex * Ratio);
21931 for (unsigned i = 0; i != Ratio; ++i) {
21932 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i + BaseOp));
21933 if (!C)
21934 return SDValue();
21935 // 6 == Log2(64)
21936 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
21937 }
21939 // Check remaining shift amounts (if not a splat).
21940 if (SplatIndex < 0) {
21941 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
21942 uint64_t ShAmt = 0;
21943 for (unsigned j = 0; j != Ratio; ++j) {
21944 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
21945 if (!C)
21946 return SDValue();
21947 // 6 == Log2(64)
21948 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
21949 }
21950 if (ShAmt != ShiftAmt)
21951 return SDValue();
21952 }
21953 }
21955 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
21956 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
21958 if (Op.getOpcode() == ISD::SRA)
21959 return ArithmeticShiftRight64(ShiftAmt);
21960 }
21962 return SDValue();
21963 }
21965 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
21966 const X86Subtarget &Subtarget) {
21967 MVT VT = Op.getSimpleValueType();
21968 SDLoc dl(Op);
21969 SDValue R = Op.getOperand(0);
21970 SDValue Amt = Op.getOperand(1);
21972 unsigned X86OpcI = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHLI :
21973 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
21975 unsigned X86OpcV = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHL :
21976 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRL : X86ISD::VSRA;
21978 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode())) {
21979 SDValue BaseShAmt;
21980 MVT EltVT = VT.getVectorElementType();
21982 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
21983 // Check if this build_vector node is doing a splat.
21984 // If so, then set BaseShAmt equal to the splat value.
21985 BaseShAmt = BV->getSplatValue();
21986 if (BaseShAmt && BaseShAmt.isUndef())
21987 BaseShAmt = SDValue();
21988 } else {
21989 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
21990 Amt = Amt.getOperand(0);
21992 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
21993 if (SVN && SVN->isSplat()) {
21994 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
21995 SDValue InVec = Amt.getOperand(0);
21996 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
21997 assert((SplatIdx < InVec.getSimpleValueType().getVectorNumElements()) &&
21998 "Unexpected shuffle index found!");
21999 BaseShAmt = InVec.getOperand(SplatIdx);
22000 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
22001 if (ConstantSDNode *C =
22002 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
22003 if (C->getZExtValue() == SplatIdx)
22004 BaseShAmt = InVec.getOperand(1);
22005 }
22006 }
22008 if (!BaseShAmt)
22009 // Avoid introducing an extract element from a shuffle.
22010 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
22011 DAG.getIntPtrConstant(SplatIdx, dl));
22015 if (BaseShAmt.getNode()) {
22016 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
22017 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
22018 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
22019 else if (EltVT.bitsLT(MVT::i32))
22020 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
22022 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
22026 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
22027 if (!Subtarget.is64Bit() && VT == MVT::v2i64 &&
22028 Amt.getOpcode() == ISD::BITCAST &&
22029 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
22030 Amt = Amt.getOperand(0);
22031 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
22032 VT.getVectorNumElements();
22033 std::vector<SDValue> Vals(Ratio);
22034 for (unsigned i = 0; i != Ratio; ++i)
22035 Vals[i] = Amt.getOperand(i);
22036 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
22037 for (unsigned j = 0; j != Ratio; ++j)
22038 if (Vals[j] != Amt.getOperand(i + j))
22039 return SDValue();
22040 }
22042 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
22043 return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
22044 }
22046 return SDValue();
22047 }
22048 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
22049 SelectionDAG &DAG) {
22050 MVT VT = Op.getSimpleValueType();
22051 SDLoc dl(Op);
22052 SDValue R = Op.getOperand(0);
22053 SDValue Amt = Op.getOperand(1);
22054 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
22056 assert(VT.isVector() && "Custom lowering only for vector shifts!");
22057 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
22059 if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
22060 return V;
22062 if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
22063 return V;
22065 if (SupportedVectorVarShift(VT, Subtarget, Op.getOpcode()))
22066 return Op;
22068 // XOP has 128-bit variable logical/arithmetic shifts.
22069 // +ve/-ve Amt = shift left/right.
22070 if (Subtarget.hasXOP() &&
22071 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
22072 VT == MVT::v8i16 || VT == MVT::v16i8)) {
22073 if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) {
22074 SDValue Zero = getZeroVector(VT, Subtarget, DAG, dl);
22075 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
22077 if (Op.getOpcode() == ISD::SHL || Op.getOpcode() == ISD::SRL)
22078 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
22079 if (Op.getOpcode() == ISD::SRA)
22080 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
22083 // 2i64 vector logical shifts can efficiently avoid scalarization - do the
22084 // shifts per-lane and then shuffle the partial results back together.
22085 if (VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) {
22086 // Splat the shift amounts so the scalar shifts above will catch it.
22087 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
22088 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
22089 SDValue R0 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt0);
22090 SDValue R1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt1);
22091 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
22094 // i64 vector arithmetic shift can be emulated with the transform:
22095 // M = lshr(SIGN_MASK, Amt)
22096 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
22097 if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
22098 Op.getOpcode() == ISD::SRA) {
22099 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
22100 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
22101 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
22102 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
22103 R = DAG.getNode(ISD::SUB, dl, VT, R, M);
22104 return R;
22105 }
22107 // If possible, lower this packed shift into a vector multiply instead of
22108 // expanding it into a sequence of scalar shifts.
22109 // Do this only if the vector shift count is a constant build_vector.
22110 if (ConstantAmt && Op.getOpcode() == ISD::SHL &&
22111 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
22112 (Subtarget.hasInt256() && VT == MVT::v16i16))) {
22113 SmallVector<SDValue, 8> Elts;
22114 MVT SVT = VT.getVectorElementType();
22115 unsigned SVTBits = SVT.getSizeInBits();
22116 APInt One(SVTBits, 1);
22117 unsigned NumElems = VT.getVectorNumElements();
22119 for (unsigned i=0; i !=NumElems; ++i) {
22120 SDValue Op = Amt->getOperand(i);
22121 if (Op->isUndef()) {
22122 Elts.push_back(Op);
22123 continue;
22124 }
22126 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
22127 APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
22128 uint64_t ShAmt = C.getZExtValue();
22129 if (ShAmt >= SVTBits) {
22130 Elts.push_back(DAG.getUNDEF(SVT));
22131 continue;
22132 }
22133 Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
22134 }
22135 SDValue BV = DAG.getBuildVector(VT, dl, Elts);
22136 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
22139 // Lower SHL with variable shift amount.
22140 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
22141 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
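// With the amount shifted into the exponent field, adding the bias of 1.0f
// (0x3f800000) forms the float 2^Amt per lane; FP_TO_SINT then recovers an
// integer power of two, turning the variable shift into a single MUL.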
22143 Op = DAG.getNode(ISD::ADD, dl, VT, Op,
22144 DAG.getConstant(0x3f800000U, dl, VT));
22145 Op = DAG.getBitcast(MVT::v4f32, Op);
22146 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
22147 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
22150 // If possible, lower this shift as a sequence of two shifts by
22151 // constant plus a MOVSS/MOVSD/PBLEND instead of scalarizing it.
22153 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
22155 // Could be rewritten as:
22156 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
22158 // The advantage is that the two shifts from the example would be
22159 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
22160 // the vector shift into four scalar shifts plus four pairs of vector
22161 // insert/extract.
22162 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32)) {
22163 unsigned TargetOpcode = X86ISD::MOVSS;
22164 bool CanBeSimplified;
22165 // The splat value for the first packed shift (the 'X' from the example).
22166 SDValue Amt1 = Amt->getOperand(0);
22167 // The splat value for the second packed shift (the 'Y' from the example).
22168 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) : Amt->getOperand(2);
22170 // See if it is possible to replace this node with a sequence of
22171 // two shifts followed by a MOVSS/MOVSD/PBLEND.
22172 if (VT == MVT::v4i32) {
22173 // Check if it is legal to use a MOVSS.
22174 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
22175 Amt2 == Amt->getOperand(3);
22176 if (!CanBeSimplified) {
22177 // Otherwise, check if we can still simplify this node using a MOVSD.
22178 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
22179 Amt->getOperand(2) == Amt->getOperand(3);
22180 TargetOpcode = X86ISD::MOVSD;
22181 Amt2 = Amt->getOperand(2);
22182 }
22183 } else {
22184 // Do similar checks for the case where the machine value type
22185 // is MVT::v8i16.
22186 CanBeSimplified = Amt1 == Amt->getOperand(1);
22187 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
22188 CanBeSimplified = Amt2 == Amt->getOperand(i);
22190 if (!CanBeSimplified) {
22191 TargetOpcode = X86ISD::MOVSD;
22192 CanBeSimplified = true;
22193 Amt2 = Amt->getOperand(4);
22194 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
22195 CanBeSimplified = Amt1 == Amt->getOperand(i);
22196 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
22197 CanBeSimplified = Amt2 == Amt->getOperand(j);
22198 }
22199 }
22201 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
22202 isa<ConstantSDNode>(Amt2)) {
22203 // Replace this node with two shifts followed by a MOVSS/MOVSD/PBLEND.
22204 MVT CastVT = MVT::v4i32;
22205 SDValue Splat1 =
22206 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), dl, VT);
22207 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
22208 SDValue Splat2 =
22209 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), dl, VT);
22210 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
22211 SDValue BitCast1 = DAG.getBitcast(CastVT, Shift1);
22212 SDValue BitCast2 = DAG.getBitcast(CastVT, Shift2);
22213 if (TargetOpcode == X86ISD::MOVSD)
22214 return DAG.getBitcast(VT, DAG.getVectorShuffle(CastVT, dl, BitCast1,
22215 BitCast2, {0, 1, 6, 7}));
22216 return DAG.getBitcast(VT, DAG.getVectorShuffle(CastVT, dl, BitCast1,
22217 BitCast2, {0, 5, 6, 7}));
22221 // v4i32 Non Uniform Shifts.
22222 // If the shift amount is constant we can shift each lane using the SSE2
22223 // immediate shifts, else we need to zero-extend each lane to the lower i64
22224 // and shift using the SSE2 variable shifts.
22225 // The separate results can then be blended together.
22226 if (VT == MVT::v4i32) {
22227 unsigned Opc = Op.getOpcode();
22228 SDValue Amt0, Amt1, Amt2, Amt3;
22229 if (ConstantAmt) {
22230 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
22231 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
22232 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
22233 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
22234 } else {
22235 // ISD::SHL is handled above but we include it here for completeness.
22236 switch (Opc) {
22237 default:
22238 llvm_unreachable("Unknown target vector shift node");
22239 case ISD::SHL:
22240 Opc = X86ISD::VSHL;
22241 break;
22242 case ISD::SRL:
22243 Opc = X86ISD::VSRL;
22244 break;
22245 case ISD::SRA:
22246 Opc = X86ISD::VSRA;
22247 break;
22248 }
22249 // The SSE2 shifts use the lower i64 as the same shift amount for
22250 // all lanes and the upper i64 is ignored. These shuffle masks
22251 // optimally zero-extend each lane on SSE2/SSE41/AVX targets.
22252 SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
22253 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
22254 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
22255 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
22256 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
22257 }
22259 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
22260 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
22261 SDValue R2 = DAG.getNode(Opc, dl, VT, R, Amt2);
22262 SDValue R3 = DAG.getNode(Opc, dl, VT, R, Amt3);
22263 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
22264 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
22265 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
22268 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
22269 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
22270 // make the existing SSE solution better.
22271 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
22272 (Subtarget.hasAVX512() && VT == MVT::v16i16) ||
22273 (Subtarget.hasAVX512() && VT == MVT::v16i8) ||
22274 (Subtarget.hasBWI() && VT == MVT::v32i8)) {
22275 MVT EvtSVT = (VT == MVT::v32i8 ? MVT::i16 : MVT::i32);
22276 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
22277 unsigned ExtOpc =
22278 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
22279 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
22280 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, ExtVT, Amt);
22281 return DAG.getNode(ISD::TRUNCATE, dl, VT,
22282 DAG.getNode(Op.getOpcode(), dl, ExtVT, R, Amt));
22285 if (VT == MVT::v16i8 ||
22286 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
22287 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
22288 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
22289 unsigned ShiftOpcode = Op->getOpcode();
22291 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
22292 if (VT.is512BitVector()) {
22293 // On AVX512BW targets we make use of the fact that VSELECT lowers
22294 // to a masked blend which selects bytes based just on the sign bit
22295 // extracted to a mask.
22296 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
22297 V0 = DAG.getBitcast(VT, V0);
22298 V1 = DAG.getBitcast(VT, V1);
22299 Sel = DAG.getBitcast(VT, Sel);
22300 Sel = DAG.getNode(X86ISD::CVT2MASK, dl, MaskVT, Sel);
22301 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
22302 } else if (Subtarget.hasSSE41()) {
22303 // On SSE41 targets we make use of the fact that VSELECT lowers
22304 // to PBLENDVB which selects bytes based just on the sign bit.
22305 V0 = DAG.getBitcast(VT, V0);
22306 V1 = DAG.getBitcast(VT, V1);
22307 Sel = DAG.getBitcast(VT, Sel);
22308 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
22310 // On pre-SSE41 targets we test for the sign bit by comparing to
22311 // zero - a negative value will set all bits of the lanes to true
22312 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
22313 SDValue Z = getZeroVector(SelVT, Subtarget, DAG, dl);
22314 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
22315 return DAG.getSelect(dl, SelVT, C, V0, V1);
22318 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
22319 // We can safely do this using i16 shifts as we're only interested in
22320 // the 3 lower bits of each byte.
22321 Amt = DAG.getBitcast(ExtVT, Amt);
22322 Amt = DAG.getNode(ISD::SHL, dl, ExtVT, Amt, DAG.getConstant(5, dl, ExtVT));
22323 Amt = DAG.getBitcast(VT, Amt);
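// Each SignBitSelect step below tests one bit of the original amount via the
// byte's sign bit: after the '<< 5' bit 2 sits in bit 7, and the 'Amt + Amt'
// between steps promotes the next lower bit into that position.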
22325 if (Op->getOpcode() == ISD::SHL || Op->getOpcode() == ISD::SRL) {
22326 // r = VSELECT(r, shift(r, 4), a);
22327 SDValue M =
22328 DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
22329 R = SignBitSelect(VT, Amt, M, R);
22332 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
22334 // r = VSELECT(r, shift(r, 2), a);
22335 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
22336 R = SignBitSelect(VT, Amt, M, R);
22339 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
22341 // return VSELECT(r, shift(r, 1), a);
22342 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
22343 R = SignBitSelect(VT, Amt, M, R);
22344 return R;
22345 }
22347 if (Op->getOpcode() == ISD::SRA) {
22348 // For SRA we need to unpack each byte to the higher byte of a i16 vector
22349 // so we can correctly sign extend. We don't care what happens to the
22350 // lower byte.
22351 SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt);
22352 SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt);
22353 SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R);
22354 SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R);
22355 ALo = DAG.getBitcast(ExtVT, ALo);
22356 AHi = DAG.getBitcast(ExtVT, AHi);
22357 RLo = DAG.getBitcast(ExtVT, RLo);
22358 RHi = DAG.getBitcast(ExtVT, RHi);
22360 // r = VSELECT(r, shift(r, 4), a);
22361 SDValue MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
22362 DAG.getConstant(4, dl, ExtVT));
22363 SDValue MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
22364 DAG.getConstant(4, dl, ExtVT));
22365 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
22366 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
22369 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
22370 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
22372 // r = VSELECT(r, shift(r, 2), a);
22373 MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
22374 DAG.getConstant(2, dl, ExtVT));
22375 MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
22376 DAG.getConstant(2, dl, ExtVT));
22377 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
22378 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
22381 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
22382 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
22384 // r = VSELECT(r, shift(r, 1), a);
22385 MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
22386 DAG.getConstant(1, dl, ExtVT));
22387 MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
22388 DAG.getConstant(1, dl, ExtVT));
22389 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
22390 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
22392 // Logical shift the result back to the lower byte, leaving a zero upper
22393 // byte
22394 // meaning that we can safely pack with PACKUSWB.
22395 RLo =
22396 DAG.getNode(ISD::SRL, dl, ExtVT, RLo, DAG.getConstant(8, dl, ExtVT));
22397 RHi =
22398 DAG.getNode(ISD::SRL, dl, ExtVT, RHi, DAG.getConstant(8, dl, ExtVT));
22399 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
22403 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
22404 MVT ExtVT = MVT::v8i32;
22405 SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
22406 SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z);
22407 SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Amt, Z);
22408 SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Z, R);
22409 SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Z, R);
22410 ALo = DAG.getBitcast(ExtVT, ALo);
22411 AHi = DAG.getBitcast(ExtVT, AHi);
22412 RLo = DAG.getBitcast(ExtVT, RLo);
22413 RHi = DAG.getBitcast(ExtVT, RHi);
22414 SDValue Lo = DAG.getNode(Op.getOpcode(), dl, ExtVT, RLo, ALo);
22415 SDValue Hi = DAG.getNode(Op.getOpcode(), dl, ExtVT, RHi, AHi);
22416 Lo = DAG.getNode(ISD::SRL, dl, ExtVT, Lo, DAG.getConstant(16, dl, ExtVT));
22417 Hi = DAG.getNode(ISD::SRL, dl, ExtVT, Hi, DAG.getConstant(16, dl, ExtVT));
22418 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
22421 if (VT == MVT::v8i16) {
22422 unsigned ShiftOpcode = Op->getOpcode();
22424 // If we have a constant shift amount, the non-SSE41 path is best as
22425 // avoiding bitcasts make it easier to constant fold and reduce to PBLENDW.
22426 bool UseSSE41 = Subtarget.hasSSE41() &&
22427 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
22429 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
22430 // On SSE41 targets we make use of the fact that VSELECT lowers
22431 // to PBLENDVB which selects bytes based just on the sign bit.
22432 if (UseSSE41) {
22433 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
22434 V0 = DAG.getBitcast(ExtVT, V0);
22435 V1 = DAG.getBitcast(ExtVT, V1);
22436 Sel = DAG.getBitcast(ExtVT, Sel);
22437 return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
22438 }
22439 // On pre-SSE41 targets we splat the sign bit - a negative value will
22440 // set all bits of the lanes to true and VSELECT uses that in
22441 // its OR(AND(V0,C),AND(V1,~C)) lowering.
22442 SDValue C =
22443 DAG.getNode(ISD::SRA, dl, VT, Sel, DAG.getConstant(15, dl, VT));
22444 return DAG.getSelect(dl, VT, C, V0, V1);
22447 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
22448 if (UseSSE41) {
22449 // On SSE41 targets we need to replicate the shift mask in both
22450 // bytes for PBLENDVB.
22451 Amt = DAG.getNode(
22452 ISD::OR, dl, VT,
22453 DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)),
22454 DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)));
22455 } else {
22456 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT));
22457 }
22459 // r = VSELECT(r, shift(r, 8), a);
22460 SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT));
22461 R = SignBitSelect(Amt, M, R);
22464 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
22466 // r = VSELECT(r, shift(r, 4), a);
22467 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
22468 R = SignBitSelect(Amt, M, R);
22471 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
22473 // r = VSELECT(r, shift(r, 2), a);
22474 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
22475 R = SignBitSelect(Amt, M, R);
22478 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
22480 // return VSELECT(r, shift(r, 1), a);
22481 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
22482 R = SignBitSelect(Amt, M, R);
22483 return R;
22484 }
22486 // Decompose 256-bit shifts into smaller 128-bit shifts.
22487 if (VT.is256BitVector())
22488 return Lower256IntArith(Op, DAG);
22490 return SDValue();
22491 }
22493 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
22494 SelectionDAG &DAG) {
22495 MVT VT = Op.getSimpleValueType();
22496 SDLoc DL(Op);
22497 SDValue R = Op.getOperand(0);
22498 SDValue Amt = Op.getOperand(1);
22500 assert(VT.isVector() && "Custom lowering only for vector rotates!");
22501 assert(Subtarget.hasXOP() && "XOP support required for vector rotates!");
22502 assert((Op.getOpcode() == ISD::ROTL) && "Only ROTL supported");
22504 // XOP has 128-bit vector variable + immediate rotates.
22505 // +ve/-ve Amt = rotate left/right.
22507 // Split 256-bit integers.
22508 if (VT.is256BitVector())
22509 return Lower256IntArith(Op, DAG);
22511 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
22513 // Attempt to rotate by immediate.
22514 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
22515 if (auto *RotateConst = BVAmt->getConstantSplatNode()) {
22516 uint64_t RotateAmt = RotateConst->getAPIntValue().getZExtValue();
22517 assert(RotateAmt < VT.getScalarSizeInBits() && "Rotation out of range");
22518 return DAG.getNode(X86ISD::VPROTI, DL, VT, R,
22519 DAG.getConstant(RotateAmt, DL, MVT::i8));
22523 // Use general rotate by variable (per-element).
22524 return DAG.getNode(X86ISD::VPROT, DL, VT, R, Amt);
22527 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
22528 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
22529 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
22530 // looks for this combo and may remove the "setcc" instruction if the "setcc"
22531 // has only one use.
22532 SDNode *N = Op.getNode();
22533 SDValue LHS = N->getOperand(0);
22534 SDValue RHS = N->getOperand(1);
22535 unsigned BaseOp = 0;
22536 X86::CondCode Cond;
22537 SDLoc DL(Op);
22538 switch (Op.getOpcode()) {
22539 default: llvm_unreachable("Unknown ovf instruction!");
22540 case ISD::SADDO:
22541 // An add of one will be selected as an INC. Note that INC doesn't
22542 // set CF, so we can't do this for UADDO.
22543 if (isOneConstant(RHS)) {
22544 BaseOp = X86ISD::INC;
22545 Cond = X86::COND_O;
22546 break;
22547 }
22548 BaseOp = X86ISD::ADD;
22549 Cond = X86::COND_O;
22550 break;
22551 case ISD::UADDO:
22552 BaseOp = X86ISD::ADD;
22553 Cond = X86::COND_B;
22554 break;
22555 case ISD::SSUBO:
22556 // A subtract of one will be selected as a DEC. Note that DEC doesn't
22557 // set CF, so we can't do this for USUBO.
22558 if (isOneConstant(RHS)) {
22559 BaseOp = X86ISD::DEC;
22560 Cond = X86::COND_O;
22561 break;
22562 }
22563 BaseOp = X86ISD::SUB;
22564 Cond = X86::COND_O;
22565 break;
22566 case ISD::USUBO:
22567 BaseOp = X86ISD::SUB;
22568 Cond = X86::COND_B;
22569 break;
22570 case ISD::SMULO:
22571 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
22572 Cond = X86::COND_O;
22573 break;
22574 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
22575 if (N->getValueType(0) == MVT::i8) {
22576 BaseOp = X86ISD::UMUL8;
22577 Cond = X86::COND_O;
22578 break;
22579 }
22580 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
22581 MVT::i32);
22582 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
22584 SDValue SetCC = getSETCC(X86::COND_O, SDValue(Sum.getNode(), 2), DL, DAG);
22586 if (N->getValueType(1) == MVT::i1)
22587 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
22589 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
22590 }
22591 }
22593 // Also sets EFLAGS.
22594 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
22595 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
22597 SDValue SetCC = getSETCC(Cond, SDValue(Sum.getNode(), 1), DL, DAG);
22599 if (N->getValueType(1) == MVT::i1)
22600 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
22602 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
22605 /// Returns true if the operand type is exactly twice the native width, and
22606 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
22607 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
22608 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
22609 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
22610 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
22612 if (OpWidth == 64)
22613 return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
22614 else if (OpWidth == 128)
22615 return Subtarget.hasCmpxchg16b();
22617 return false;
22618 }
22620 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
22621 return needsCmpXchgNb(SI->getValueOperand()->getType());
22624 // Note: this turns large loads into lock cmpxchg8b/16b.
22625 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
22626 TargetLowering::AtomicExpansionKind
22627 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
22628 auto PTy = cast<PointerType>(LI->getPointerOperandType());
22629 return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg
22630 : AtomicExpansionKind::None;
22633 TargetLowering::AtomicExpansionKind
22634 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
22635 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
22636 Type *MemType = AI->getType();
22638 // If the operand is too big, we must see if cmpxchg8/16b is available
22639 // and default to library calls otherwise.
22640 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
22641 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
22642 : AtomicExpansionKind::None;
22645 AtomicRMWInst::BinOp Op = AI->getOperation();
22646 switch (Op) {
22647 default:
22648 llvm_unreachable("Unknown atomic operation");
22649 case AtomicRMWInst::Xchg:
22650 case AtomicRMWInst::Add:
22651 case AtomicRMWInst::Sub:
22652 // It's better to use xadd, xsub or xchg for these in all cases.
22653 return AtomicExpansionKind::None;
22654 case AtomicRMWInst::Or:
22655 case AtomicRMWInst::And:
22656 case AtomicRMWInst::Xor:
22657 // If the atomicrmw's result isn't actually used, we can just add a "lock"
22658 // prefix to a normal instruction for these operations.
22659 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
22660 : AtomicExpansionKind::None;
22661 case AtomicRMWInst::Nand:
22662 case AtomicRMWInst::Max:
22663 case AtomicRMWInst::Min:
22664 case AtomicRMWInst::UMax:
22665 case AtomicRMWInst::UMin:
22666 // These always require a non-trivial set of data operations on x86. We must
22667 // use a cmpxchg loop.
22668 return AtomicExpansionKind::CmpXChg;
22669 }
22670 }
22672 LoadInst *
22673 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
22674 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
22675 Type *MemType = AI->getType();
22676 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
22677 // there is no benefit in turning such RMWs into loads, and it is actually
22678 // harmful as it introduces a mfence.
22679 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
22680 return nullptr;
22682 auto Builder = IRBuilder<>(AI);
22683 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
22684 auto SynchScope = AI->getSynchScope();
22685 // We must restrict the ordering to avoid generating loads with Release or
22686 // ReleaseAcquire orderings.
22687 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
22688 auto Ptr = AI->getPointerOperand();
22690 // Before the load we need a fence. Here is an example lifted from
22691 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
22692 // is required:
22693 // Thread 0:
22694 // x.store(1, relaxed);
22695 // r1 = y.fetch_add(0, release);
22696 // Thread 1:
22697 // y.fetch_add(42, acquire);
22698 // r2 = x.load(relaxed);
22699 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
22700 // lowered to just a load without a fence. A mfence flushes the store buffer,
22701 // making the optimization clearly correct.
22702 // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
22703 // otherwise, we might be able to be more aggressive on relaxed idempotent
22704 // rmw. In practice, they do not look useful, so we don't try to be
22705 // especially clever.
22706 if (SynchScope == SingleThread)
22707 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
22708 // the IR level, so we must wrap it in an intrinsic.
22709 return nullptr;
22711 if (!Subtarget.hasMFence())
22712 // FIXME: it might make sense to use a locked operation here but on a
22713 // different cache-line to prevent cache-line bouncing. In practice it
22714 // is probably a small win, and x86 processors without mfence are rare
22715 // enough that we do not bother.
22716 return nullptr;
22718 Function *MFence =
22719 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
22720 Builder.CreateCall(MFence, {});
22722 // Finally we can emit the atomic load.
22723 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
22724 AI->getType()->getPrimitiveSizeInBits());
22725 Loaded->setAtomic(Order, SynchScope);
22726 AI->replaceAllUsesWith(Loaded);
22727 AI->eraseFromParent();
22728 return Loaded;
22729 }
22731 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
22732 SelectionDAG &DAG) {
22733 SDLoc dl(Op);
22734 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
22735 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
22736 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
22737 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
22739 // The only fence that needs an instruction is a sequentially-consistent
22740 // cross-thread fence.
22741 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
22742 FenceScope == CrossThread) {
22743 if (Subtarget.hasMFence())
22744 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
22746 SDValue Chain = Op.getOperand(0);
22747 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
22748 SDValue Ops[] = {
22749 DAG.getRegister(X86::ESP, MVT::i32), // Base
22750 DAG.getTargetConstant(1, dl, MVT::i8), // Scale
22751 DAG.getRegister(0, MVT::i32), // Index
22752 DAG.getTargetConstant(0, dl, MVT::i32), // Disp
22753 DAG.getRegister(0, MVT::i32), // Segment.
22754 Zero,
22755 Chain
22756 };
22757 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
22758 return SDValue(Res, 0);
22761 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
22762 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
22765 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
22766 SelectionDAG &DAG) {
22767 MVT T = Op.getSimpleValueType();
22768 SDLoc DL(Op);
22769 unsigned Reg = 0;
22770 unsigned size = 0;
22771 switch(T.SimpleTy) {
22772 default: llvm_unreachable("Invalid value type!");
22773 case MVT::i8: Reg = X86::AL; size = 1; break;
22774 case MVT::i16: Reg = X86::AX; size = 2; break;
22775 case MVT::i32: Reg = X86::EAX; size = 4; break;
22776 case MVT::i64:
22777 assert(Subtarget.is64Bit() && "Node not type legal!");
22778 Reg = X86::RAX; size = 8;
22779 break;
22780 }
22781 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
22782 Op.getOperand(2), SDValue());
22783 SDValue Ops[] = { cpIn.getValue(0),
22784 Op.getOperand(1),
22785 Op.getOperand(3),
22786 DAG.getTargetConstant(size, DL, MVT::i8),
22787 cpIn.getValue(1) };
22788 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
22789 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
22790 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
22791 Ops, T, MMO);
22793 SDValue cpOut =
22794 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
22795 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
22796 MVT::i32, cpOut.getValue(2));
22797 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
22799 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
22800 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
22801 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
22802 return SDValue();
22803 }
22805 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
22806 SelectionDAG &DAG) {
22807 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
22808 MVT DstVT = Op.getSimpleValueType();
22810 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
22811 SrcVT == MVT::i64) {
22812 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
22813 if (DstVT != MVT::f64)
22814 // This conversion needs to be expanded.
22815 return SDValue();
22817 SDValue Op0 = Op->getOperand(0);
22818 SmallVector<SDValue, 16> Elts;
22819 SDLoc dl(Op);
22820 unsigned NumElts;
22821 MVT SVT;
22822 if (SrcVT.isVector()) {
22823 NumElts = SrcVT.getVectorNumElements();
22824 SVT = SrcVT.getVectorElementType();
22826 // Widen the vector in input in the case of MVT::v2i32.
22827 // Example: from MVT::v2i32 to MVT::v4i32.
22828 for (unsigned i = 0, e = NumElts; i != e; ++i)
22829 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, Op0,
22830 DAG.getIntPtrConstant(i, dl)));
22831 } else {
22832 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
22833 "Unexpected source type in LowerBITCAST");
22834 Elts.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op0,
22835 DAG.getIntPtrConstant(0, dl)));
22836 Elts.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op0,
22837 DAG.getIntPtrConstant(1, dl)));
22838 SVT = MVT::i32;
22839 NumElts = 2;
22840 }
22841 // Explicitly mark the extra elements as Undef.
22842 Elts.append(NumElts, DAG.getUNDEF(SVT));
22844 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
22845 SDValue BV = DAG.getBuildVector(NewVT, dl, Elts);
22846 SDValue ToV2F64 = DAG.getBitcast(MVT::v2f64, BV);
22847 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
22848 DAG.getIntPtrConstant(0, dl));
22849 }
22851 assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() &&
22852 Subtarget.hasMMX() && "Unexpected custom BITCAST");
22853 assert((DstVT == MVT::i64 ||
22854 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
22855 "Unexpected custom BITCAST");
22856 // i64 <=> MMX conversions are Legal.
22857 if (SrcVT==MVT::i64 && DstVT.isVector())
22858 return Op;
22859 if (DstVT==MVT::i64 && SrcVT.isVector())
22860 return Op;
22861 // MMX <=> MMX conversions are Legal.
22862 if (SrcVT.isVector() && DstVT.isVector())
22863 return Op;
22864 // All other conversions need to be expanded.
22865 return SDValue();
22866 }
22868 /// Compute the horizontal sum of bytes in V for the elements of VT.
22870 /// Requires V to be a byte vector and VT to be an integer vector type with
22871 /// wider elements than V's type. The width of the elements of VT determines
22872 /// how many bytes of V are summed horizontally to produce each element of the
22873 /// result.
22874 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
22875 const X86Subtarget &Subtarget,
22876 SelectionDAG &DAG) {
22877 SDLoc DL(V);
22878 MVT ByteVecVT = V.getSimpleValueType();
22879 MVT EltVT = VT.getVectorElementType();
22880 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
22881 "Expected value to have byte element type.");
22882 assert(EltVT != MVT::i8 &&
22883 "Horizontal byte sum only makes sense for wider elements!");
22884 unsigned VecSize = VT.getSizeInBits();
22885 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
22887 // PSADBW instruction horizontally add all bytes and leave the result in i64
22888 // chunks, thus directly computes the pop count for v2i64 and v4i64.
22889 if (EltVT == MVT::i64) {
22890 SDValue Zeros = getZeroVector(ByteVecVT, Subtarget, DAG, DL);
22891 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
22892 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
22893 return DAG.getBitcast(VT, V);
22896 if (EltVT == MVT::i32) {
22897 // We unpack the low half and high half into i32s interleaved with zeros so
22898 // that we can use PSADBW to horizontally sum them. The most useful part of
22899 // this is that it lines up the results of two PSADBW instructions to be
22900 // two v2i64 vectors which concatenated are the 4 population counts. We can
22901 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
22902 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, DL);
22903 SDValue V32 = DAG.getBitcast(VT, V);
22904 SDValue Low = DAG.getNode(X86ISD::UNPCKL, DL, VT, V32, Zeros);
22905 SDValue High = DAG.getNode(X86ISD::UNPCKH, DL, VT, V32, Zeros);
22907 // Do the horizontal sums into two v2i64s.
22908 Zeros = getZeroVector(ByteVecVT, Subtarget, DAG, DL);
22909 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
22910 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
22911 DAG.getBitcast(ByteVecVT, Low), Zeros);
22912 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
22913 DAG.getBitcast(ByteVecVT, High), Zeros);
22915 // Merge them together.
22916 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
22917 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
22918 DAG.getBitcast(ShortVecVT, Low),
22919 DAG.getBitcast(ShortVecVT, High));
22921 return DAG.getBitcast(VT, V);
22922 }
22924 // The only element type left is i16.
22925 assert(EltVT == MVT::i16 && "Unknown how to handle type");
22927 // To obtain the pop count for each i16 element starting from the pop count of
22928 // its i8 elements, shift the i16s left by 8, sum as i8s, and then shift as
22929 // i16s right by 8. It is important to shift as i16s because an i8 vector shift
22930 // isn't directly supported.
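// Worked example (one i16 lane that already holds two byte pop counts H:L):
// for V = 0x0203 (H = 2, L = 3), SHL by 8 gives 0x0300; the byte-wise ADD with
// the original gives 0x0503; SRL by 8 as i16 yields 0x0005, the i16 pop count.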
22931 SDValue ShifterV = DAG.getConstant(8, DL, VT);
22932 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
22933 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
22934 DAG.getBitcast(ByteVecVT, V));
22935 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
22938 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
22939 const X86Subtarget &Subtarget,
22940 SelectionDAG &DAG) {
22941 MVT VT = Op.getSimpleValueType();
22942 MVT EltVT = VT.getVectorElementType();
22943 unsigned VecSize = VT.getSizeInBits();
22945 // Implement a lookup table in register by using an algorithm based on:
22946 // http://wm.ite.pl/articles/sse-popcount.html
22948 // The general idea is that every lower byte nibble in the input vector is an
22949 // index into an in-register pre-computed pop count table. We then split up the
22950 // input vector into two new ones: (1) a vector with only the shifted-right
22951 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
22952 // masked-out higher ones) for each byte. PSHUFB is used separately with both
22953 // to index the in-register table. Next, both are added and the result is an
22954 // i8 vector where each element contains the pop count for the input byte.
22956 // To obtain the pop count for elements != i8, we follow up with the same
22957 // approach and use additional tricks as described below.
22959 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
22960 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
22961 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
22962 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
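// Worked example of the lookup: for the input byte 0xE9 (0b11101001), the high
// nibble 0xE selects LUT[0xE] = 3 and the low nibble 0x9 selects LUT[0x9] = 2;
// adding the two PSHUFB results yields the byte pop count 5.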
22964 int NumByteElts = VecSize / 8;
22965 MVT ByteVecVT = MVT::getVectorVT(MVT::i8, NumByteElts);
22966 SDValue In = DAG.getBitcast(ByteVecVT, Op);
22967 SmallVector<SDValue, 64> LUTVec;
22968 for (int i = 0; i < NumByteElts; ++i)
22969 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
22970 SDValue InRegLUT = DAG.getBuildVector(ByteVecVT, DL, LUTVec);
22971 SDValue M0F = DAG.getConstant(0x0F, DL, ByteVecVT);
22974 SDValue FourV = DAG.getConstant(4, DL, ByteVecVT);
22975 SDValue HighNibbles = DAG.getNode(ISD::SRL, DL, ByteVecVT, In, FourV);
22978 SDValue LowNibbles = DAG.getNode(ISD::AND, DL, ByteVecVT, In, M0F);
22980 // The input vector is used as the shuffle mask that index elements into the
22981 // LUT. After counting low and high nibbles, add the vector to obtain the
22982 // final pop count per i8 element.
22983 SDValue HighPopCnt =
22984 DAG.getNode(X86ISD::PSHUFB, DL, ByteVecVT, InRegLUT, HighNibbles);
22985 SDValue LowPopCnt =
22986 DAG.getNode(X86ISD::PSHUFB, DL, ByteVecVT, InRegLUT, LowNibbles);
22987 SDValue PopCnt = DAG.getNode(ISD::ADD, DL, ByteVecVT, HighPopCnt, LowPopCnt);
22989 if (EltVT == MVT::i8)
22990 return PopCnt;
22992 return LowerHorizontalByteSum(PopCnt, VT, Subtarget, DAG);
22995 static SDValue LowerVectorCTPOPBitmath(SDValue Op, const SDLoc &DL,
22996 const X86Subtarget &Subtarget,
22997 SelectionDAG &DAG) {
22998 MVT VT = Op.getSimpleValueType();
22999 assert(VT.is128BitVector() &&
23000 "Only 128-bit vector bitmath lowering supported.");
23002 int VecSize = VT.getSizeInBits();
23003 MVT EltVT = VT.getVectorElementType();
23004 int Len = EltVT.getSizeInBits();
23006 // This is the vectorized version of the "best" algorithm from
23007 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
23008 // with a minor tweak to use a series of adds + shifts instead of vector
23009 // multiplications. Implemented for all integer vector types. We only use
23010 // this when we don't have SSSE3 which allows a LUT-based lowering that is
23011 // much faster, even faster than using native popcnt instructions.
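// Worked example on a single byte (the vector code below does the same
// lane-wise with splat masks), starting from v = 0xDB (0b11011011, 6 set bits):
//   v = v - ((v >> 1) & 0x55)          -> 0xDB - 0x45 = 0x96 (2-bit counts)
//   v = (v & 0x33) + ((v >> 2) & 0x33) -> 0x12 + 0x21 = 0x33 (nibble counts)
//   v = (v + (v >> 4)) & 0x0F          -> 0x36 & 0x0F = 0x06 (byte pop count)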
23013 auto GetShift = [&](unsigned OpCode, SDValue V, int Shifter) {
23014 MVT VT = V.getSimpleValueType();
23015 SDValue ShifterV = DAG.getConstant(Shifter, DL, VT);
23016 return DAG.getNode(OpCode, DL, VT, V, ShifterV);
23018 auto GetMask = [&](SDValue V, APInt Mask) {
23019 MVT VT = V.getSimpleValueType();
23020 SDValue MaskV = DAG.getConstant(Mask, DL, VT);
23021 return DAG.getNode(ISD::AND, DL, VT, V, MaskV);
23024 // We don't want to incur the implicit masks required to SRL vNi8 vectors on
23025 // x86, so set the SRL type to have elements at least i16 wide. This is
23026 // correct because all of our SRLs are followed immediately by a mask anyway
23027 // that handles any bits that sneak into the high bits of the byte elements.
23028 MVT SrlVT = Len > 8 ? VT : MVT::getVectorVT(MVT::i16, VecSize / 16);
23032 // v = v - ((v >> 1) & 0x55555555...)
23034 DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 1));
23035 SDValue And = GetMask(Srl, APInt::getSplat(Len, APInt(8, 0x55)));
23036 V = DAG.getNode(ISD::SUB, DL, VT, V, And);
23038 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
23039 SDValue AndLHS = GetMask(V, APInt::getSplat(Len, APInt(8, 0x33)));
23040 Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 2));
23041 SDValue AndRHS = GetMask(Srl, APInt::getSplat(Len, APInt(8, 0x33)));
23042 V = DAG.getNode(ISD::ADD, DL, VT, AndLHS, AndRHS);
23044 // v = (v + (v >> 4)) & 0x0F0F0F0F...
23045 Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 4));
23046 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, V, Srl);
23047 V = GetMask(Add, APInt::getSplat(Len, APInt(8, 0x0F)));
23049 // At this point, V contains the byte-wise population count, and we are
23050 // merely doing a horizontal sum if necessary to get the wider element
23051 // type.
23052 if (EltVT == MVT::i8)
23053 return V;
23055 return LowerHorizontalByteSum(
23056 DAG.getBitcast(MVT::getVectorVT(MVT::i8, VecSize / 8), V), VT, Subtarget,
23060 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
23061 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
23062 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
23063 SelectionDAG &DAG) {
23064 MVT VT = Op.getSimpleValueType();
23065 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
23066 "Unknown CTPOP type to handle");
23067 SDLoc DL(Op.getNode());
23068 SDValue Op0 = Op.getOperand(0);
23070 if (!Subtarget.hasSSSE3()) {
23071 // We can't use the fast LUT approach, so fall back on vectorized bitmath.
23072 assert(VT.is128BitVector() && "Only 128-bit vectors supported in SSE!");
23073 return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
23076 // Decompose 256-bit ops into smaller 128-bit ops.
23077 if (VT.is256BitVector() && !Subtarget.hasInt256())
23078 return Lower256IntUnary(Op, DAG);
23080 // Decompose 512-bit ops into smaller 256-bit ops.
23081 if (VT.is512BitVector() && !Subtarget.hasBWI())
23082 return Lower512IntUnary(Op, DAG);
23084 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
23087 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
23088 SelectionDAG &DAG) {
23089 assert(Op.getSimpleValueType().isVector() &&
23090 "We only do custom lowering for vector population count.");
23091 return LowerVectorCTPOP(Op, Subtarget, DAG);
23094 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
23095 MVT VT = Op.getSimpleValueType();
23096 SDValue In = Op.getOperand(0);
23099 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
23100 // perform the BITREVERSE.
23101 if (!VT.isVector()) {
23102 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
23103 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
23104 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
23105 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
23106 DAG.getIntPtrConstant(0, DL));
23109 int NumElts = VT.getVectorNumElements();
23110 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
23112 // Decompose 256-bit ops into smaller 128-bit ops.
23113 if (VT.is256BitVector())
23114 return Lower256IntUnary(Op, DAG);
23116 assert(VT.is128BitVector() &&
23117 "Only 128-bit vector bitreverse lowering supported.");
23119 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
23120 // perform the BSWAP in the shuffle.
23121 // It's best to shuffle using the second operand as this will implicitly allow
23122 // memory folding for multiple vectors.
23123 SmallVector<SDValue, 16> MaskElts;
23124 for (int i = 0; i != NumElts; ++i) {
23125 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
23126 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
23127 int PermuteByte = SourceByte | (2 << 5);
23128 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
23132 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
23133 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
23134 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
23136 return DAG.getBitcast(VT, Res);
23139 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
23140 SelectionDAG &DAG) {
23141 if (Subtarget.hasXOP())
23142 return LowerBITREVERSE_XOP(Op, DAG);
23144 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
23146 MVT VT = Op.getSimpleValueType();
23147 SDValue In = Op.getOperand(0);
23150 unsigned NumElts = VT.getVectorNumElements();
23151 assert(VT.getScalarType() == MVT::i8 &&
23152 "Only byte vector BITREVERSE supported");
23154 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
23155 if (VT.is256BitVector() && !Subtarget.hasInt256())
23156 return Lower256IntUnary(Op, DAG);
23158 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into its
23159 // two nibbles, and a PSHUFB lookup finds the bit-reverse of each
23160 // 0-15 value (shifted into the other nibble).
23161 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
23162 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
23163 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
23165 const int LoLUT[16] = {
23166 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
23167 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
23168 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
23169 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
23170 const int HiLUT[16] = {
23171 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
23172 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
23173 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
23174 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
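// Worked example: for the input byte 0x2C (0b00101100), the low nibble 0xC maps
// to LoLUT[0xC] = 0x30 and the high nibble 0x2 maps to HiLUT[0x2] = 0x04;
// OR-ing the two PSHUFB results gives 0x34 (0b00110100), i.e. 0x2C bit-reversed.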
23176 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
23177 for (unsigned i = 0; i < NumElts; ++i) {
23178 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
23179 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
23182 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
23183 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
23184 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
23185 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
23186 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
23189 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG) {
23190 unsigned NewOpc = 0;
23191 switch (N->getOpcode()) {
23192 case ISD::ATOMIC_LOAD_ADD:
23193 NewOpc = X86ISD::LADD;
23194 break;
23195 case ISD::ATOMIC_LOAD_SUB:
23196 NewOpc = X86ISD::LSUB;
23197 break;
23198 case ISD::ATOMIC_LOAD_OR:
23199 NewOpc = X86ISD::LOR;
23200 break;
23201 case ISD::ATOMIC_LOAD_XOR:
23202 NewOpc = X86ISD::LXOR;
23203 break;
23204 case ISD::ATOMIC_LOAD_AND:
23205 NewOpc = X86ISD::LAND;
23206 break;
23207 default:
23208 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
23211 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
23212 return DAG.getMemIntrinsicNode(
23213 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
23214 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
23215 /*MemVT=*/N->getSimpleValueType(0), MMO);
23218 /// Lower atomic_load_ops into LOCK-prefixed operations.
23219 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
23220 const X86Subtarget &Subtarget) {
23221 SDValue Chain = N->getOperand(0);
23222 SDValue LHS = N->getOperand(1);
23223 SDValue RHS = N->getOperand(2);
23224 unsigned Opc = N->getOpcode();
23225 MVT VT = N->getSimpleValueType(0);
23228 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
23229 // can only be lowered when the result is unused. They should have already
23230 // been transformed into a cmpxchg loop in AtomicExpand.
23231 if (N->hasAnyUseOfValue(0)) {
23232 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
23233 // select LXADD if LOCK_SUB can't be selected.
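// For example (hypothetical IR), a used
//   %old = atomicrmw sub i32* %p, i32 5 seq_cst
// is rewritten here as (atomic_load_add %p, -5), which can then be selected as
// LOCK XADD with the negated operand.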
23234 if (Opc == ISD::ATOMIC_LOAD_SUB) {
23235 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
23236 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
23237 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
23238 RHS, AN->getMemOperand());
23240 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
23241 "Used AtomicRMW ops other than Add should have been expanded!");
23242 return N;
23243 }
23245 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG);
23246 // RAUW the chain, but don't worry about the result, as it's unused.
23247 assert(!N->hasAnyUseOfValue(0));
23248 DAG.ReplaceAllUsesOfValueWith(N.getValue(1), LockOp.getValue(1));
23252 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
23253 SDNode *Node = Op.getNode();
23255 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
23257 // Convert seq_cst store -> xchg
23258 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
23259 // FIXME: On 32-bit, store -> fist or movq would be more efficient
23260 // (The only way to get a 16-byte store is cmpxchg16b)
23261 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
23262 if (cast<AtomicSDNode>(Node)->getOrdering() ==
23263 AtomicOrdering::SequentiallyConsistent ||
23264 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
23265 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
23266 cast<AtomicSDNode>(Node)->getMemoryVT(),
23267 Node->getOperand(0),
23268 Node->getOperand(1), Node->getOperand(2),
23269 cast<AtomicSDNode>(Node)->getMemOperand());
23270 return Swap.getValue(1);
23272 // Other atomic stores have a simple pattern.
23276 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
23277 SDNode *N = Op.getNode();
23278 MVT VT = N->getSimpleValueType(0);
23280 // Let legalize expand this if it isn't a legal type yet.
23281 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
23282 return SDValue();
23284 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
23287 // Set the carry flag.
23288 SDValue Carry = Op.getOperand(2);
23289 EVT CarryVT = Carry.getValueType();
23290 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
23291 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
23292 Carry, DAG.getConstant(NegOne, DL, CarryVT));
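// For illustration of why this sets CF: with an i8 carry value of 1,
// 0x01 + 0xFF wraps to 0x00 with CF = 1; with a carry value of 0,
// 0x00 + 0xFF = 0xFF with CF = 0. The ADC/SBB built below then consumes CF.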
23294 unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
23295 SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
23296 Op.getOperand(1), Carry.getValue(1));
23298 SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
23299 if (N->getValueType(1) == MVT::i1)
23300 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
23302 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
23305 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
23306 SelectionDAG &DAG) {
23307 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
23309 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
23310 // which returns the values as { float, float } (in XMM0) or
23311 // { double, double } (which is returned in XMM0, XMM1).
23313 SDValue Arg = Op.getOperand(0);
23314 EVT ArgVT = Arg.getValueType();
23315 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
23317 TargetLowering::ArgListTy Args;
23318 TargetLowering::ArgListEntry Entry;
23322 Entry.IsSExt = false;
23323 Entry.IsZExt = false;
23324 Args.push_back(Entry);
23326 bool isF64 = ArgVT == MVT::f64;
23327 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
23328 // the small struct {f32, f32} is returned in (eax, edx). For f64,
23329 // the results are returned via SRet in memory.
23330 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
23331 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23333 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
23335 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
23336 : (Type *)VectorType::get(ArgTy, 4);
23338 TargetLowering::CallLoweringInfo CLI(DAG);
23339 CLI.setDebugLoc(dl)
23340 .setChain(DAG.getEntryNode())
23341 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
23343 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
23346 // Returned in xmm0 and xmm1.
23347 return CallResult.first;
23349 // Returned in bits 0:31 and 32:63 of xmm0.
23350 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
23351 CallResult.first, DAG.getIntPtrConstant(0, dl));
23352 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
23353 CallResult.first, DAG.getIntPtrConstant(1, dl));
23354 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
23355 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
23358 /// Widen a vector input to a vector of NVT. The
23359 /// input vector must have the same element type as NVT.
23360 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
23361 bool FillWithZeroes = false) {
23362 // Check if InOp already has the right width.
23363 MVT InVT = InOp.getSimpleValueType();
23367 if (InOp.isUndef())
23368 return DAG.getUNDEF(NVT);
23370 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
23371 "input and widen element type must match");
23373 unsigned InNumElts = InVT.getVectorNumElements();
23374 unsigned WidenNumElts = NVT.getVectorNumElements();
23375 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
23376 "Unexpected request for vector widening");
23379 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
23380 InOp.getNumOperands() == 2) {
23381 SDValue N1 = InOp.getOperand(1);
23382 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
23384 InOp = InOp.getOperand(0);
23385 InVT = InOp.getSimpleValueType();
23386 InNumElts = InVT.getVectorNumElements();
23389 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
23390 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
23391 SmallVector<SDValue, 16> Ops;
23392 for (unsigned i = 0; i < InNumElts; ++i)
23393 Ops.push_back(InOp.getOperand(i));
23395 EVT EltVT = InOp.getOperand(0).getValueType();
23397 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
23398 DAG.getUNDEF(EltVT);
23399 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
23400 Ops.push_back(FillVal);
23401 return DAG.getBuildVector(NVT, dl, Ops);
23403 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
23405 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
23406 InOp, DAG.getIntPtrConstant(0, dl));
23409 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
23410 SelectionDAG &DAG) {
23411 assert(Subtarget.hasAVX512() &&
23412 "MGATHER/MSCATTER are supported on AVX-512 arch only");
23414 // X86 scatter kills mask register, so its type should be added to
23415 // the list of return values.
23416 // If the "scatter" has 2 return values, it is already handled.
23417 if (Op.getNode()->getNumValues() == 2)
23420 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
23421 SDValue Src = N->getValue();
23422 MVT VT = Src.getSimpleValueType();
23423 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
23426 SDValue NewScatter;
23427 SDValue Index = N->getIndex();
23428 SDValue Mask = N->getMask();
23429 SDValue Chain = N->getChain();
23430 SDValue BasePtr = N->getBasePtr();
23431 MVT MemVT = N->getMemoryVT().getSimpleVT();
23432 MVT IndexVT = Index.getSimpleValueType();
23433 MVT MaskVT = Mask.getSimpleValueType();
23435 if (MemVT.getScalarSizeInBits() < VT.getScalarSizeInBits()) {
23436 // The v2i32 value was promoted to v2i64.
23437 // Now we "redo" the type legalizer's work and widen the original
23438 // v2i32 value to v4i32. The original v2i32 is retrieved from v2i64
23439 // with the {0, 2, -1, -1} shuffle below.
23440 assert((MemVT == MVT::v2i32 && VT == MVT::v2i64) &&
23441 "Unexpected memory type");
23442 int ShuffleMask[] = {0, 2, -1, -1};
23443 Src = DAG.getVectorShuffle(MVT::v4i32, dl, DAG.getBitcast(MVT::v4i32, Src),
23444 DAG.getUNDEF(MVT::v4i32), ShuffleMask);
23445 // Now we have 4 elements instead of 2.
23446 // Expand the index.
23447 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), 4);
23448 Index = ExtendToType(Index, NewIndexVT, DAG);
23450 // Expand the mask with zeroes
23451 // Mask may be <2 x i64> or <2 x i1> at this moment
23452 assert((MaskVT == MVT::v2i1 || MaskVT == MVT::v2i64) &&
23453 "Unexpected mask type");
23454 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), 4);
23455 Mask = ExtendToType(Mask, ExtMaskVT, DAG, true);
23459 unsigned NumElts = VT.getVectorNumElements();
23460 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
23461 !Index.getSimpleValueType().is512BitVector()) {
23462 // AVX512F supports only 512-bit vectors. Either the data or the index
23463 // should be 512 bits wide. If both the index and the data are 256-bit here
23464 // but the vector contains 8 elements, we just sign-extend the index.
23465 if (IndexVT == MVT::v8i32)
23466 // Just extend index
23467 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
23469 // The minimal number of elts in scatter is 8
23472 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), NumElts);
23473 // Use original index here, do not modify the index twice
23474 Index = ExtendToType(N->getIndex(), NewIndexVT, DAG);
23475 if (IndexVT.getScalarType() == MVT::i32)
23476 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
23479 // At this point we have promoted mask operand
23480 assert(MaskVT.getScalarSizeInBits() >= 32 && "unexpected mask type");
23481 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), NumElts);
23482 // Use the original mask here, do not modify the mask twice
23483 Mask = ExtendToType(N->getMask(), ExtMaskVT, DAG, true);
23485 // The value that should be stored
23486 MVT NewVT = MVT::getVectorVT(VT.getScalarType(), NumElts);
23487 Src = ExtendToType(Src, NewVT, DAG);
23490 // If the mask is "wide" at this point - truncate it to i1 vector
23491 MVT BitMaskVT = MVT::getVectorVT(MVT::i1, NumElts);
23492 Mask = DAG.getNode(ISD::TRUNCATE, dl, BitMaskVT, Mask);
23494 // The mask is killed by scatter, add it to the values
23495 SDVTList VTs = DAG.getVTList(BitMaskVT, MVT::Other);
23496 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index};
23497 NewScatter = DAG.getMaskedScatter(VTs, N->getMemoryVT(), dl, Ops,
23498 N->getMemOperand());
23499 DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
23500 return SDValue(NewScatter.getNode(), 1);
23503 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
23504 SelectionDAG &DAG) {
23506 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
23507 MVT VT = Op.getSimpleValueType();
23508 MVT ScalarVT = VT.getScalarType();
23509 SDValue Mask = N->getMask();
23512 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
23513 "Expanding masked load is supported on AVX-512 target only!");
23515 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
23516 "Expanding masked load is supported for 32 and 64-bit types only!");
23518 // 4x32, 4x64 and 2x64 vectors of non-expanding loads are legal regardless of
23519 // VLX. Expanding loads of these types are handled below.
23520 if (!N->isExpandingLoad() && VT.getVectorNumElements() <= 4)
23521 return Op;
23523 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
23524 "Cannot lower masked load op.");
23526 assert((ScalarVT.getSizeInBits() >= 32 ||
23527 (Subtarget.hasBWI() &&
23528 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
23529 "Unsupported masked load op.");
23531 // This operation is legal for targets with VLX, but without
23532 // VLX the vector should be widened to 512 bit
23533 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
23534 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
23535 SDValue Src0 = N->getSrc0();
23536 Src0 = ExtendToType(Src0, WideDataVT, DAG);
23538 // Mask element has to be i1.
23539 MVT MaskEltTy = Mask.getSimpleValueType().getScalarType();
23540 assert((MaskEltTy == MVT::i1 || VT.getVectorNumElements() <= 4) &&
23541 "We handle 4x32, 4x64 and 2x64 vectors only in this case");
23543 MVT WideMaskVT = MVT::getVectorVT(MaskEltTy, NumEltsInWideVec);
23545 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
23546 if (MaskEltTy != MVT::i1)
23547 Mask = DAG.getNode(ISD::TRUNCATE, dl,
23548 MVT::getVectorVT(MVT::i1, NumEltsInWideVec), Mask);
23549 SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
23550 N->getBasePtr(), Mask, Src0,
23551 N->getMemoryVT(), N->getMemOperand(),
23552 N->getExtensionType(),
23553 N->isExpandingLoad());
23555 SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
23556 NewLoad.getValue(0),
23557 DAG.getIntPtrConstant(0, dl));
23558 SDValue RetOps[] = {Exract, NewLoad.getValue(1)};
23559 return DAG.getMergeValues(RetOps, dl);
23562 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
23563 SelectionDAG &DAG) {
23564 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
23565 SDValue DataToStore = N->getValue();
23566 MVT VT = DataToStore.getSimpleValueType();
23567 MVT ScalarVT = VT.getScalarType();
23568 SDValue Mask = N->getMask();
23571 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
23572 "Compressing masked store is supported on AVX-512 target only!");
23574 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
23575 "Compressing masked store is supported for 32 and 64-bit types only!");
23577 // 4x32 and 2x64 vectors of non-compressing stores are legal regardless of VLX.
23578 if (!N->isCompressingStore() && VT.getVectorNumElements() <= 4)
23579 return Op;
23581 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
23582 "Cannot lower masked store op.");
23584 assert((ScalarVT.getSizeInBits() >= 32 ||
23585 (Subtarget.hasBWI() &&
23586 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
23587 "Unsupported masked store op.");
23589 // This operation is legal for targets with VLX, but without
23590 // VLX the vector should be widened to 512 bit
23591 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
23592 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
23594 // Mask element has to be i1.
23595 MVT MaskEltTy = Mask.getSimpleValueType().getScalarType();
23596 assert((MaskEltTy == MVT::i1 || VT.getVectorNumElements() <= 4) &&
23597 "We handle 4x32, 4x64 and 2x64 vectors only in this case");
23599 MVT WideMaskVT = MVT::getVectorVT(MaskEltTy, NumEltsInWideVec);
23601 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
23602 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
23603 if (MaskEltTy != MVT::i1)
23604 Mask = DAG.getNode(ISD::TRUNCATE, dl,
23605 MVT::getVectorVT(MVT::i1, NumEltsInWideVec), Mask);
23606 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
23607 Mask, N->getMemoryVT(), N->getMemOperand(),
23608 N->isTruncatingStore(), N->isCompressingStore());
23611 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
23612 SelectionDAG &DAG) {
23613 assert(Subtarget.hasAVX512() &&
23614 "MGATHER/MSCATTER are supported on AVX-512 arch only");
23616 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
23618 MVT VT = Op.getSimpleValueType();
23619 SDValue Index = N->getIndex();
23620 SDValue Mask = N->getMask();
23621 SDValue Src0 = N->getValue();
23622 MVT IndexVT = Index.getSimpleValueType();
23623 MVT MaskVT = Mask.getSimpleValueType();
23625 unsigned NumElts = VT.getVectorNumElements();
23626 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
23628 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
23629 !Index.getSimpleValueType().is512BitVector()) {
23630 // AVX512F supports only 512-bit vectors. Either the data or the index
23631 // should be 512 bits wide. If both the index and the data are 256-bit here
23632 // but the vector contains 8 elements, we just sign-extend the index.
23633 if (NumElts == 8) {
23634 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
23635 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
23636 N->getOperand(3), Index };
23637 DAG.UpdateNodeOperands(N, Ops);
23641 // Minimal number of elements in Gather
23644 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), NumElts);
23645 Index = ExtendToType(Index, NewIndexVT, DAG);
23646 if (IndexVT.getScalarType() == MVT::i32)
23647 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
23650 MVT MaskBitVT = MVT::getVectorVT(MVT::i1, NumElts);
23651 // At this point we have promoted mask operand
23652 assert(MaskVT.getScalarSizeInBits() >= 32 && "unexpected mask type");
23653 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), NumElts);
23654 Mask = ExtendToType(Mask, ExtMaskVT, DAG, true);
23655 Mask = DAG.getNode(ISD::TRUNCATE, dl, MaskBitVT, Mask);
23657 // The pass-through value
23658 MVT NewVT = MVT::getVectorVT(VT.getScalarType(), NumElts);
23659 Src0 = ExtendToType(Src0, NewVT, DAG);
23661 SDValue Ops[] = { N->getChain(), Src0, Mask, N->getBasePtr(), Index };
23662 SDValue NewGather = DAG.getMaskedGather(DAG.getVTList(NewVT, MVT::Other),
23663 N->getMemoryVT(), dl, Ops,
23664 N->getMemOperand());
23665 SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
23666 NewGather.getValue(0),
23667 DAG.getIntPtrConstant(0, dl));
23668 SDValue RetOps[] = {Exract, NewGather.getValue(1)};
23669 return DAG.getMergeValues(RetOps, dl);
23674 SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
23675 SelectionDAG &DAG) const {
23676 // TODO: Eventually, the lowering of these nodes should be informed by or
23677 // deferred to the GC strategy for the function in which they appear. For
23678 // now, however, they must be lowered to something. Since they are logically
23679 // no-ops in the case of a null GC strategy (or a GC strategy which does not
23680 // require special handling for these nodes), lower them as literal NOOPs for
23681 // the time being.
23682 SmallVector<SDValue, 2> Ops;
23684 Ops.push_back(Op.getOperand(0));
23685 if (Op->getGluedNode())
23686 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
23689 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
23690 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
23695 SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
23696 SelectionDAG &DAG) const {
23697 // TODO: Eventually, the lowering of these nodes should be informed by or
23698 // deferred to the GC strategy for the function in which they appear. For
23699 // now, however, they must be lowered to something. Since they are logically
23700 // no-ops in the case of a null GC strategy (or a GC strategy which does not
23701 // require special handling for these nodes), lower them as literal NOOPs for
23702 // the time being.
23703 SmallVector<SDValue, 2> Ops;
23705 Ops.push_back(Op.getOperand(0));
23706 if (Op->getGluedNode())
23707 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
23710 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
23711 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
23716 /// Provide custom lowering hooks for some operations.
23717 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
23718 switch (Op.getOpcode()) {
23719 default: llvm_unreachable("Should not custom lower this!");
23720 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
23721 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
23722 return LowerCMP_SWAP(Op, Subtarget, DAG);
23723 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
23724 case ISD::ATOMIC_LOAD_ADD:
23725 case ISD::ATOMIC_LOAD_SUB:
23726 case ISD::ATOMIC_LOAD_OR:
23727 case ISD::ATOMIC_LOAD_XOR:
23728 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
23729 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
23730 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
23731 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
23732 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
23733 case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
23734 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
23735 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
23736 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
23737 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
23738 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
23739 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
23740 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
23741 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
23742 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
23743 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
23744 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
23745 case ISD::SHL_PARTS:
23746 case ISD::SRA_PARTS:
23747 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
23748 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
23749 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
23750 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
23751 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
23752 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
23753 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
23754 case ISD::ZERO_EXTEND_VECTOR_INREG:
23755 case ISD::SIGN_EXTEND_VECTOR_INREG:
23756 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
23757 case ISD::FP_TO_SINT:
23758 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
23759 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
23760 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
23762 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
23763 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
23764 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
23765 case ISD::SETCC: return LowerSETCC(Op, DAG);
23766 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
23767 case ISD::SELECT: return LowerSELECT(Op, DAG);
23768 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
23769 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
23770 case ISD::VASTART: return LowerVASTART(Op, DAG);
23771 case ISD::VAARG: return LowerVAARG(Op, DAG);
23772 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
23773 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
23774 case ISD::INTRINSIC_VOID:
23775 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
23776 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
23777 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
23778 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
23779 case ISD::FRAME_TO_ARGS_OFFSET:
23780 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
23781 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
23782 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
23783 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
23784 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
23785 case ISD::EH_SJLJ_SETUP_DISPATCH:
23786 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
23787 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
23788 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
23789 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
23791 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
23793 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG);
23794 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
23796 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
23797 case ISD::UMUL_LOHI:
23798 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
23799 case ISD::ROTL: return LowerRotate(Op, Subtarget, DAG);
23802 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
23808 case ISD::UMULO: return LowerXALUO(Op, DAG);
23809 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
23810 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
23811 case ISD::ADDCARRY:
23812 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
23814 case ISD::SUB: return LowerADD_SUB(Op, DAG);
23818 case ISD::UMIN: return LowerMINMAX(Op, DAG);
23819 case ISD::ABS: return LowerABS(Op, DAG);
23820 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
23821 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
23822 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
23823 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
23824 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
23825 case ISD::GC_TRANSITION_START:
23826 return LowerGC_TRANSITION_START(Op, DAG);
23827 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
23828 case ISD::STORE: return LowerTruncatingStore(Op, Subtarget, DAG);
23832 /// Places new result values for the node in Results (their number
23833 /// and types must exactly match those of the original return values of
23834 /// the node), or leaves Results empty, which indicates that the node is not
23835 /// to be custom lowered after all.
23836 void X86TargetLowering::LowerOperationWrapper(SDNode *N,
23837 SmallVectorImpl<SDValue> &Results,
23838 SelectionDAG &DAG) const {
23839 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
23841 if (!Res.getNode())
23844 assert((N->getNumValues() <= Res->getNumValues()) &&
23845 "Lowering returned the wrong number of results!");
23847 // Place new result values based on the result number of N.
23848 // In some cases (LowerSINT_TO_FP for example) Res has more result values
23849 // than the original node; the chain (the last value) should be dropped.
23850 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
23851 Results.push_back(Res.getValue(I));
23854 /// Replace a node with an illegal result type with a new node built out of
23855 /// custom code.
23856 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
23857 SmallVectorImpl<SDValue>&Results,
23858 SelectionDAG &DAG) const {
23860 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23861 switch (N->getOpcode()) {
23863 llvm_unreachable("Do not know how to custom type legalize this operation!");
23864 case X86ISD::AVG: {
23865 // Legalize types for X86ISD::AVG by expanding vectors.
23866 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
23868 auto InVT = N->getValueType(0);
23869 auto InVTSize = InVT.getSizeInBits();
23870 const unsigned RegSize =
23871 (InVTSize > 128) ? ((InVTSize > 256) ? 512 : 256) : 128;
23872 assert((Subtarget.hasBWI() || RegSize < 512) &&
23873 "512-bit vector requires AVX512BW");
23874 assert((Subtarget.hasAVX2() || RegSize < 256) &&
23875 "256-bit vector requires AVX2");
23877 auto ElemVT = InVT.getVectorElementType();
23878 auto RegVT = EVT::getVectorVT(*DAG.getContext(), ElemVT,
23879 RegSize / ElemVT.getSizeInBits());
23880 assert(RegSize % InVT.getSizeInBits() == 0);
23881 unsigned NumConcat = RegSize / InVT.getSizeInBits();
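// Sizing example (assuming a v2i16 input): InVTSize = 32, so RegSize = 128,
// RegVT = v8i16 and NumConcat = 4; each operand is concatenated with three
// undef v2i16 vectors, averaged as v8i16, and the low v2i16 subvector of the
// result is extracted below.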
23883 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
23884 Ops[0] = N->getOperand(0);
23885 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Ops);
23886 Ops[0] = N->getOperand(1);
23887 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Ops);
23889 SDValue Res = DAG.getNode(X86ISD::AVG, dl, RegVT, InVec0, InVec1);
23890 Results.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InVT, Res,
23891 DAG.getIntPtrConstant(0, dl)));
23894 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
23895 case X86ISD::FMINC:
23897 case X86ISD::FMAXC:
23898 case X86ISD::FMAX: {
23899 EVT VT = N->getValueType(0);
23900 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
23901 SDValue UNDEF = DAG.getUNDEF(VT);
23902 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
23903 N->getOperand(0), UNDEF);
23904 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
23905 N->getOperand(1), UNDEF);
23906 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
23914 case ISD::UDIVREM: {
23915 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
23916 Results.push_back(V);
23919 case ISD::FP_TO_SINT:
23920 case ISD::FP_TO_UINT: {
23921 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
23923 if (N->getValueType(0) == MVT::v2i32) {
23924 assert((IsSigned || Subtarget.hasAVX512()) &&
23925 "Can only handle signed conversion without AVX512");
23926 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
23927 SDValue Src = N->getOperand(0);
23928 if (Src.getValueType() == MVT::v2f64) {
23929 SDValue Idx = DAG.getIntPtrConstant(0, dl);
23930 SDValue Res = DAG.getNode(IsSigned ? X86ISD::CVTTP2SI
23931 : X86ISD::CVTTP2UI,
23932 dl, MVT::v4i32, Src);
23933 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, Idx);
23934 Results.push_back(Res);
23937 if (Src.getValueType() == MVT::v2f32) {
23938 SDValue Idx = DAG.getIntPtrConstant(0, dl);
23939 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
23940 DAG.getUNDEF(MVT::v2f32));
23941 Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT
23942 : ISD::FP_TO_UINT, dl, MVT::v4i32, Res);
23943 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, Idx);
23944 Results.push_back(Res);
23948 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
23949 // so early out here.
23953 std::pair<SDValue,SDValue> Vals =
23954 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
23955 SDValue FIST = Vals.first, StackSlot = Vals.second;
23956 if (FIST.getNode()) {
23957 EVT VT = N->getValueType(0);
23958 // Return a load from the stack slot.
23959 if (StackSlot.getNode())
23961 DAG.getLoad(VT, dl, FIST, StackSlot, MachinePointerInfo()));
23963 Results.push_back(FIST);
23967 case ISD::SINT_TO_FP: {
23968 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
23969 SDValue Src = N->getOperand(0);
23970 if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
23972 Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
23975 case ISD::UINT_TO_FP: {
23976 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
23977 EVT VT = N->getValueType(0);
23978 if (VT != MVT::v2f32)
23980 SDValue Src = N->getOperand(0);
23981 EVT SrcVT = Src.getValueType();
23982 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
23983 Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
23986 if (SrcVT != MVT::v2i32)
23988 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
23990 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
23991 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
23992 DAG.getBitcast(MVT::v2i64, VBias));
23993 Or = DAG.getBitcast(MVT::v2f64, Or);
23994 // TODO: Are there any fast-math-flags to propagate here?
23995 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
23996 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
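// For illustration of the bias trick above: for the 32-bit lane value 7,
// zero-extending and OR-ing with 0x4330000000000000 produces the double
// 2^52 + 7 exactly; subtracting the same bias leaves 7.0, which VFPROUND
// then narrows to f32.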
23999 case ISD::FP_ROUND: {
24000 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
24002 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
24003 Results.push_back(V);
24006 case ISD::FP_EXTEND: {
24007 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
24008 // No other ValueType for FP_EXTEND should reach this point.
24009 assert(N->getValueType(0) == MVT::v2f32 &&
24010 "Do not know how to legalize this Node");
24013 case ISD::INTRINSIC_W_CHAIN: {
24014 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
24016 default : llvm_unreachable("Do not know how to custom type "
24017 "legalize this intrinsic operation!");
24018 case Intrinsic::x86_rdtsc:
24019 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
24021 case Intrinsic::x86_rdtscp:
24022 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
24024 case Intrinsic::x86_rdpmc:
24025 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
24027 case Intrinsic::x86_xgetbv:
24028 return getExtendedControlRegister(N, dl, DAG, Subtarget, Results);
24031 case ISD::INTRINSIC_WO_CHAIN: {
24032 if (SDValue V = LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), Subtarget, DAG))
24033 Results.push_back(V);
24036 case ISD::READCYCLECOUNTER: {
24037 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
24040 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
24041 EVT T = N->getValueType(0);
24042 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
24043 bool Regs64bit = T == MVT::i128;
24044 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
24045 SDValue cpInL, cpInH;
24046 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
24047 DAG.getConstant(0, dl, HalfT));
24048 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
24049 DAG.getConstant(1, dl, HalfT));
24050 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
24051 Regs64bit ? X86::RAX : X86::EAX,
24053 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
24054 Regs64bit ? X86::RDX : X86::EDX,
24055 cpInH, cpInL.getValue(1));
24056 SDValue swapInL, swapInH;
24057 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
24058 DAG.getConstant(0, dl, HalfT));
24059 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
24060 DAG.getConstant(1, dl, HalfT));
24062 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
24063 swapInH, cpInH.getValue(1));
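// Background for the register choices: CMPXCHG8B/CMPXCHG16B compare EDX:EAX
// (resp. RDX:RAX) with the memory operand and, on a match, store ECX:EBX
// (resp. RCX:RBX); hence the expected value was copied into EAX/RAX and
// EDX/RDX above, and the desired value goes into ECX/RCX here and into
// EBX/RBX below (directly or via the RBX-saving pseudo).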
24064 // If the current function needs the base pointer, RBX,
24065 // we shouldn't use cmpxchg directly.
24066 // Indeed, the lowering of that instruction will clobber
24067 // that register, and since RBX will be a reserved register,
24068 // the register allocator will not make sure its value is
24069 // properly saved and restored around this live range.
24070 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
24072 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
24073 unsigned BasePtr = TRI->getBaseRegister();
24074 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
24075 if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
24076 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
24077 // ISel prefers the LCMPXCHG64 variant.
24078 // If that assert breaks, it means that is no longer the case,
24079 // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
24080 // not just EBX. This is a matter of accepting i64 input for that
24081 // pseudo, and restoring into the register of the right width
24082 // in the expand pseudo. Everything else should just work.
24083 assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
24084 "Saving only half of the RBX");
24085 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
24086 : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
24087 SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
24088 Regs64bit ? X86::RBX : X86::EBX,
24089 HalfT, swapInH.getValue(1));
24090 SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
24092 /*Glue*/ RBXSave.getValue(2)};
24093 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
24096 Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
24097 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
24098 Regs64bit ? X86::RBX : X86::EBX, swapInL,
24099 swapInH.getValue(1));
24100 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
24101 swapInL.getValue(1)};
24102 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
24104 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
24105 Regs64bit ? X86::RAX : X86::EAX,
24106 HalfT, Result.getValue(1));
24107 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
24108 Regs64bit ? X86::RDX : X86::EDX,
24109 HalfT, cpOutL.getValue(2));
24110 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
24112 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
24113 MVT::i32, cpOutH.getValue(2));
24114 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
24115 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
24117 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
24118 Results.push_back(Success);
24119 Results.push_back(EFLAGS.getValue(1));
24122 case ISD::ATOMIC_SWAP:
24123 case ISD::ATOMIC_LOAD_ADD:
24124 case ISD::ATOMIC_LOAD_SUB:
24125 case ISD::ATOMIC_LOAD_AND:
24126 case ISD::ATOMIC_LOAD_OR:
24127 case ISD::ATOMIC_LOAD_XOR:
24128 case ISD::ATOMIC_LOAD_NAND:
24129 case ISD::ATOMIC_LOAD_MIN:
24130 case ISD::ATOMIC_LOAD_MAX:
24131 case ISD::ATOMIC_LOAD_UMIN:
24132 case ISD::ATOMIC_LOAD_UMAX:
24133 case ISD::ATOMIC_LOAD: {
24134 // Delegate to generic TypeLegalization. Situations we can really handle
24135 // should have already been dealt with by AtomicExpandPass.cpp.
24138 case ISD::BITCAST: {
24139 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
24140 EVT DstVT = N->getValueType(0);
24141 EVT SrcVT = N->getOperand(0)->getValueType(0);
24143 if (SrcVT != MVT::f64 ||
24144 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
24147 unsigned NumElts = DstVT.getVectorNumElements();
24148 EVT SVT = DstVT.getVectorElementType();
24149 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
24150 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
24151 MVT::v2f64, N->getOperand(0));
24152 SDValue ToVecInt = DAG.getBitcast(WiderVT, Expanded);
24154 if (ExperimentalVectorWideningLegalization) {
24155 // If we are legalizing vectors by widening, we already have the desired
24156 // legal vector type, just return it.
24157 Results.push_back(ToVecInt);
24161 SmallVector<SDValue, 8> Elts;
24162 for (unsigned i = 0, e = NumElts; i != e; ++i)
24163 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
24164 ToVecInt, DAG.getIntPtrConstant(i, dl)));
24166 Results.push_back(DAG.getBuildVector(DstVT, dl, Elts));
24171 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
24172 switch ((X86ISD::NodeType)Opcode) {
24173 case X86ISD::FIRST_NUMBER: break;
24174 case X86ISD::BSF: return "X86ISD::BSF";
24175 case X86ISD::BSR: return "X86ISD::BSR";
24176 case X86ISD::SHLD: return "X86ISD::SHLD";
24177 case X86ISD::SHRD: return "X86ISD::SHRD";
24178 case X86ISD::FAND: return "X86ISD::FAND";
24179 case X86ISD::FANDN: return "X86ISD::FANDN";
24180 case X86ISD::FOR: return "X86ISD::FOR";
24181 case X86ISD::FXOR: return "X86ISD::FXOR";
24182 case X86ISD::FILD: return "X86ISD::FILD";
24183 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
24184 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
24185 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
24186 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
24187 case X86ISD::FLD: return "X86ISD::FLD";
24188 case X86ISD::FST: return "X86ISD::FST";
24189 case X86ISD::CALL: return "X86ISD::CALL";
24190 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
24191 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
24192 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
24193 case X86ISD::BT: return "X86ISD::BT";
24194 case X86ISD::CMP: return "X86ISD::CMP";
24195 case X86ISD::COMI: return "X86ISD::COMI";
24196 case X86ISD::UCOMI: return "X86ISD::UCOMI";
24197 case X86ISD::CMPM: return "X86ISD::CMPM";
24198 case X86ISD::CMPMU: return "X86ISD::CMPMU";
24199 case X86ISD::CMPM_RND: return "X86ISD::CMPM_RND";
24200 case X86ISD::SETCC: return "X86ISD::SETCC";
24201 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
24202 case X86ISD::FSETCC: return "X86ISD::FSETCC";
24203 case X86ISD::FSETCCM: return "X86ISD::FSETCCM";
24204 case X86ISD::FSETCCM_RND: return "X86ISD::FSETCCM_RND";
24205 case X86ISD::CMOV: return "X86ISD::CMOV";
24206 case X86ISD::BRCOND: return "X86ISD::BRCOND";
24207 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
24208 case X86ISD::IRET: return "X86ISD::IRET";
24209 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
24210 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
24211 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
24212 case X86ISD::Wrapper: return "X86ISD::Wrapper";
24213 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
24214 case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
24215 case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
24216 case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
24217 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
24218 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
24219 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
24220 case X86ISD::PINSRB: return "X86ISD::PINSRB";
24221 case X86ISD::PINSRW: return "X86ISD::PINSRW";
24222 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
24223 case X86ISD::ANDNP: return "X86ISD::ANDNP";
24224 case X86ISD::BLENDI: return "X86ISD::BLENDI";
24225 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
24226 case X86ISD::ADDUS: return "X86ISD::ADDUS";
24227 case X86ISD::SUBUS: return "X86ISD::SUBUS";
24228 case X86ISD::HADD: return "X86ISD::HADD";
24229 case X86ISD::HSUB: return "X86ISD::HSUB";
24230 case X86ISD::FHADD: return "X86ISD::FHADD";
24231 case X86ISD::FHSUB: return "X86ISD::FHSUB";
24232 case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
24233 case X86ISD::FMAX: return "X86ISD::FMAX";
24234 case X86ISD::FMAXS: return "X86ISD::FMAXS";
24235 case X86ISD::FMAX_RND: return "X86ISD::FMAX_RND";
24236 case X86ISD::FMAXS_RND: return "X86ISD::FMAXS_RND";
24237 case X86ISD::FMIN: return "X86ISD::FMIN";
24238 case X86ISD::FMINS: return "X86ISD::FMINS";
24239 case X86ISD::FMIN_RND: return "X86ISD::FMIN_RND";
24240 case X86ISD::FMINS_RND: return "X86ISD::FMINS_RND";
24241 case X86ISD::FMAXC: return "X86ISD::FMAXC";
24242 case X86ISD::FMINC: return "X86ISD::FMINC";
24243 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
24244 case X86ISD::FRSQRTS: return "X86ISD::FRSQRTS";
24245 case X86ISD::FRCP: return "X86ISD::FRCP";
24246 case X86ISD::FRCPS: return "X86ISD::FRCPS";
24247 case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
24248 case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
24249 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
24250 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
24251 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
24252 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
24253 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
24254 case X86ISD::EH_SJLJ_SETUP_DISPATCH:
24255 return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
24256 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
24257 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
24258 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
24259 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
24260 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
24261 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
24262 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
24263 case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
24264 return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
24265 case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
24266 return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
24267 case X86ISD::LADD: return "X86ISD::LADD";
24268 case X86ISD::LSUB: return "X86ISD::LSUB";
24269 case X86ISD::LOR: return "X86ISD::LOR";
24270 case X86ISD::LXOR: return "X86ISD::LXOR";
24271 case X86ISD::LAND: return "X86ISD::LAND";
24272 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
24273 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
24274 case X86ISD::VZEXT: return "X86ISD::VZEXT";
24275 case X86ISD::VSEXT: return "X86ISD::VSEXT";
24276 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
24277 case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
24278 case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
24279 case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES";
24280 case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS";
24281 case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES";
24282 case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS";
24283 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
24284 case X86ISD::VFPEXT_RND: return "X86ISD::VFPEXT_RND";
24285 case X86ISD::VFPEXTS_RND: return "X86ISD::VFPEXTS_RND";
24286 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
24287 case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND";
24288 case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND";
24289 case X86ISD::CVT2MASK: return "X86ISD::CVT2MASK";
24290 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
24291 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
24292 case X86ISD::VSHL: return "X86ISD::VSHL";
24293 case X86ISD::VSRL: return "X86ISD::VSRL";
24294 case X86ISD::VSRA: return "X86ISD::VSRA";
24295 case X86ISD::VSHLI: return "X86ISD::VSHLI";
24296 case X86ISD::VSRLI: return "X86ISD::VSRLI";
24297 case X86ISD::VSRAI: return "X86ISD::VSRAI";
24298 case X86ISD::VSRAV: return "X86ISD::VSRAV";
24299 case X86ISD::VROTLI: return "X86ISD::VROTLI";
24300 case X86ISD::VROTRI: return "X86ISD::VROTRI";
24301 case X86ISD::VPPERM: return "X86ISD::VPPERM";
24302 case X86ISD::CMPP: return "X86ISD::CMPP";
24303 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
24304 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
24305 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
24306 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
24307 case X86ISD::ADD: return "X86ISD::ADD";
24308 case X86ISD::SUB: return "X86ISD::SUB";
24309 case X86ISD::ADC: return "X86ISD::ADC";
24310 case X86ISD::SBB: return "X86ISD::SBB";
24311 case X86ISD::SMUL: return "X86ISD::SMUL";
24312 case X86ISD::UMUL: return "X86ISD::UMUL";
24313 case X86ISD::SMUL8: return "X86ISD::SMUL8";
24314 case X86ISD::UMUL8: return "X86ISD::UMUL8";
24315 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
24316 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
24317 case X86ISD::INC: return "X86ISD::INC";
24318 case X86ISD::DEC: return "X86ISD::DEC";
24319 case X86ISD::OR: return "X86ISD::OR";
24320 case X86ISD::XOR: return "X86ISD::XOR";
24321 case X86ISD::AND: return "X86ISD::AND";
24322 case X86ISD::BEXTR: return "X86ISD::BEXTR";
24323 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
24324 case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
24325 case X86ISD::PTEST: return "X86ISD::PTEST";
24326 case X86ISD::TESTP: return "X86ISD::TESTP";
24327 case X86ISD::TESTM: return "X86ISD::TESTM";
24328 case X86ISD::TESTNM: return "X86ISD::TESTNM";
24329 case X86ISD::KORTEST: return "X86ISD::KORTEST";
24330 case X86ISD::KTEST: return "X86ISD::KTEST";
24331 case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL";
24332 case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR";
24333 case X86ISD::PACKSS: return "X86ISD::PACKSS";
24334 case X86ISD::PACKUS: return "X86ISD::PACKUS";
24335 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
24336 case X86ISD::VALIGN: return "X86ISD::VALIGN";
24337 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
24338 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
24339 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
24340 case X86ISD::SHUFP: return "X86ISD::SHUFP";
24341 case X86ISD::SHUF128: return "X86ISD::SHUF128";
24342 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
24343 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
24344 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
24345 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
24346 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
24347 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
24348 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
24349 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
24350 case X86ISD::MOVSD: return "X86ISD::MOVSD";
24351 case X86ISD::MOVSS: return "X86ISD::MOVSS";
24352 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
24353 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
24354 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
24355 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
24356 case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
24357 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
24358 case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
24359 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
24360 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
24361 case X86ISD::VPERMV: return "X86ISD::VPERMV";
24362 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
24363 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
24364 case X86ISD::VPERMI: return "X86ISD::VPERMI";
24365 case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
24366 case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
24367 case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
24368 case X86ISD::VRANGE: return "X86ISD::VRANGE";
24369 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
24370 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
24371 case X86ISD::PSADBW: return "X86ISD::PSADBW";
24372 case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
24373 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
24374 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
24375 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
24376 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
24377 case X86ISD::MFENCE: return "X86ISD::MFENCE";
24378 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
24379 case X86ISD::SAHF: return "X86ISD::SAHF";
24380 case X86ISD::RDRAND: return "X86ISD::RDRAND";
24381 case X86ISD::RDSEED: return "X86ISD::RDSEED";
24382 case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
24383 case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
24384 case X86ISD::VPROT: return "X86ISD::VPROT";
24385 case X86ISD::VPROTI: return "X86ISD::VPROTI";
24386 case X86ISD::VPSHA: return "X86ISD::VPSHA";
24387 case X86ISD::VPSHL: return "X86ISD::VPSHL";
24388 case X86ISD::VPCOM: return "X86ISD::VPCOM";
24389 case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
24390 case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
24391 case X86ISD::FMADD: return "X86ISD::FMADD";
24392 case X86ISD::FMSUB: return "X86ISD::FMSUB";
24393 case X86ISD::FNMADD: return "X86ISD::FNMADD";
24394 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
24395 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
24396 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
24397 case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
24398 case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
24399 case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
24400 case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
24401 case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
24402 case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
24403 case X86ISD::FMADDS1_RND: return "X86ISD::FMADDS1_RND";
24404 case X86ISD::FNMADDS1_RND: return "X86ISD::FNMADDS1_RND";
24405 case X86ISD::FMSUBS1_RND: return "X86ISD::FMSUBS1_RND";
24406 case X86ISD::FNMSUBS1_RND: return "X86ISD::FNMSUBS1_RND";
24407 case X86ISD::FMADDS3_RND: return "X86ISD::FMADDS3_RND";
24408 case X86ISD::FNMADDS3_RND: return "X86ISD::FNMADDS3_RND";
24409 case X86ISD::FMSUBS3_RND: return "X86ISD::FMSUBS3_RND";
24410 case X86ISD::FNMSUBS3_RND: return "X86ISD::FNMSUBS3_RND";
24411 case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
24412 case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
24413 case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
24414 case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES";
24415 case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
24416 case X86ISD::VREDUCES: return "X86ISD::VREDUCES";
24417 case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
24418 case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS";
24419 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
24420 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
24421 case X86ISD::XTEST: return "X86ISD::XTEST";
24422 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
24423 case X86ISD::EXPAND: return "X86ISD::EXPAND";
24424 case X86ISD::SELECT: return "X86ISD::SELECT";
24425 case X86ISD::SELECTS: return "X86ISD::SELECTS";
24426 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
24427 case X86ISD::RCP28: return "X86ISD::RCP28";
24428 case X86ISD::RCP28S: return "X86ISD::RCP28S";
24429 case X86ISD::EXP2: return "X86ISD::EXP2";
24430 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
24431 case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S";
24432 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
24433 case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND";
24434 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
24435 case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND";
24436 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
24437 case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND";
24438 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
24439 case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND";
24440 case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
24441 case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND";
24442 case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND";
24443 case X86ISD::FGETEXPS_RND: return "X86ISD::FGETEXPS_RND";
24444 case X86ISD::SCALEF: return "X86ISD::SCALEF";
24445 case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
24446 case X86ISD::ADDS: return "X86ISD::ADDS";
24447 case X86ISD::SUBS: return "X86ISD::SUBS";
24448 case X86ISD::AVG: return "X86ISD::AVG";
24449 case X86ISD::MULHRS: return "X86ISD::MULHRS";
24450 case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
24451 case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
24452 case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI";
24453 case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI";
24454 case X86ISD::CVTTP2SI_RND: return "X86ISD::CVTTP2SI_RND";
24455 case X86ISD::CVTTP2UI_RND: return "X86ISD::CVTTP2UI_RND";
24456 case X86ISD::CVTTS2SI_RND: return "X86ISD::CVTTS2SI_RND";
24457 case X86ISD::CVTTS2UI_RND: return "X86ISD::CVTTS2UI_RND";
24458 case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P";
24459 case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P";
24460 case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
24461 case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
24462 case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
24463 case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
24464 case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
24465 case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH";
24466 case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS";
24467 case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI";
24468 case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI";
24469 case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND";
24470 case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND";
24471 case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND";
24472 case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND";
24473 case X86ISD::LWPINS: return "X86ISD::LWPINS";
24478 /// Return true if the addressing mode represented by AM is legal for this
24479 /// target, for a load/store of the specified type.
24480 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
24481 const AddrMode &AM, Type *Ty,
24482 unsigned AS) const {
24483 // X86 supports extremely general addressing modes.
24484 CodeModel::Model M = getTargetMachine().getCodeModel();
24486 // X86 allows a sign-extended 32-bit immediate field as a displacement.
24487 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
24491 unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
24493 // If a reference to this global requires an extra load, we can't fold it.
24494 if (isGlobalStubReference(GVFlags))
24497 // If BaseGV requires a register for the PIC base, we cannot also have a
24498 // BaseReg specified.
24499 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
24502 // If lower 4G is not available, then we must use rip-relative addressing.
24503 if ((M != CodeModel::Small || isPositionIndependent()) &&
24504 Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
24508 switch (AM.Scale) {
24514 // These scales always work.
24519 // These scales are formed with basereg+scalereg. Only accept if there is
24524 default: // Other stuff never works.
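/// Returns true if, for this element width, shifting an entire vector by a
/// single (splatted) scalar amount is meaningfully cheaper than shifting by a
/// fully general vector of per-element amounts on this subtarget.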
24531 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
24532 unsigned Bits = Ty->getScalarSizeInBits();
24534 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
24535 // particularly cheaper than those without.
24539 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
24540 // variable shifts just as cheap as scalar ones.
24541 if (Subtarget.hasInt256() && (Bits == 32 || Bits == 64))
24544 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
24545 // fully general vector.
24549 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
24550 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
24552 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
24553 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
24554 return NumBits1 > NumBits2;
24557 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
24558 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
24561 if (!isTypeLegal(EVT::getEVT(Ty1)))
24564 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
24566 // Assuming the caller doesn't have a zeroext or signext return parameter,
24567 // truncation all the way down to i1 is valid.
24571 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
24572 return isInt<32>(Imm);
24575 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
24576 // Can also use sub to handle negated immediates.
24577 return isInt<32>(Imm);
24580 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
24581 if (!VT1.isInteger() || !VT2.isInteger())
24583 unsigned NumBits1 = VT1.getSizeInBits();
24584 unsigned NumBits2 = VT2.getSizeInBits();
24585 return NumBits1 > NumBits2;
24588 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
24589 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
24590 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
24593 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
24594 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
24595 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
24598 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
24599 EVT VT1 = Val.getValueType();
24600 if (isZExtFree(VT1, VT2))
24603 if (Val.getOpcode() != ISD::LOAD)
24606 if (!VT1.isSimple() || !VT1.isInteger() ||
24607 !VT2.isSimple() || !VT2.isInteger())
24610 switch (VT1.getSimpleVT().SimpleTy) {
24615 // X86 has 8, 16, and 32-bit zero-extending loads.
24622 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
24625 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
24626 if (!Subtarget.hasAnyFMA())
24629 VT = VT.getScalarType();
24631 if (!VT.isSimple())
24634 switch (VT.getSimpleVT().SimpleTy) {
24645 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
24646 // i16 instructions are longer (0x66 prefix) and potentially slower.
24647 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
24650 /// Targets can use this to indicate that they only support *some*
24651 /// VECTOR_SHUFFLE operations, those with specific masks.
24652 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
24653 /// are assumed to be legal.
24655 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
24657 if (!VT.isSimple())
24660 // Not for i1 vectors
24661 if (VT.getSimpleVT().getScalarType() == MVT::i1)
24664 // Very little shuffling can be done for 64-bit vectors right now.
24665 if (VT.getSimpleVT().getSizeInBits() == 64)
24668 // We only care that the types being shuffled are legal. The lowering can
24669 // handle any possible shuffle mask that results.
24670 return isTypeLegal(VT.getSimpleVT());
24674 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
24676 // Just delegate to the generic legality, clear masks aren't special.
24677 return isShuffleMaskLegal(Mask, VT);
24680 //===----------------------------------------------------------------------===//
24681 // X86 Scheduler Hooks
24682 //===----------------------------------------------------------------------===//
24684 /// Utility function to emit xbegin specifying the start of an RTM region.
24685 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
24686 const TargetInstrInfo *TII) {
24687 DebugLoc DL = MI.getDebugLoc();
24689 const BasicBlock *BB = MBB->getBasicBlock();
24690 MachineFunction::iterator I = ++MBB->getIterator();
24692 // For the v = xbegin(), we generate
24701 // eax = # XABORT_DEF
24705 // v = phi(s0/mainBB, s1/fallBB)
24707 MachineBasicBlock *thisMBB = MBB;
24708 MachineFunction *MF = MBB->getParent();
24709 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
24710 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
24711 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
24712 MF->insert(I, mainMBB);
24713 MF->insert(I, fallMBB);
24714 MF->insert(I, sinkMBB);
24716 // Transfer the remainder of BB and its successor edges to sinkMBB.
24717 sinkMBB->splice(sinkMBB->begin(), MBB,
24718 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
24719 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
24721 MachineRegisterInfo &MRI = MF->getRegInfo();
24722 unsigned DstReg = MI.getOperand(0).getReg();
24723 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
24724 unsigned mainDstReg = MRI.createVirtualRegister(RC);
24725 unsigned fallDstReg = MRI.createVirtualRegister(RC);
24729 // # fallthrough to mainMBB
24730 // # abort to fallMBB
24731 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
24732 thisMBB->addSuccessor(mainMBB);
24733 thisMBB->addSuccessor(fallMBB);
24736 // mainDstReg := -1
24737 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
24738 BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
24739 mainMBB->addSuccessor(sinkMBB);
24742 // ; pseudo instruction to model hardware's definition from XABORT
24743 // EAX := XABORT_DEF
24744 // fallDstReg := EAX
24745 BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
24746 BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
24748 fallMBB->addSuccessor(sinkMBB);
24751 // DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
24752 BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
24753 .addReg(mainDstReg).addMBB(mainMBB)
24754 .addReg(fallDstReg).addMBB(fallMBB);
24756 MI.eraseFromParent();
24760 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
24761 // or XMM0_V32I8 in AVX all of this code can be replaced with that
24762 // in the .td file.
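/// Lowers the (V)PCMPISTRM/(V)PCMPESTRM pseudos: selects the concrete rr/rm
/// opcode, forwards the explicit operands and memory references, and copies
/// the implicit XMM0 result into the pseudo's destination register.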
24763 static MachineBasicBlock *emitPCMPSTRM(MachineInstr &MI, MachineBasicBlock *BB,
24764 const TargetInstrInfo *TII) {
24766 switch (MI.getOpcode()) {
24767 default: llvm_unreachable("illegal opcode!");
24768 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
24769 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
24770 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
24771 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
24772 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
24773 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
24774 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
24775 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
24778 DebugLoc dl = MI.getDebugLoc();
24779 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
24781 unsigned NumArgs = MI.getNumOperands();
24782 for (unsigned i = 1; i < NumArgs; ++i) {
24783 MachineOperand &Op = MI.getOperand(i);
24784 if (!(Op.isReg() && Op.isImplicit()))
24787 if (MI.hasOneMemOperand())
24788 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
24790 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
24791 .addReg(X86::XMM0);
24793 MI.eraseFromParent();
24797 // FIXME: Custom handling because TableGen doesn't support multiple implicit
24798 // defs in an instruction pattern
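/// Lowers the (V)PCMPISTRI/(V)PCMPESTRI pseudos: selects the concrete rr/rm
/// opcode, forwards the explicit operands and memory references, and copies
/// the implicit index result into the pseudo's destination register.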
24799 static MachineBasicBlock *emitPCMPSTRI(MachineInstr &MI, MachineBasicBlock *BB,
24800 const TargetInstrInfo *TII) {
24802 switch (MI.getOpcode()) {
24803 default: llvm_unreachable("illegal opcode!");
24804 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
24805 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
24806 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
24807 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
24808 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
24809 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
24810 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
24811 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
24814 DebugLoc dl = MI.getDebugLoc();
24815 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
24817 unsigned NumArgs = MI.getNumOperands(); // remove the results
24818 for (unsigned i = 1; i < NumArgs; ++i) {
24819 MachineOperand &Op = MI.getOperand(i);
24820 if (!(Op.isReg() && Op.isImplicit()))
24823 if (MI.hasOneMemOperand())
24824 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
24826 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
24829 MI.eraseFromParent();
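/// Lowers the WRPKRU pseudo: copies the PKRU value into EAX, zeroes ECX and
/// EDX as the instruction requires, and emits the real WRPKRUr.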
24833 static MachineBasicBlock *emitWRPKRU(MachineInstr &MI, MachineBasicBlock *BB,
24834 const X86Subtarget &Subtarget) {
24835 DebugLoc dl = MI.getDebugLoc();
24836 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24838 // insert input VAL into EAX
24839 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
24840 .addReg(MI.getOperand(0).getReg());
24841 // insert zero to ECX
24842 BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
24844 // insert zero to EDX
24845 BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::EDX);
24847 // insert WRPKRU instruction
24848 BuildMI(*BB, MI, dl, TII->get(X86::WRPKRUr));
24850 MI.eraseFromParent(); // The pseudo is gone now.
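/// Lowers the RDPKRU pseudo: zeroes ECX, emits the real RDPKRUr, and copies
/// the value read into the pseudo's destination register.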
24854 static MachineBasicBlock *emitRDPKRU(MachineInstr &MI, MachineBasicBlock *BB,
24855 const X86Subtarget &Subtarget) {
24856 DebugLoc dl = MI.getDebugLoc();
24857 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24859 // insert zero to ECX
24860 BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
24862 // insert RDPKRU instruction
24863 BuildMI(*BB, MI, dl, TII->get(X86::RDPKRUr));
24864 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
24867 MI.eraseFromParent(); // The pseudo is gone now.
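/// Lowers MONITOR-style pseudos: materializes the address operand in RAX/EAX
/// via LEA, moves the remaining two arguments into ECX and EDX, and emits the
/// requested opcode, which takes all of its inputs implicitly.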
24871 static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB,
24872 const X86Subtarget &Subtarget,
24874 DebugLoc dl = MI.getDebugLoc();
24875 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24876 // Address into RAX/EAX, other two args into ECX, EDX.
24877 unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
24878 unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
24879 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
24880 for (int i = 0; i < X86::AddrNumOperands; ++i)
24881 MIB.add(MI.getOperand(i));
24883 unsigned ValOps = X86::AddrNumOperands;
24884 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
24885 .addReg(MI.getOperand(ValOps).getReg());
24886 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
24887 .addReg(MI.getOperand(ValOps + 1).getReg());
24889 // The instruction doesn't actually take any operands though.
24890 BuildMI(*BB, MI, dl, TII->get(Opc));
24892 MI.eraseFromParent(); // The pseudo is gone now.
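/// Lowers the CLZERO pseudo: materializes the cache-line address in RAX/EAX
/// via LEA and emits CLZEROr, which takes its address operand implicitly.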
24896 static MachineBasicBlock *emitClzero(MachineInstr *MI, MachineBasicBlock *BB,
24897 const X86Subtarget &Subtarget) {
24898 DebugLoc dl = MI->getDebugLoc();
24899 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24900 // Address into RAX/EAX
24901 unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
24902 unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
24903 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
24904 for (int i = 0; i < X86::AddrNumOperands; ++i)
24905 MIB.add(MI->getOperand(i));
24907 // The instruction doesn't actually take any operands though.
24908 BuildMI(*BB, MI, dl, TII->get(X86::CLZEROr));
24910 MI->eraseFromParent(); // The pseudo is gone now.
24916 MachineBasicBlock *
24917 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
24918 MachineBasicBlock *MBB) const {
24919 // Emit va_arg instruction on X86-64.
24921 // Operands to this pseudo-instruction:
24922 // 0 ) Output : destination address (reg)
24923 // 1-5) Input : va_list address (addr, i64mem)
24924 // 6 ) ArgSize : Size (in bytes) of vararg type
24925 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
24926 // 8 ) Align : Alignment of type
24927 // 9 ) EFLAGS (implicit-def)
24929 assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
24930 static_assert(X86::AddrNumOperands == 5,
24931 "VAARG_64 assumes 5 address operands");
24933 unsigned DestReg = MI.getOperand(0).getReg();
24934 MachineOperand &Base = MI.getOperand(1);
24935 MachineOperand &Scale = MI.getOperand(2);
24936 MachineOperand &Index = MI.getOperand(3);
24937 MachineOperand &Disp = MI.getOperand(4);
24938 MachineOperand &Segment = MI.getOperand(5);
24939 unsigned ArgSize = MI.getOperand(6).getImm();
24940 unsigned ArgMode = MI.getOperand(7).getImm();
24941 unsigned Align = MI.getOperand(8).getImm();
24943 // Memory Reference
24944 assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
24945 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
24946 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
24948 // Machine Information
24949 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24950 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
24951 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
24952 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
24953 DebugLoc DL = MI.getDebugLoc();
24955 // struct va_list {
24958 // i64 overflow_area (address)
24959 // i64 reg_save_area (address)
24961 // sizeof(va_list) = 24
24962 // alignment(va_list) = 8
24964 unsigned TotalNumIntRegs = 6;
24965 unsigned TotalNumXMMRegs = 8;
24966 bool UseGPOffset = (ArgMode == 1);
24967 bool UseFPOffset = (ArgMode == 2);
24968 unsigned MaxOffset = TotalNumIntRegs * 8 +
24969 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
24971 // Align ArgSize to a multiple of 8.
24972 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
24973 bool NeedsAlign = (Align > 8);
24975 MachineBasicBlock *thisMBB = MBB;
24976 MachineBasicBlock *overflowMBB;
24977 MachineBasicBlock *offsetMBB;
24978 MachineBasicBlock *endMBB;
24980 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
24981 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
24982 unsigned OffsetReg = 0;
24984 if (!UseGPOffset && !UseFPOffset) {
24985 // If we only pull from the overflow region, we don't create a branch.
24986 // We don't need to alter control flow.
24987 OffsetDestReg = 0; // unused
24988 OverflowDestReg = DestReg;
24990 offsetMBB = nullptr;
24991 overflowMBB = thisMBB;
24994 // First emit code to check if gp_offset (or fp_offset) is below the bound.
24995 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
24996 // If not, pull from overflow_area. (branch to overflowMBB)
25001 // offsetMBB overflowMBB
25006 // Registers for the PHI in endMBB
25007 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
25008 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
25010 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
25011 MachineFunction *MF = MBB->getParent();
25012 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
25013 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
25014 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
25016 MachineFunction::iterator MBBIter = ++MBB->getIterator();
25018 // Insert the new basic blocks
25019 MF->insert(MBBIter, offsetMBB);
25020 MF->insert(MBBIter, overflowMBB);
25021 MF->insert(MBBIter, endMBB);
25023 // Transfer the remainder of MBB and its successor edges to endMBB.
25024 endMBB->splice(endMBB->begin(), thisMBB,
25025 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
25026 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
25028 // Make offsetMBB and overflowMBB successors of thisMBB
25029 thisMBB->addSuccessor(offsetMBB);
25030 thisMBB->addSuccessor(overflowMBB);
25032 // endMBB is a successor of both offsetMBB and overflowMBB
25033 offsetMBB->addSuccessor(endMBB);
25034 overflowMBB->addSuccessor(endMBB);
25036 // Load the offset value into a register
25037 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
25038 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
25042 .addDisp(Disp, UseFPOffset ? 4 : 0)
25044 .setMemRefs(MMOBegin, MMOEnd);
25046 // Check if there is enough room left to pull this argument.
25047 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
25049 .addImm(MaxOffset + 8 - ArgSizeA8);
25051 // Branch to "overflowMBB" if offset >= max
25052 // Fall through to "offsetMBB" otherwise
25053 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
25054 .addMBB(overflowMBB);
25057 // In offsetMBB, emit code to use the reg_save_area.
25059 assert(OffsetReg != 0);
25061 // Read the reg_save_area address.
25062 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
25063 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
25069 .setMemRefs(MMOBegin, MMOEnd);
25071 // Zero-extend the offset
25072 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
25073 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
25076 .addImm(X86::sub_32bit);
25078 // Add the offset to the reg_save_area to get the final address.
25079 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
25080 .addReg(OffsetReg64)
25081 .addReg(RegSaveReg);
25083 // Compute the offset for the next argument
25084 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
25085 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
25087 .addImm(UseFPOffset ? 16 : 8);
25089 // Store it back into the va_list.
25090 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
25094 .addDisp(Disp, UseFPOffset ? 4 : 0)
25096 .addReg(NextOffsetReg)
25097 .setMemRefs(MMOBegin, MMOEnd);
25100 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
25105 // Emit code to use overflow area
25108 // Load the overflow_area address into a register.
25109 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
25110 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
25116 .setMemRefs(MMOBegin, MMOEnd);
25118 // If we need to align it, do so. Otherwise, just copy the address
25119 // to OverflowDestReg.
25121 // Align the overflow address
25122 assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
25123 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
25125 // aligned_addr = (addr + (align-1)) & ~(align-1)
25126 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
25127 .addReg(OverflowAddrReg)
25130 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
25132 .addImm(~(uint64_t)(Align-1));
25134 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
25135 .addReg(OverflowAddrReg);
25138 // Compute the next overflow address after this argument.
25139 // (the overflow address should be kept 8-byte aligned)
25140 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
25141 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
25142 .addReg(OverflowDestReg)
25143 .addImm(ArgSizeA8);
25145 // Store the new overflow address.
25146 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
25152 .addReg(NextAddrReg)
25153 .setMemRefs(MMOBegin, MMOEnd);
25155 // If we branched, emit the PHI to the front of endMBB.
25157 BuildMI(*endMBB, endMBB->begin(), DL,
25158 TII->get(X86::PHI), DestReg)
25159 .addReg(OffsetDestReg).addMBB(offsetMBB)
25160 .addReg(OverflowDestReg).addMBB(overflowMBB);
25163 // Erase the pseudo instruction
25164 MI.eraseFromParent();
25169 MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
25170 MachineInstr &MI, MachineBasicBlock *MBB) const {
25171 // Emit code to save XMM registers to the stack. The ABI says that the
25172 // number of registers to save is given in %al, so it's theoretically
25173 // possible to do an indirect jump trick to avoid saving all of them,
25174 // however this code takes a simpler approach and just executes all
25175 // of the stores if %al is non-zero. It's less code, and it's probably
25176 // easier on the hardware branch predictor, and stores aren't all that
25177 // expensive anyway.
25179 // Create the new basic blocks. One block contains all the XMM stores,
25180 // and one block is the final destination regardless of whether any
25181 // stores were performed.
25182 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
25183 MachineFunction *F = MBB->getParent();
25184 MachineFunction::iterator MBBIter = ++MBB->getIterator();
25185 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
25186 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
25187 F->insert(MBBIter, XMMSaveMBB);
25188 F->insert(MBBIter, EndMBB);
25190 // Transfer the remainder of MBB and its successor edges to EndMBB.
25191 EndMBB->splice(EndMBB->begin(), MBB,
25192 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
25193 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
25195 // The original block will now fall through to the XMM save block.
25196 MBB->addSuccessor(XMMSaveMBB);
25197 // The XMMSaveMBB will fall through to the end block.
25198 XMMSaveMBB->addSuccessor(EndMBB);
25200 // Now add the instructions.
25201 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
25202 DebugLoc DL = MI.getDebugLoc();
25204 unsigned CountReg = MI.getOperand(0).getReg();
25205 int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
25206 int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
25208 if (!Subtarget.isCallingConvWin64(F->getFunction()->getCallingConv())) {
25209 // If %al is 0, branch around the XMM save block.
25210 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
25211 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
25212 MBB->addSuccessor(EndMBB);
25215 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
25216 // that was just emitted, but clearly shouldn't be "saved".
25217 assert((MI.getNumOperands() <= 3 ||
25218 !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
25219 MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
25220 "Expected last argument to be EFLAGS");
25221 unsigned MOVOpc = Subtarget.hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
25222 // In the XMM save block, save all the XMM argument registers.
25223 for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
25224 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
25225 MachineMemOperand *MMO = F->getMachineMemOperand(
25226 MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
25227 MachineMemOperand::MOStore,
25228 /*Size=*/16, /*Align=*/16);
25229 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
25230 .addFrameIndex(RegSaveFrameIndex)
25231 .addImm(/*Scale=*/1)
25232 .addReg(/*IndexReg=*/0)
25233 .addImm(/*Disp=*/Offset)
25234 .addReg(/*Segment=*/0)
25235 .addReg(MI.getOperand(i).getReg())
25236 .addMemOperand(MMO);
25239 MI.eraseFromParent(); // The pseudo instruction is gone now.
25244 // The EFLAGS operand of SelectItr might be missing a kill marker
25245 // because there were multiple uses of EFLAGS, and ISel didn't know
25246 // which to mark. Figure out whether SelectItr should have had a
25247 // kill marker, and set it if it should. Returns the correct kill marker value.
25249 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
25250 MachineBasicBlock* BB,
25251 const TargetRegisterInfo* TRI) {
25252 // Scan forward through BB for a use/def of EFLAGS.
25253 MachineBasicBlock::iterator miI(std::next(SelectItr));
25254 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
25255 const MachineInstr& mi = *miI;
25256 if (mi.readsRegister(X86::EFLAGS))
25258 if (mi.definesRegister(X86::EFLAGS))
25259 break; // Should have kill-flag - update below.
25262 // If we hit the end of the block, check whether EFLAGS is live into a
25264 if (miI == BB->end()) {
25265 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
25266 sEnd = BB->succ_end();
25267 sItr != sEnd; ++sItr) {
25268 MachineBasicBlock* succ = *sItr;
25269 if (succ->isLiveIn(X86::EFLAGS))
25274 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
25275 // out. SelectMI should have a kill flag on EFLAGS.
25276 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
25280 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
25281 // together with other CMOV pseudo-opcodes into a single basic-block with
25282 // a conditional jump around it.
25283 static bool isCMOVPseudo(MachineInstr &MI) {
25284 switch (MI.getOpcode()) {
25285 case X86::CMOV_FR32:
25286 case X86::CMOV_FR64:
25287 case X86::CMOV_GR8:
25288 case X86::CMOV_GR16:
25289 case X86::CMOV_GR32:
25290 case X86::CMOV_RFP32:
25291 case X86::CMOV_RFP64:
25292 case X86::CMOV_RFP80:
25293 case X86::CMOV_V2F64:
25294 case X86::CMOV_V2I64:
25295 case X86::CMOV_V4F32:
25296 case X86::CMOV_V4F64:
25297 case X86::CMOV_V4I64:
25298 case X86::CMOV_V16F32:
25299 case X86::CMOV_V8F32:
25300 case X86::CMOV_V8F64:
25301 case X86::CMOV_V8I64:
25302 case X86::CMOV_V8I1:
25303 case X86::CMOV_V16I1:
25304 case X86::CMOV_V32I1:
25305 case X86::CMOV_V64I1:
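/// Lowers the CMOV_* pseudos by building the diamond control flow a real
/// conditional move would avoid: a conditional branch around a copy block and
/// a PHI at the join point. Runs of CMOVs on the same (or opposite) condition
/// are folded into one diamond, and a cascaded CMOV pair is lowered with two
/// successive branches into the same join block; both cases are described in
/// the comments below.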
25313 MachineBasicBlock *
25314 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
25315 MachineBasicBlock *BB) const {
25316 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
25317 DebugLoc DL = MI.getDebugLoc();
25319 // To "insert" a SELECT_CC instruction, we actually have to insert the
25320 // diamond control-flow pattern. The incoming instruction knows the
25321 // destination vreg to set, the condition code register to branch on, the
25322 // true/false values to select between, and a branch opcode to use.
25323 const BasicBlock *LLVM_BB = BB->getBasicBlock();
25324 MachineFunction::iterator It = ++BB->getIterator();
25329 // cmpTY ccX, r1, r2
25331 // fallthrough --> copy0MBB
25332 MachineBasicBlock *thisMBB = BB;
25333 MachineFunction *F = BB->getParent();
25335 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
25336 // as described above, by inserting a BB, and then making a PHI at the join
25337 // point to select the true and false operands of the CMOV in the PHI.
25339 // The code also handles two different cases of multiple CMOV opcodes
25343 // In this case, there are multiple CMOVs in a row, all of which are based on
25344 // the same condition setting (or the exact opposite condition setting).
25345 // In this case we can lower all the CMOVs using a single inserted BB, and
25346 // then make a number of PHIs at the join point to model the CMOVs. The only
25347 // trickiness here is that in a case like:
25349 // t2 = CMOV cond1 t1, f1
25350 // t3 = CMOV cond1 t2, f2
25352 // when rewriting this into PHIs, we have to perform some renaming on the
25353 // temps since you cannot have a PHI operand refer to a PHI result earlier
25354 // in the same block. The "simple" but wrong lowering would be:
25356 // t2 = PHI t1(BB1), f1(BB2)
25357 // t3 = PHI t2(BB1), f2(BB2)
25359 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
25360 // renaming is to note that on the path through BB1, t2 is really just a
25361 // copy of t1, and do that renaming, properly generating:
25363 // t2 = PHI t1(BB1), f1(BB2)
25364 // t3 = PHI t1(BB1), f2(BB2)
25366 // Case 2, we lower cascaded CMOVs such as
25368 // (CMOV (CMOV F, T, cc1), T, cc2)
25370 // to two successive branches. For that, we look for another CMOV as the
25371 // following instruction.
25373 // Without this, we would add a PHI between the two jumps, which ends up
25374 // creating a few copies all around. For instance, for
25376 // (sitofp (zext (fcmp une)))
25378 // we would generate:
25380 // ucomiss %xmm1, %xmm0
25381 // movss <1.0f>, %xmm0
25382 // movaps %xmm0, %xmm1
25384 // xorps %xmm1, %xmm1
25387 // movaps %xmm1, %xmm0
25391 // because this custom-inserter would have generated:
25403 // A: X = ...; Y = ...
25405 // C: Z = PHI [X, A], [Y, B]
25407 // E: PHI [X, C], [Z, D]
25409 // If we lower both CMOVs in a single step, we can instead generate:
25421 // A: X = ...; Y = ...
25423 // E: PHI [X, A], [X, C], [Y, D]
25425 // Which, in our sitofp/fcmp example, gives us something like:
25427 // ucomiss %xmm1, %xmm0
25428 // movss <1.0f>, %xmm0
25431 // xorps %xmm0, %xmm0
25435 MachineInstr *CascadedCMOV = nullptr;
25436 MachineInstr *LastCMOV = &MI;
25437 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
25438 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
25439 MachineBasicBlock::iterator NextMIIt =
25440 std::next(MachineBasicBlock::iterator(MI));
25442 // Check for case 1, where there are multiple CMOVs with the same condition
25443 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
25444 // number of jumps the most.
25446 if (isCMOVPseudo(MI)) {
25447 // See if we have a string of CMOVS with the same condition.
25448 while (NextMIIt != BB->end() && isCMOVPseudo(*NextMIIt) &&
25449 (NextMIIt->getOperand(3).getImm() == CC ||
25450 NextMIIt->getOperand(3).getImm() == OppCC)) {
25451 LastCMOV = &*NextMIIt;
25456 // This checks for case 2, but only does so if we didn't already find
25457 // case 1, as indicated by LastCMOV == &MI.
25458 if (LastCMOV == &MI && NextMIIt != BB->end() &&
25459 NextMIIt->getOpcode() == MI.getOpcode() &&
25460 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
25461 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
25462 NextMIIt->getOperand(1).isKill()) {
25463 CascadedCMOV = &*NextMIIt;
25466 MachineBasicBlock *jcc1MBB = nullptr;
25468 // If we have a cascaded CMOV, we lower it to two successive branches to
25469 // the same block. EFLAGS is used by both, so mark it as live in the second.
25470 if (CascadedCMOV) {
25471 jcc1MBB = F->CreateMachineBasicBlock(LLVM_BB);
25472 F->insert(It, jcc1MBB);
25473 jcc1MBB->addLiveIn(X86::EFLAGS);
25476 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
25477 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
25478 F->insert(It, copy0MBB);
25479 F->insert(It, sinkMBB);
25481 // If the EFLAGS register isn't dead in the terminator, then claim that it's
25482 // live into the sink and copy blocks.
25483 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
25485 MachineInstr *LastEFLAGSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
25486 if (!LastEFLAGSUser->killsRegister(X86::EFLAGS) &&
25487 !checkAndUpdateEFLAGSKill(LastEFLAGSUser, BB, TRI)) {
25488 copy0MBB->addLiveIn(X86::EFLAGS);
25489 sinkMBB->addLiveIn(X86::EFLAGS);
25492 // Transfer the remainder of BB and its successor edges to sinkMBB.
25493 sinkMBB->splice(sinkMBB->begin(), BB,
25494 std::next(MachineBasicBlock::iterator(LastCMOV)), BB->end());
25495 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
25497 // Add the true and fallthrough blocks as its successors.
25498 if (CascadedCMOV) {
25499 // The fallthrough block may be jcc1MBB, if we have a cascaded CMOV.
25500 BB->addSuccessor(jcc1MBB);
25502 // In that case, jcc1MBB will itself fall through to copy0MBB, and
25503 // jump to the sinkMBB.
25504 jcc1MBB->addSuccessor(copy0MBB);
25505 jcc1MBB->addSuccessor(sinkMBB);
25507 BB->addSuccessor(copy0MBB);
25510 // The true block target of the first (or only) branch is always sinkMBB.
25511 BB->addSuccessor(sinkMBB);
25513 // Create the conditional branch instruction.
25514 unsigned Opc = X86::GetCondBranchFromCond(CC);
25515 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
25517 if (CascadedCMOV) {
25518 unsigned Opc2 = X86::GetCondBranchFromCond(
25519 (X86::CondCode)CascadedCMOV->getOperand(3).getImm());
25520 BuildMI(jcc1MBB, DL, TII->get(Opc2)).addMBB(sinkMBB);
25524 // %FalseValue = ...
25525 // # fallthrough to sinkMBB
25526 copy0MBB->addSuccessor(sinkMBB);
25529 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
25531 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
25532 MachineBasicBlock::iterator MIItEnd =
25533 std::next(MachineBasicBlock::iterator(LastCMOV));
25534 MachineBasicBlock::iterator SinkInsertionPoint = sinkMBB->begin();
25535 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
25536 MachineInstrBuilder MIB;
25538 // As we are creating the PHIs, we have to be careful if there is more than
25539 // one. Later CMOVs may reference the results of earlier CMOVs, but later
25540 // PHIs have to reference the individual true/false inputs from earlier PHIs.
25541 // That also means that PHI construction must work forward from earlier to
25542 // later, and that the code must maintain a mapping from earlier PHIs'
25543 // destination registers to the registers that went into the PHI.
25545 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
25546 unsigned DestReg = MIIt->getOperand(0).getReg();
25547 unsigned Op1Reg = MIIt->getOperand(1).getReg();
25548 unsigned Op2Reg = MIIt->getOperand(2).getReg();
25550 // If this CMOV we are generating is the opposite condition from
25551 // the jump we generated, then we have to swap the operands for the
25552 // PHI that is going to be generated.
25553 if (MIIt->getOperand(3).getImm() == OppCC)
25554 std::swap(Op1Reg, Op2Reg);
25556 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
25557 Op1Reg = RegRewriteTable[Op1Reg].first;
25559 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
25560 Op2Reg = RegRewriteTable[Op2Reg].second;
25562 MIB = BuildMI(*sinkMBB, SinkInsertionPoint, DL,
25563 TII->get(X86::PHI), DestReg)
25564 .addReg(Op1Reg).addMBB(copy0MBB)
25565 .addReg(Op2Reg).addMBB(thisMBB);
25567 // Add this PHI to the rewrite table.
25568 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
25571 // If we have a cascaded CMOV, the second Jcc provides the same incoming
25572 // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
25573 if (CascadedCMOV) {
25574 MIB.addReg(MI.getOperand(2).getReg()).addMBB(jcc1MBB);
25575 // Copy the PHI result to the register defined by the second CMOV.
25576 BuildMI(*sinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
25577 DL, TII->get(TargetOpcode::COPY),
25578 CascadedCMOV->getOperand(0).getReg())
25579 .addReg(MI.getOperand(0).getReg());
25580 CascadedCMOV->eraseFromParent();
25583 // Now remove the CMOV(s).
25584 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; )
25585 (MIIt++)->eraseFromParent();
25590 MachineBasicBlock *
25591 X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI,
25592 MachineBasicBlock *BB) const {
25593 // Combine the following atomic floating-point modification pattern:
25594 // a.store(reg OP a.load(acquire), release)
25595 // Transform it into:
25596 // OPss (%gpr), %xmm
25597 // movss %xmm, (%gpr)
25598 // Or sd equivalent for 64-bit operations.
25600 switch (MI.getOpcode()) {
25601 default: llvm_unreachable("unexpected instr type for EmitLoweredAtomicFP");
25602 case X86::RELEASE_FADD32mr:
25603 FOp = X86::ADDSSrm;
25604 MOp = X86::MOVSSmr;
25606 case X86::RELEASE_FADD64mr:
25607 FOp = X86::ADDSDrm;
25608 MOp = X86::MOVSDmr;
25611 const X86InstrInfo *TII = Subtarget.getInstrInfo();
25612 DebugLoc DL = MI.getDebugLoc();
25613 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
25614 unsigned ValOpIdx = X86::AddrNumOperands;
25615 unsigned VSrc = MI.getOperand(ValOpIdx).getReg();
25616 MachineInstrBuilder MIB =
25617 BuildMI(*BB, MI, DL, TII->get(FOp),
25618 MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
25620 for (int i = 0; i < X86::AddrNumOperands; ++i) {
25621 MachineOperand &Operand = MI.getOperand(i);
25622 // Clear any kill flags on register operands as we'll create a second
25623 // instruction using the same address operands.
25624 if (Operand.isReg())
25625 Operand.setIsKill(false);
25628 MachineInstr *FOpMI = MIB;
25629 MIB = BuildMI(*BB, MI, DL, TII->get(MOp));
25630 for (int i = 0; i < X86::AddrNumOperands; ++i)
25631 MIB.add(MI.getOperand(i));
25632 MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill);
25633 MI.eraseFromParent(); // The pseudo instruction is gone now.
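/// Lowers the SEG_ALLOCA pseudo used with segmented (split) stacks: compares
/// the requested allocation against the stacklet limit kept in the TLS block
/// and either bumps the stack pointer in place or calls
/// __morestack_allocate_stack_space to get fresh space, joining the two
/// resulting addresses with a PHI.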
25637 MachineBasicBlock *
25638 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
25639 MachineBasicBlock *BB) const {
25640 MachineFunction *MF = BB->getParent();
25641 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
25642 DebugLoc DL = MI.getDebugLoc();
25643 const BasicBlock *LLVM_BB = BB->getBasicBlock();
25645 assert(MF->shouldSplitStack());
25647 const bool Is64Bit = Subtarget.is64Bit();
25648 const bool IsLP64 = Subtarget.isTarget64BitLP64();
25650 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
25651 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
25654 // ... [Till the alloca]
25655 // If stacklet is not large enough, jump to mallocMBB
25658 // Allocate by subtracting from RSP
25659 // Jump to continueMBB
25662 // Allocate by call to runtime
25666 // [rest of original BB]
25669 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
25670 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
25671 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
25673 MachineRegisterInfo &MRI = MF->getRegInfo();
25674 const TargetRegisterClass *AddrRegClass =
25675 getRegClassFor(getPointerTy(MF->getDataLayout()));
25677 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
25678 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
25679 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
25680 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
25681 sizeVReg = MI.getOperand(1).getReg(),
25683 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
25685 MachineFunction::iterator MBBIter = ++BB->getIterator();
25687 MF->insert(MBBIter, bumpMBB);
25688 MF->insert(MBBIter, mallocMBB);
25689 MF->insert(MBBIter, continueMBB);
25691 continueMBB->splice(continueMBB->begin(), BB,
25692 std::next(MachineBasicBlock::iterator(MI)), BB->end());
25693 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
25695 // Add code to the main basic block to check if the stack limit has been hit,
25696 // and if so, jump to mallocMBB, otherwise to bumpMBB.
25697 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
25698 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
25699 .addReg(tmpSPVReg).addReg(sizeVReg);
25700 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
25701 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
25702 .addReg(SPLimitVReg);
25703 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
25705 // bumpMBB simply decreases the stack pointer, since we know the current
25706 // stacklet has enough space.
25707 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
25708 .addReg(SPLimitVReg);
25709 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
25710 .addReg(SPLimitVReg);
25711 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
25713 // Calls into a routine in libgcc to allocate more space from the heap.
25714 const uint32_t *RegMask =
25715 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
25717 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
25719 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
25720 .addExternalSymbol("__morestack_allocate_stack_space")
25721 .addRegMask(RegMask)
25722 .addReg(X86::RDI, RegState::Implicit)
25723 .addReg(X86::RAX, RegState::ImplicitDefine);
25724 } else if (Is64Bit) {
25725 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
25727 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
25728 .addExternalSymbol("__morestack_allocate_stack_space")
25729 .addRegMask(RegMask)
25730 .addReg(X86::EDI, RegState::Implicit)
25731 .addReg(X86::EAX, RegState::ImplicitDefine);
25733 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
25735 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
25736 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
25737 .addExternalSymbol("__morestack_allocate_stack_space")
25738 .addRegMask(RegMask)
25739 .addReg(X86::EAX, RegState::ImplicitDefine);
25743 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
25746 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
25747 .addReg(IsLP64 ? X86::RAX : X86::EAX);
25748 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
25750 // Set up the CFG correctly.
25751 BB->addSuccessor(bumpMBB);
25752 BB->addSuccessor(mallocMBB);
25753 mallocMBB->addSuccessor(continueMBB);
25754 bumpMBB->addSuccessor(continueMBB);
25756 // Take care of the PHI nodes.
25757 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
25758 MI.getOperand(0).getReg())
25759 .addReg(mallocPtrVReg)
25761 .addReg(bumpSPPtrVReg)
25764 // Delete the original pseudo instruction.
25765 MI.eraseFromParent();
25768 return continueMBB;
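/// Lowers CATCHRET. 64-bit targets need no extra work; for 32-bit C++ EH a
/// fresh block containing EH_RESTORE and a jump to the original target is
/// inserted and the catchret is retargeted to it, so the stack pointer is
/// restored before resuming there.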
25771 MachineBasicBlock *
25772 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
25773 MachineBasicBlock *BB) const {
25774 MachineFunction *MF = BB->getParent();
25775 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
25776 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
25777 DebugLoc DL = MI.getDebugLoc();
25779 assert(!isAsynchronousEHPersonality(
25780 classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
25781 "SEH does not use catchret!");
25783 // Only 32-bit EH needs to worry about manually restoring stack pointers.
25784 if (!Subtarget.is32Bit())
25787 // C++ EH creates a new target block to hold the restore code, and wires up
25788 // the new block to the return destination with a normal JMP_4.
25789 MachineBasicBlock *RestoreMBB =
25790 MF->CreateMachineBasicBlock(BB->getBasicBlock());
25791 assert(BB->succ_size() == 1);
25792 MF->insert(std::next(BB->getIterator()), RestoreMBB);
25793 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
25794 BB->addSuccessor(RestoreMBB);
25795 MI.getOperand(0).setMBB(RestoreMBB);
25797 auto RestoreMBBI = RestoreMBB->begin();
25798 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
25799 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
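/// Lowers CATCHPAD. Only 32-bit SEH needs any code here: an EH_RESTORE is
/// emitted to re-establish the stack pointer; in every other configuration
/// the pseudo is simply erased.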
25803 MachineBasicBlock *
25804 X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
25805 MachineBasicBlock *BB) const {
25806 MachineFunction *MF = BB->getParent();
25807 const Constant *PerFn = MF->getFunction()->getPersonalityFn();
25808 bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
25809 // Only 32-bit SEH requires special handling for catchpad.
25810 if (IsSEH && Subtarget.is32Bit()) {
25811 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
25812 DebugLoc DL = MI.getDebugLoc();
25813 BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
25815 MI.eraseFromParent();
25819 MachineBasicBlock *
25820 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
25821 MachineBasicBlock *BB) const {
25822 // So, here we replace TLSADDR with the sequence:
25823 // adjust_stackdown -> TLSADDR -> adjust_stackup.
25824 // We need this because TLSADDR is lowered into calls
25825 // inside MC, therefore without the two markers shrink-wrapping
25826 // may push the prologue/epilogue past them.
25827 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
25828 DebugLoc DL = MI.getDebugLoc();
25829 MachineFunction &MF = *BB->getParent();
25831 // Emit CALLSEQ_START right before the instruction.
25832 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
25833 MachineInstrBuilder CallseqStart =
25834 BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
25835 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
25837 // Emit CALLSEQ_END right after the instruction.
25838 // We don't call erase from parent because we want to keep the
25839 // original instruction around.
25840 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
25841 MachineInstrBuilder CallseqEnd =
25842 BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
25843 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
25848 MachineBasicBlock *
25849 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
25850 MachineBasicBlock *BB) const {
25851 // This is pretty easy. We're taking the value that we received from
25852 // our load from the relocation, sticking it in either RDI (x86-64)
25853 // or EAX and doing an indirect call. The return value will then
25854 // be in the normal return register.
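// For example, on x86-64 Darwin this ends up as something like (illustrative
// assembly; "_var" is a stand-in for the global operand below):
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)              ## thread-local address comes back in %rax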
25855 MachineFunction *F = BB->getParent();
25856 const X86InstrInfo *TII = Subtarget.getInstrInfo();
25857 DebugLoc DL = MI.getDebugLoc();
25859 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
25860 assert(MI.getOperand(3).isGlobal() && "This should be a global");
25862 // Get a register mask for the lowered call.
25863 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
25864 // proper register mask.
25865 const uint32_t *RegMask =
25866 Subtarget.is64Bit() ?
25867 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
25868 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
25869 if (Subtarget.is64Bit()) {
25870 MachineInstrBuilder MIB =
25871 BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
25875 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
25876 MI.getOperand(3).getTargetFlags())
25878 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
25879 addDirectMem(MIB, X86::RDI);
25880 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
25881 } else if (!isPositionIndependent()) {
25882 MachineInstrBuilder MIB =
25883 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
25887 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
25888 MI.getOperand(3).getTargetFlags())
25890 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
25891 addDirectMem(MIB, X86::EAX);
25892 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
25894 MachineInstrBuilder MIB =
25895 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
25896 .addReg(TII->getGlobalBaseReg(F))
25899 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
25900 MI.getOperand(3).getTargetFlags())
25902 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
25903 addDirectMem(MIB, X86::EAX);
25904 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
25907 MI.eraseFromParent(); // The pseudo instruction is gone now.
25911 MachineBasicBlock *
25912 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
25913 MachineBasicBlock *MBB) const {
25914 DebugLoc DL = MI.getDebugLoc();
25915 MachineFunction *MF = MBB->getParent();
25916 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
25917 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
25918 MachineRegisterInfo &MRI = MF->getRegInfo();
25920 const BasicBlock *BB = MBB->getBasicBlock();
25921 MachineFunction::iterator I = ++MBB->getIterator();
25923 // Memory Reference
25924 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
25925 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
25928 unsigned MemOpndSlot = 0;
25930 unsigned CurOp = 0;
25932 DstReg = MI.getOperand(CurOp++).getReg();
25933 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
25934 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
25936 unsigned mainDstReg = MRI.createVirtualRegister(RC);
25937 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
25939 MemOpndSlot = CurOp;
25941 MVT PVT = getPointerTy(MF->getDataLayout());
25942 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
25943 "Invalid Pointer Size!");
25945 // For v = setjmp(buf), we generate
25948 // buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
25949 // SjLjSetup restoreMBB
25955 // v = phi(main, restore)
25958 // if the base pointer is being used, load it from the frame
25961 MachineBasicBlock *thisMBB = MBB;
25962 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
25963 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
25964 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
25965 MF->insert(I, mainMBB);
25966 MF->insert(I, sinkMBB);
25967 MF->push_back(restoreMBB);
25968 restoreMBB->setHasAddressTaken();
25970 MachineInstrBuilder MIB;
25972 // Transfer the remainder of BB and its successor edges to sinkMBB.
25973 sinkMBB->splice(sinkMBB->begin(), MBB,
25974 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
25975 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
25978 unsigned PtrStoreOpc = 0;
25979 unsigned LabelReg = 0;
25980 const int64_t LabelOffset = 1 * PVT.getStoreSize();
25981 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
25982 !isPositionIndependent();
25984 // Prepare IP either in reg or imm.
25985 if (!UseImmLabel) {
25986 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
25987 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
25988 LabelReg = MRI.createVirtualRegister(PtrRC);
25989 if (Subtarget.is64Bit()) {
25990 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
25994 .addMBB(restoreMBB)
25997 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
25998 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
25999 .addReg(XII->getGlobalBaseReg(MF))
26002 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
26006 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
26008 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
26009 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
26010 if (i == X86::AddrDisp)
26011 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
26013 MIB.add(MI.getOperand(MemOpndSlot + i));
26016 MIB.addReg(LabelReg);
26018 MIB.addMBB(restoreMBB);
26019 MIB.setMemRefs(MMOBegin, MMOEnd);
26021 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
26022 .addMBB(restoreMBB);
26024 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
26025 MIB.addRegMask(RegInfo->getNoPreservedMask());
26026 thisMBB->addSuccessor(mainMBB);
26027 thisMBB->addSuccessor(restoreMBB);
26031 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
26032 mainMBB->addSuccessor(sinkMBB);
26035 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
26036 TII->get(X86::PHI), DstReg)
26037 .addReg(mainDstReg).addMBB(mainMBB)
26038 .addReg(restoreDstReg).addMBB(restoreMBB);
26041 if (RegInfo->hasBasePointer(*MF)) {
26042 const bool Uses64BitFramePtr =
26043 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
26044 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
26045 X86FI->setRestoreBasePointer(MF);
26046 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
26047 unsigned BasePtr = RegInfo->getBaseRegister();
26048 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
26049 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
26050 FramePtr, true, X86FI->getRestoreBasePointerOffset())
26051 .setMIFlag(MachineInstr::FrameSetup);
26053 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
26054 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
26055 restoreMBB->addSuccessor(sinkMBB);
26057 MI.eraseFromParent();
26061 MachineBasicBlock *
26062 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
26063 MachineBasicBlock *MBB) const {
26064 DebugLoc DL = MI.getDebugLoc();
26065 MachineFunction *MF = MBB->getParent();
26066 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
26067 MachineRegisterInfo &MRI = MF->getRegInfo();
26069 // Memory Reference
26070 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
26071 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
26073 MVT PVT = getPointerTy(MF->getDataLayout());
26074 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
26075 "Invalid Pointer Size!");
26077 const TargetRegisterClass *RC =
26078 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
26079 unsigned Tmp = MRI.createVirtualRegister(RC);
26080 // Since FP is only updated here but NOT referenced, it's treated as GPR.
26081 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
26082 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
26083 unsigned SP = RegInfo->getStackRegister();
26085 MachineInstrBuilder MIB;
26087 const int64_t LabelOffset = 1 * PVT.getStoreSize();
26088 const int64_t SPOffset = 2 * PVT.getStoreSize();
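// Buffer layout assumed by the three loads below, in pointer-sized slots:
//   [0] = frame pointer, [1] = resume label, [2] = stack pointer.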
26090 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
26091 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
26094 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
26095 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
26096 MIB.add(MI.getOperand(i));
26097 MIB.setMemRefs(MMOBegin, MMOEnd);
26099 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
26100 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
26101 if (i == X86::AddrDisp)
26102 MIB.addDisp(MI.getOperand(i), LabelOffset);
26104 MIB.add(MI.getOperand(i));
26106 MIB.setMemRefs(MMOBegin, MMOEnd);
26108 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
26109 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
26110 if (i == X86::AddrDisp)
26111 MIB.addDisp(MI.getOperand(i), SPOffset);
26113 MIB.add(MI.getOperand(i));
26115 MIB.setMemRefs(MMOBegin, MMOEnd);
26117 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
26119 MI.eraseFromParent();
26123 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
26124 MachineBasicBlock *MBB,
26125 MachineBasicBlock *DispatchBB,
26127 DebugLoc DL = MI.getDebugLoc();
26128 MachineFunction *MF = MBB->getParent();
26129 MachineRegisterInfo *MRI = &MF->getRegInfo();
26130 const X86InstrInfo *TII = Subtarget.getInstrInfo();
26132 MVT PVT = getPointerTy(MF->getDataLayout());
26133 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
26138 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
26139 !isPositionIndependent();
26142 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
26144 const TargetRegisterClass *TRC =
26145 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
26146 VR = MRI->createVirtualRegister(TRC);
26147 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
26149 if (Subtarget.is64Bit())
26150 BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
26154 .addMBB(DispatchBB)
26157 BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
26158 .addReg(0) /* TII->getGlobalBaseReg(MF) */
26161 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
26165 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
26166 addFrameReference(MIB, FI, 36);
26168 MIB.addMBB(DispatchBB);
26173 MachineBasicBlock *
26174 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
26175 MachineBasicBlock *BB) const {
26176 DebugLoc DL = MI.getDebugLoc();
26177 MachineFunction *MF = BB->getParent();
26178 MachineFrameInfo &MFI = MF->getFrameInfo();
26179 MachineRegisterInfo *MRI = &MF->getRegInfo();
26180 const X86InstrInfo *TII = Subtarget.getInstrInfo();
26181 int FI = MFI.getFunctionContextIndex();
26183 // Get a mapping of the call site numbers to all of the landing pads they're
26184 // associated with.
26185 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
26186 unsigned MaxCSNum = 0;
26187 for (auto &MBB : *MF) {
26188 if (!MBB.isEHPad())
26191 MCSymbol *Sym = nullptr;
26192 for (const auto &MI : MBB) {
26193 if (MI.isDebugValue())
26196 assert(MI.isEHLabel() && "expected EH_LABEL");
26197 Sym = MI.getOperand(0).getMCSymbol();
26201 if (!MF->hasCallSiteLandingPad(Sym))
26204 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
26205 CallSiteNumToLPad[CSI].push_back(&MBB);
26206 MaxCSNum = std::max(MaxCSNum, CSI);
26210 // Get an ordered list of the machine basic blocks for the jump table.
26211 std::vector<MachineBasicBlock *> LPadList;
26212 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
26213 LPadList.reserve(CallSiteNumToLPad.size());
26215 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
26216 for (auto &LP : CallSiteNumToLPad[CSI]) {
26217 LPadList.push_back(LP);
26218 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
26222 assert(!LPadList.empty() &&
26223 "No landing pad destinations for the dispatch jump table!");
26225 // Create the MBBs for the dispatch code.
26227 // Shove the dispatch's address into the return slot in the function context.
26228 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
26229 DispatchBB->setIsEHPad(true);
26231 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
26232 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
26233 DispatchBB->addSuccessor(TrapBB);
26235 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
26236 DispatchBB->addSuccessor(DispContBB);
26239 MF->push_back(DispatchBB);
26240 MF->push_back(DispContBB);
26241 MF->push_back(TrapBB);
26243 // Insert code into the entry block that creates and registers the function context.
26245 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
26247 // Create the jump table and associated information
26248 MachineJumpTableInfo *JTI =
26249 MF->getOrCreateJumpTableInfo(getJumpTableEncoding());
26250 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
26252 const X86RegisterInfo &RI = TII->getRegisterInfo();
26253 // Add a register mask with no preserved registers. This results in all
26254 // registers being marked as clobbered.
26255 if (RI.hasBasePointer(*MF)) {
26256 const bool FPIs64Bit =
26257 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
26258 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
26259 MFI->setRestoreBasePointer(MF);
26261 unsigned FP = RI.getFrameRegister(*MF);
26262 unsigned BP = RI.getBaseRegister();
26263 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
26264 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
26265 MFI->getRestoreBasePointerOffset())
26266 .addRegMask(RI.getNoPreservedMask());
26268 BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
26269 .addRegMask(RI.getNoPreservedMask());
26272 unsigned IReg = MRI->createVirtualRegister(&X86::GR32RegClass);
26273 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
26275 BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
26277 .addImm(LPadList.size());
26278 BuildMI(DispatchBB, DL, TII->get(X86::JA_1)).addMBB(TrapBB);
26280 unsigned JReg = MRI->createVirtualRegister(&X86::GR32RegClass);
26281 BuildMI(DispContBB, DL, TII->get(X86::SUB32ri), JReg)
26284 BuildMI(DispContBB, DL,
26285 TII->get(Subtarget.is64Bit() ? X86::JMP64m : X86::JMP32m))
26287 .addImm(Subtarget.is64Bit() ? 8 : 4)
26289 .addJumpTableIndex(MJTI)
26292 // Add the jump table entries as successors to the MBB.
26293 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
26294 for (auto &LP : LPadList)
26295 if (SeenMBBs.insert(LP).second)
26296 DispContBB->addSuccessor(LP);
26298 // N.B. the order the invoke BBs are processed in doesn't matter here.
26299 SmallVector<MachineBasicBlock *, 64> MBBLPads;
26300 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
26301 for (MachineBasicBlock *MBB : InvokeBBs) {
26302 // Remove the landing pad successor from the invoke block and replace it
26303 // with the new dispatch block.
26304 // Keep a copy of Successors since it's modified inside the loop.
26305 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
26307 // FIXME: Avoid quadratic complexity.
26308 for (auto MBBS : Successors) {
26309 if (MBBS->isEHPad()) {
26310 MBB->removeSuccessor(MBBS);
26311 MBBLPads.push_back(MBBS);
26315 MBB->addSuccessor(DispatchBB);
26317 // Find the invoke call and mark all of the callee-saved registers as
26318 // 'implicit defined' so that they're spilled. This prevents code from
26319 // moving instructions to before the EH block, where they will never be executed.
26321 for (auto &II : reverse(*MBB)) {
26325 DenseMap<unsigned, bool> DefRegs;
26326 for (auto &MOp : II.operands())
26328 DefRegs[MOp.getReg()] = true;
26330 MachineInstrBuilder MIB(*MF, &II);
26331 for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
26332 unsigned Reg = SavedRegs[RI];
26334 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
26341 // Mark all former landing pads as non-landing pads. The dispatch is the only
26342 // landing pad now.
26343 for (auto &LP : MBBLPads)
26344 LP->setIsEHPad(false);
26346 // The instruction is gone now.
26347 MI.eraseFromParent();
26351 MachineBasicBlock *
26352 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
26353 MachineBasicBlock *BB) const {
26354 MachineFunction *MF = BB->getParent();
26355 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
26356 DebugLoc DL = MI.getDebugLoc();
26358 switch (MI.getOpcode()) {
26359 default: llvm_unreachable("Unexpected instr type to insert");
26360 case X86::TAILJMPd64:
26361 case X86::TAILJMPr64:
26362 case X86::TAILJMPm64:
26363 case X86::TAILJMPr64_REX:
26364 case X86::TAILJMPm64_REX:
26365 llvm_unreachable("TAILJMP64 would not be touched here.");
26366 case X86::TCRETURNdi64:
26367 case X86::TCRETURNri64:
26368 case X86::TCRETURNmi64:
26370 case X86::TLS_addr32:
26371 case X86::TLS_addr64:
26372 case X86::TLS_base_addr32:
26373 case X86::TLS_base_addr64:
26374 return EmitLoweredTLSAddr(MI, BB);
26375 case X86::CATCHRET:
26376 return EmitLoweredCatchRet(MI, BB);
26377 case X86::CATCHPAD:
26378 return EmitLoweredCatchPad(MI, BB);
26379 case X86::SEG_ALLOCA_32:
26380 case X86::SEG_ALLOCA_64:
26381 return EmitLoweredSegAlloca(MI, BB);
26382 case X86::TLSCall_32:
26383 case X86::TLSCall_64:
26384 return EmitLoweredTLSCall(MI, BB);
26385 case X86::CMOV_FR32:
26386 case X86::CMOV_FR64:
26387 case X86::CMOV_FR128:
26388 case X86::CMOV_GR8:
26389 case X86::CMOV_GR16:
26390 case X86::CMOV_GR32:
26391 case X86::CMOV_RFP32:
26392 case X86::CMOV_RFP64:
26393 case X86::CMOV_RFP80:
26394 case X86::CMOV_V2F64:
26395 case X86::CMOV_V2I64:
26396 case X86::CMOV_V4F32:
26397 case X86::CMOV_V4F64:
26398 case X86::CMOV_V4I64:
26399 case X86::CMOV_V16F32:
26400 case X86::CMOV_V8F32:
26401 case X86::CMOV_V8F64:
26402 case X86::CMOV_V8I64:
26403 case X86::CMOV_V8I1:
26404 case X86::CMOV_V16I1:
26405 case X86::CMOV_V32I1:
26406 case X86::CMOV_V64I1:
26407 return EmitLoweredSelect(MI, BB);
26409 case X86::RDFLAGS32:
26410 case X86::RDFLAGS64: {
26412 MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
26413 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
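// The expansion is simply a push of FLAGS followed by a pop into the
// destination register, e.g. for RDFLAGS32: pushfl ; popl %dst.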
26414 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
26415 // Permit reads of the FLAGS register without it being defined.
26416 // This intrinsic exists to read external processor state in flags, such as
26417 // the trap flag, interrupt flag, and direction flag, none of which are
26418 // modeled by the backend.
26419 Push->getOperand(2).setIsUndef();
26420 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
26422 MI.eraseFromParent(); // The pseudo is gone now.
26426 case X86::WRFLAGS32:
26427 case X86::WRFLAGS64: {
26429 MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
26431 MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
26432 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
26433 BuildMI(*BB, MI, DL, TII->get(PopF));
26435 MI.eraseFromParent(); // The pseudo is gone now.
26439 case X86::RELEASE_FADD32mr:
26440 case X86::RELEASE_FADD64mr:
26441 return EmitLoweredAtomicFP(MI, BB);
26443 case X86::FP32_TO_INT16_IN_MEM:
26444 case X86::FP32_TO_INT32_IN_MEM:
26445 case X86::FP32_TO_INT64_IN_MEM:
26446 case X86::FP64_TO_INT16_IN_MEM:
26447 case X86::FP64_TO_INT32_IN_MEM:
26448 case X86::FP64_TO_INT64_IN_MEM:
26449 case X86::FP80_TO_INT16_IN_MEM:
26450 case X86::FP80_TO_INT32_IN_MEM:
26451 case X86::FP80_TO_INT64_IN_MEM: {
26452 // Change the floating point control register to use "round towards zero"
26453 // mode when truncating to an integer value.
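// Rough order of the code emitted below: fnstcw (spill the current CW),
// load the old CW into a vreg, overwrite the slot with a round-towards-zero
// CW, fldcw, restore the slot from the vreg, fistp-style store, and a final
// fldcw to reinstate the original control word.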
26454 int CWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
26455 addFrameReference(BuildMI(*BB, MI, DL,
26456 TII->get(X86::FNSTCW16m)), CWFrameIdx);
26458 // Load the old value of the control word...
26460 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
26461 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
26464 // Set the high part to be round to zero...
26465 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
26468 // Reload the modified control word now...
26469 addFrameReference(BuildMI(*BB, MI, DL,
26470 TII->get(X86::FLDCW16m)), CWFrameIdx);
26472 // Restore the memory image of the control word to its original value
26473 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
26476 // Get the X86 opcode to use.
26478 switch (MI.getOpcode()) {
26479 default: llvm_unreachable("illegal opcode!");
26480 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
26481 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
26482 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
26483 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
26484 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
26485 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
26486 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
26487 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
26488 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
26491 X86AddressMode AM = getAddressFromInstr(&MI, 0);
26492 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
26493 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
26495 // Reload the original control word now.
26496 addFrameReference(BuildMI(*BB, MI, DL,
26497 TII->get(X86::FLDCW16m)), CWFrameIdx);
26499 MI.eraseFromParent(); // The pseudo instruction is gone now.
26502 // String/text processing lowering.
26503 case X86::PCMPISTRM128REG:
26504 case X86::VPCMPISTRM128REG:
26505 case X86::PCMPISTRM128MEM:
26506 case X86::VPCMPISTRM128MEM:
26507 case X86::PCMPESTRM128REG:
26508 case X86::VPCMPESTRM128REG:
26509 case X86::PCMPESTRM128MEM:
26510 case X86::VPCMPESTRM128MEM:
26511 assert(Subtarget.hasSSE42() &&
26512 "Target must have SSE4.2 or AVX features enabled");
26513 return emitPCMPSTRM(MI, BB, Subtarget.getInstrInfo());
26515 // String/text processing lowering.
26516 case X86::PCMPISTRIREG:
26517 case X86::VPCMPISTRIREG:
26518 case X86::PCMPISTRIMEM:
26519 case X86::VPCMPISTRIMEM:
26520 case X86::PCMPESTRIREG:
26521 case X86::VPCMPESTRIREG:
26522 case X86::PCMPESTRIMEM:
26523 case X86::VPCMPESTRIMEM:
26524 assert(Subtarget.hasSSE42() &&
26525 "Target must have SSE4.2 or AVX features enabled");
26526 return emitPCMPSTRI(MI, BB, Subtarget.getInstrInfo());
26528 // Thread synchronization.
26530 return emitMonitor(MI, BB, Subtarget, X86::MONITORrrr);
26531 case X86::MONITORX:
26532 return emitMonitor(MI, BB, Subtarget, X86::MONITORXrrr);
26536 return emitClzero(&MI, BB, Subtarget);
26540 return emitWRPKRU(MI, BB, Subtarget);
26542 return emitRDPKRU(MI, BB, Subtarget);
26545 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
26547 case X86::VASTART_SAVE_XMM_REGS:
26548 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
26550 case X86::VAARG_64:
26551 return EmitVAARG64WithCustomInserter(MI, BB);
26553 case X86::EH_SjLj_SetJmp32:
26554 case X86::EH_SjLj_SetJmp64:
26555 return emitEHSjLjSetJmp(MI, BB);
26557 case X86::EH_SjLj_LongJmp32:
26558 case X86::EH_SjLj_LongJmp64:
26559 return emitEHSjLjLongJmp(MI, BB);
26561 case X86::Int_eh_sjlj_setup_dispatch:
26562 return EmitSjLjDispatchBlock(MI, BB);
26564 case TargetOpcode::STATEPOINT:
26565 // As an implementation detail, STATEPOINT shares the STACKMAP format at
26566 // this point in the process. We diverge later.
26567 return emitPatchPoint(MI, BB);
26569 case TargetOpcode::STACKMAP:
26570 case TargetOpcode::PATCHPOINT:
26571 return emitPatchPoint(MI, BB);
26573 case TargetOpcode::PATCHABLE_EVENT_CALL:
26574 // Do nothing here; this is handled in the XRay instrumentation pass.
26577 case X86::LCMPXCHG8B: {
26578 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
26579 // In addition to the four E[ABCD] registers implied by its encoding, CMPXCHG8B
26580 // requires a memory operand. If the current architecture is
26581 // i686 and the current function needs a base pointer
26582 // - which is ESI on i686 - the register allocator would not be able to
26583 // allocate registers for an address of the form X(%reg, %reg, Y):
26584 // there would never be enough unreserved registers during regalloc
26585 // (without the need for a base ptr the only option would be X(%edi, %esi, Y)).
26586 // We give the register allocator a hand by precomputing the address in
26587 // a new vreg using LEA.
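// Illustrative transformation (register names are hypothetical):
//   before:  lock cmpxchg8b 16(%esi,%edi,4)
//   after:   leal 16(%esi,%edi,4), %computed_addr
//            lock cmpxchg8b (%computed_addr)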
26589 // If it is not i686 or there is no base pointer - nothing to do here.
26590 if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
26593 // Even though this code does not necessarily need the base pointer to
26594 // be ESI, we check for that. The reason: if this assert fails, there have
26595 // been changes in the compiler's base pointer handling, which most
26596 // probably have to be addressed somehow here.
26597 assert(TRI->getBaseRegister() == X86::ESI &&
26598 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
26599 "base pointer in mind");
26601 MachineRegisterInfo &MRI = MF->getRegInfo();
26602 MVT SPTy = getPointerTy(MF->getDataLayout());
26603 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
26604 unsigned computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
26606 X86AddressMode AM = getAddressFromInstr(&MI, 0);
26607 // Regalloc does not need any help when the memory operand of CMPXCHG8B
26608 // does not use an index register.
26609 if (AM.IndexReg == X86::NoRegister)
26612 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
26613 // four operand definitions that are E[ABCD] registers. We skip them and
26614 // then insert the LEA.
26615 MachineBasicBlock::iterator MBBI(MI);
26616 while (MBBI->definesRegister(X86::EAX) || MBBI->definesRegister(X86::EBX) ||
26617 MBBI->definesRegister(X86::ECX) || MBBI->definesRegister(X86::EDX))
26620 BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
26622 setDirectAddressInInstr(&MI, 0, computedAddrVReg);
26626 case X86::LCMPXCHG16B:
26628 case X86::LCMPXCHG8B_SAVE_EBX:
26629 case X86::LCMPXCHG16B_SAVE_RBX: {
26631 MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
26632 if (!BB->isLiveIn(BasePtr))
26633 BB->addLiveIn(BasePtr);
26639 //===----------------------------------------------------------------------===//
26640 // X86 Optimization Hooks
26641 //===----------------------------------------------------------------------===//
26643 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
26645 const APInt &DemandedElts,
26646 const SelectionDAG &DAG,
26647 unsigned Depth) const {
26648 unsigned BitWidth = Known.getBitWidth();
26649 unsigned Opc = Op.getOpcode();
26650 EVT VT = Op.getValueType();
26651 assert((Opc >= ISD::BUILTIN_OP_END ||
26652 Opc == ISD::INTRINSIC_WO_CHAIN ||
26653 Opc == ISD::INTRINSIC_W_CHAIN ||
26654 Opc == ISD::INTRINSIC_VOID) &&
26655 "Should use MaskedValueIsZero if you don't know whether Op"
26656 " is a target node!");
26672 // These nodes' second result is a boolean.
26673 if (Op.getResNo() == 0)
26676 case X86ISD::SETCC:
26677 Known.Zero.setBitsFrom(1);
26679 case X86ISD::MOVMSK: {
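// MOVMSK produces one bit per input vector element; everything above that
// is zero, e.g. a v4f32 MOVMSK leaves bits [31:4] of the i32 result zero.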
26680 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
26681 Known.Zero.setBitsFrom(NumLoBits);
26684 case X86ISD::VSHLI:
26685 case X86ISD::VSRLI: {
26686 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26687 if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
26688 Known.setAllZero();
26692 DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1);
26693 unsigned ShAmt = ShiftImm->getZExtValue();
26694 if (Opc == X86ISD::VSHLI) {
26695 Known.Zero <<= ShAmt;
26696 Known.One <<= ShAmt;
26697 // Low bits are known zero.
26698 Known.Zero.setLowBits(ShAmt);
26700 Known.Zero.lshrInPlace(ShAmt);
26701 Known.One.lshrInPlace(ShAmt);
26702 // High bits are known zero.
26703 Known.Zero.setHighBits(ShAmt);
26708 case X86ISD::VZEXT: {
26709 SDValue N0 = Op.getOperand(0);
26710 unsigned NumElts = VT.getVectorNumElements();
26712 EVT SrcVT = N0.getValueType();
26713 unsigned InNumElts = SrcVT.getVectorNumElements();
26714 unsigned InBitWidth = SrcVT.getScalarSizeInBits();
26715 assert(InNumElts >= NumElts && "Illegal VZEXT input");
26717 Known = KnownBits(InBitWidth);
26718 APInt DemandedSrcElts = APInt::getLowBitsSet(InNumElts, NumElts);
26719 DAG.computeKnownBits(N0, Known, DemandedSrcElts, Depth + 1);
26720 Known = Known.zext(BitWidth);
26721 Known.Zero.setBitsFrom(InBitWidth);
26727 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
26728 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
26729 unsigned Depth) const {
26730 unsigned VTBits = Op.getScalarValueSizeInBits();
26731 unsigned Opcode = Op.getOpcode();
26733 case X86ISD::SETCC_CARRY:
26734 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
26737 case X86ISD::VSEXT: {
26738 SDValue Src = Op.getOperand(0);
26739 unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
26740 Tmp += VTBits - Src.getScalarValueSizeInBits();
26744 case X86ISD::VSRAI: {
26745 SDValue Src = Op.getOperand(0);
26746 unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
26747 APInt ShiftVal = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue();
26749 return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
26752 case X86ISD::PCMPGT:
26753 case X86ISD::PCMPEQ:
26755 case X86ISD::VPCOM:
26756 case X86ISD::VPCOMU:
26757 // Vector compares return zero/all-bits result values.
26765 /// Returns true (and the GlobalValue and the offset) if the node is a
26766 /// GlobalAddress + offset.
26767 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
26768 const GlobalValue* &GA,
26769 int64_t &Offset) const {
26770 if (N->getOpcode() == X86ISD::Wrapper) {
26771 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
26772 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
26773 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
26777 return TargetLowering::isGAPlusOffset(N, GA, Offset);
26780 // Attempt to match a combined shuffle mask against supported unary shuffle instructions.
26782 // TODO: Investigate sharing more of this with shuffle lowering.
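// For example (assuming SSE4.1 and the integer domain), a v4i32 mask
// <0, Z, 1, Z> (Z = zeroable) matches the Scale == 2 case below and becomes a
// 32->64-bit in-register zero-extension (ZERO_EXTEND_VECTOR_INREG, i.e.
// PMOVZXDQ).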
26783 static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
26784 bool AllowFloatDomain, bool AllowIntDomain,
26785 SDValue &V1, SDLoc &DL, SelectionDAG &DAG,
26786 const X86Subtarget &Subtarget,
26787 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT) {
26788 unsigned NumMaskElts = Mask.size();
26789 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
26791 // Match against a ZERO_EXTEND_VECTOR_INREG/VZEXT instruction.
26792 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
26793 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
26794 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
26795 unsigned MaxScale = 64 / MaskEltSize;
26796 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
26798 unsigned NumDstElts = NumMaskElts / Scale;
26799 for (unsigned i = 0; i != NumDstElts && Match; ++i) {
26800 Match &= isUndefOrEqual(Mask[i * Scale], (int)i);
26801 Match &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
26804 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
26805 SrcVT = MVT::getVectorVT(MaskVT.getScalarType(), SrcSize / MaskEltSize);
26806 if (SrcVT != MaskVT)
26807 V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
26808 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
26809 DstVT = MVT::getVectorVT(DstVT, NumDstElts);
26810 Shuffle = SrcVT != MaskVT ? unsigned(X86ISD::VZEXT)
26811 : unsigned(ISD::ZERO_EXTEND_VECTOR_INREG);
26817 // Match against a VZEXT_MOVL instruction; SSE1 only supports 32-bit elements (MOVSS).
26818 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
26819 isUndefOrEqual(Mask[0], 0) &&
26820 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
26821 Shuffle = X86ISD::VZEXT_MOVL;
26822 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
26826 // Check if we have SSE3 which will let us use MOVDDUP etc. These
26827 // instructions are no slower than UNPCKLPD but have the option to
26828 // fold the input operand, even from an unaligned memory load.
26829 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
26830 if (isTargetShuffleEquivalent(Mask, {0, 0})) {
26831 Shuffle = X86ISD::MOVDDUP;
26832 SrcVT = DstVT = MVT::v2f64;
26835 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
26836 Shuffle = X86ISD::MOVSLDUP;
26837 SrcVT = DstVT = MVT::v4f32;
26840 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
26841 Shuffle = X86ISD::MOVSHDUP;
26842 SrcVT = DstVT = MVT::v4f32;
26847 if (MaskVT.is256BitVector() && AllowFloatDomain) {
26848 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
26849 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
26850 Shuffle = X86ISD::MOVDDUP;
26851 SrcVT = DstVT = MVT::v4f64;
26854 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
26855 Shuffle = X86ISD::MOVSLDUP;
26856 SrcVT = DstVT = MVT::v8f32;
26859 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
26860 Shuffle = X86ISD::MOVSHDUP;
26861 SrcVT = DstVT = MVT::v8f32;
26866 if (MaskVT.is512BitVector() && AllowFloatDomain) {
26867 assert(Subtarget.hasAVX512() &&
26868 "AVX512 required for 512-bit vector shuffles");
26869 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
26870 Shuffle = X86ISD::MOVDDUP;
26871 SrcVT = DstVT = MVT::v8f64;
26874 if (isTargetShuffleEquivalent(
26875 Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
26876 Shuffle = X86ISD::MOVSLDUP;
26877 SrcVT = DstVT = MVT::v16f32;
26880 if (isTargetShuffleEquivalent(
26881 Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
26882 Shuffle = X86ISD::MOVSHDUP;
26883 SrcVT = DstVT = MVT::v16f32;
26888 // Attempt to match against broadcast-from-vector.
26889 if (Subtarget.hasAVX2()) {
26890 SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
26891 if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
26892 SrcVT = DstVT = MaskVT;
26893 Shuffle = X86ISD::VBROADCAST;
26901 // Attempt to match a combined shuffle mask against supported unary immediate
26902 // permute instructions.
26903 // TODO: Investigate sharing more of this with shuffle lowering.
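// For example, a repeated v8i16 mask <0,3,2,1,4,5,6,7> permutes only the low
// four elements and matches PSHUFLW with immediate 0x6C (getV4X86ShuffleImm
// applied to {0,3,2,1}).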
26904 static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
26905 bool AllowFloatDomain,
26906 bool AllowIntDomain,
26907 const X86Subtarget &Subtarget,
26908 unsigned &Shuffle, MVT &ShuffleVT,
26909 unsigned &PermuteImm) {
26910 unsigned NumMaskElts = Mask.size();
26912 bool ContainsZeros = false;
26913 APInt Zeroable(NumMaskElts, false);
26914 for (unsigned i = 0; i != NumMaskElts; ++i) {
26916 if (isUndefOrZero(M))
26917 Zeroable.setBit(i);
26918 ContainsZeros |= (M == SM_SentinelZero);
26921 // Attempt to match against byte/bit shifts.
26922 // FIXME: Add 512-bit support.
26923 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
26924 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
26925 int ShiftAmt = matchVectorShuffleAsShift(ShuffleVT, Shuffle,
26926 MaskVT.getScalarSizeInBits(), Mask,
26927 0, Zeroable, Subtarget);
26928 if (0 < ShiftAmt) {
26929 PermuteImm = (unsigned)ShiftAmt;
26934 // Ensure we don't contain any zero elements.
26938 assert(llvm::all_of(Mask, [&](int M) {
26939 return SM_SentinelUndef <= M && M < (int)NumMaskElts;
26940 }) && "Expected unary shuffle");
26942 unsigned InputSizeInBits = MaskVT.getSizeInBits();
26943 unsigned MaskScalarSizeInBits = InputSizeInBits / Mask.size();
26944 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
26946 // Handle PSHUFLW/PSHUFHW repeated patterns.
26947 if (MaskScalarSizeInBits == 16) {
26948 SmallVector<int, 4> RepeatedMask;
26949 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
26950 ArrayRef<int> LoMask(Mask.data() + 0, 4);
26951 ArrayRef<int> HiMask(Mask.data() + 4, 4);
26953 // PSHUFLW: permute lower 4 elements only.
26954 if (isUndefOrInRange(LoMask, 0, 4) &&
26955 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
26956 Shuffle = X86ISD::PSHUFLW;
26957 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
26958 PermuteImm = getV4X86ShuffleImm(LoMask);
26962 // PSHUFHW: permute upper 4 elements only.
26963 if (isUndefOrInRange(HiMask, 4, 8) &&
26964 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
26965 // Offset the HiMask so that we can create the shuffle immediate.
26966 int OffsetHiMask[4];
26967 for (int i = 0; i != 4; ++i)
26968 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
26970 Shuffle = X86ISD::PSHUFHW;
26971 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
26972 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
26981 // We only support permutation of 32/64 bit elements after this.
26982 if (MaskScalarSizeInBits != 32 && MaskScalarSizeInBits != 64)
26985 // AVX introduced the VPERMILPD/VPERMILPS float permutes; before that we
26986 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
26987 if ((AllowFloatDomain && !AllowIntDomain) && !Subtarget.hasAVX())
26990 // Pre-AVX2 we must use float shuffles on 256-bit vectors.
26991 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2()) {
26992 AllowFloatDomain = true;
26993 AllowIntDomain = false;
26996 // Check for lane crossing permutes.
26997 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
26998 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
26999 if (Subtarget.hasAVX2() && MaskVT.is256BitVector() && Mask.size() == 4) {
27000 Shuffle = X86ISD::VPERMI;
27001 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
27002 PermuteImm = getV4X86ShuffleImm(Mask);
27005 if (Subtarget.hasAVX512() && MaskVT.is512BitVector() && Mask.size() == 8) {
27006 SmallVector<int, 4> RepeatedMask;
27007 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
27008 Shuffle = X86ISD::VPERMI;
27009 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
27010 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
27017 // VPERMILPD can permute with a non-repeating shuffle.
27018 if (AllowFloatDomain && MaskScalarSizeInBits == 64) {
27019 Shuffle = X86ISD::VPERMILPI;
27020 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
27022 for (int i = 0, e = Mask.size(); i != e; ++i) {
27024 if (M == SM_SentinelUndef)
27026 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
27027 PermuteImm |= (M & 1) << i;
27032 // We need a repeating shuffle mask for VPERMILPS/PSHUFD.
27033 SmallVector<int, 4> RepeatedMask;
27034 if (!is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask))
27037 // Narrow the repeated mask for 32-bit element permutes.
27038 SmallVector<int, 4> WordMask = RepeatedMask;
27039 if (MaskScalarSizeInBits == 64)
27040 scaleShuffleMask(2, RepeatedMask, WordMask);
27042 Shuffle = (AllowFloatDomain ? X86ISD::VPERMILPI : X86ISD::PSHUFD);
27043 ShuffleVT = (AllowFloatDomain ? MVT::f32 : MVT::i32);
27044 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
27045 PermuteImm = getV4X86ShuffleImm(WordMask);
27049 // Attempt to match a combined unary shuffle mask against supported binary
27050 // shuffle instructions.
27051 // TODO: Investigate sharing more of this with shuffle lowering.
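// For example, a v4f32 mask <0,4,1,5> interleaves the low halves of the two
// inputs and matches the UNPCKL pattern (unpcklps).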
27052 static bool matchBinaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
27053 bool AllowFloatDomain, bool AllowIntDomain,
27054 SDValue &V1, SDValue &V2, SDLoc &DL,
27056 const X86Subtarget &Subtarget,
27057 unsigned &Shuffle, MVT &ShuffleVT,
27059 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
27061 if (MaskVT.is128BitVector()) {
27062 if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
27064 Shuffle = X86ISD::MOVLHPS;
27065 ShuffleVT = MVT::v4f32;
27068 if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
27070 Shuffle = X86ISD::MOVHLPS;
27071 ShuffleVT = MVT::v4f32;
27074 if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
27075 (AllowFloatDomain || !Subtarget.hasSSE41())) {
27077 Shuffle = X86ISD::MOVSD;
27078 ShuffleVT = MaskVT;
27081 if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
27082 (AllowFloatDomain || !Subtarget.hasSSE41())) {
27083 Shuffle = X86ISD::MOVSS;
27084 ShuffleVT = MaskVT;
27089 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
27090 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
27091 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
27092 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
27093 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
27094 (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
27095 if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
27097 ShuffleVT = MaskVT;
27098 if (ShuffleVT.is256BitVector() && !Subtarget.hasAVX2())
27099 ShuffleVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
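// Attempt to match a combined shuffle mask against the supported binary
// immediate permute instructions handled below (PALIGNR, BLENDI, INSERTPS,
// SHUFPD/SHUFPS).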
27107 static bool matchBinaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
27108 bool AllowFloatDomain,
27109 bool AllowIntDomain,
27110 SDValue &V1, SDValue &V2, SDLoc &DL,
27112 const X86Subtarget &Subtarget,
27113 unsigned &Shuffle, MVT &ShuffleVT,
27114 unsigned &PermuteImm) {
27115 unsigned NumMaskElts = Mask.size();
27116 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
27118 // Attempt to match against PALIGNR byte rotate.
27119 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
27120 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
27121 int ByteRotation = matchVectorShuffleAsByteRotate(MaskVT, V1, V2, Mask);
27122 if (0 < ByteRotation) {
27123 Shuffle = X86ISD::PALIGNR;
27124 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
27125 PermuteImm = ByteRotation;
27130 // Attempt to combine to X86ISD::BLENDI.
27131 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
27132 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
27133 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
27134 uint64_t BlendMask = 0;
27135 bool ForceV1Zero = false, ForceV2Zero = false;
27136 SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
27137 if (matchVectorShuffleAsBlend(V1, V2, TargetMask, ForceV1Zero, ForceV2Zero,
27139 if (MaskVT == MVT::v16i16) {
27140 // We can only use v16i16 PBLENDW if the lanes are repeated.
27141 SmallVector<int, 8> RepeatedMask;
27142 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
27144 assert(RepeatedMask.size() == 8 &&
27145 "Repeated mask size doesn't match!");
27147 for (int i = 0; i < 8; ++i)
27148 if (RepeatedMask[i] >= 8)
27149 PermuteImm |= 1 << i;
27150 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
27151 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
27152 Shuffle = X86ISD::BLENDI;
27153 ShuffleVT = MaskVT;
27157 // Determine a type compatible with X86ISD::BLENDI.
27158 ShuffleVT = MaskVT;
27159 if (Subtarget.hasAVX2()) {
27160 if (ShuffleVT == MVT::v4i64)
27161 ShuffleVT = MVT::v8i32;
27162 else if (ShuffleVT == MVT::v2i64)
27163 ShuffleVT = MVT::v4i32;
27165 if (ShuffleVT == MVT::v2i64 || ShuffleVT == MVT::v4i32)
27166 ShuffleVT = MVT::v8i16;
27167 else if (ShuffleVT == MVT::v4i64)
27168 ShuffleVT = MVT::v4f64;
27169 else if (ShuffleVT == MVT::v8i32)
27170 ShuffleVT = MVT::v8f32;
27173 if (!ShuffleVT.isFloatingPoint()) {
27174 int Scale = EltSizeInBits / ShuffleVT.getScalarSizeInBits();
27176 scaleVectorShuffleBlendMask(BlendMask, NumMaskElts, Scale);
27177 ShuffleVT = MVT::getIntegerVT(EltSizeInBits / Scale);
27178 ShuffleVT = MVT::getVectorVT(ShuffleVT, NumMaskElts * Scale);
27181 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
27182 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
27183 PermuteImm = (unsigned)BlendMask;
27184 Shuffle = X86ISD::BLENDI;
27190 // Attempt to combine to INSERTPS.
27191 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
27192 MaskVT.is128BitVector()) {
27193 APInt Zeroable(4, 0);
27194 for (unsigned i = 0; i != NumMaskElts; ++i)
27196 Zeroable.setBit(i);
27198 if (Zeroable.getBoolValue() &&
27199 matchVectorShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
27200 Shuffle = X86ISD::INSERTPS;
27201 ShuffleVT = MVT::v4f32;
27206 // Attempt to combine to SHUFPD.
27207 if (AllowFloatDomain && EltSizeInBits == 64 &&
27208 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
27209 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
27210 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
27211 if (matchVectorShuffleWithSHUFPD(MaskVT, V1, V2, PermuteImm, Mask)) {
27212 Shuffle = X86ISD::SHUFP;
27213 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
27218 // Attempt to combine to SHUFPS.
27219 if (AllowFloatDomain && EltSizeInBits == 32 &&
27220 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
27221 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
27222 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
27223 SmallVector<int, 4> RepeatedMask;
27224 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
27225 // Match each half of the repeated mask, to determine if it's just
27226 // referencing one of the vectors, is zeroable, or is entirely undef.
27227 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
27228 int M0 = RepeatedMask[Offset];
27229 int M1 = RepeatedMask[Offset + 1];
27231 if (isUndefInRange(RepeatedMask, Offset, 2)) {
27232 return DAG.getUNDEF(MaskVT);
27233 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
27234 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
27235 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
27236 return getZeroVector(MaskVT, Subtarget, DAG, DL);
27237 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
27238 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
27239 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
27241 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
27242 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
27243 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
27250 int ShufMask[4] = {-1, -1, -1, -1};
27251 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
27252 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
27257 Shuffle = X86ISD::SHUFP;
27258 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
27259 PermuteImm = getV4X86ShuffleImm(ShufMask);
27268 /// \brief Combine an arbitrary chain of shuffles into a single instruction if possible.
27271 /// This is the leaf of the recursive combine below. When we have found some
27272 /// chain of single-use x86 shuffle instructions and accumulated the combined
27273 /// shuffle mask represented by them, this will try to pattern match that mask
27274 /// into either a single instruction if there is a special purpose instruction
27275 /// for this operation, or into a PSHUFB instruction which is a fully general
27276 /// instruction but should only be used to replace chains over a certain depth.
27277 static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
27278 ArrayRef<int> BaseMask, int Depth,
27279 bool HasVariableMask, SelectionDAG &DAG,
27280 TargetLowering::DAGCombinerInfo &DCI,
27281 const X86Subtarget &Subtarget) {
27282 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
27283 assert((Inputs.size() == 1 || Inputs.size() == 2) &&
27284 "Unexpected number of shuffle inputs!");
27286 // Find the inputs that enter the chain. Note that multiple uses are OK
27287 // here; we're not going to remove the operands we find.
27288 bool UnaryShuffle = (Inputs.size() == 1);
27289 SDValue V1 = peekThroughBitcasts(Inputs[0]);
27290 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
27291 : peekThroughBitcasts(Inputs[1]));
27293 MVT VT1 = V1.getSimpleValueType();
27294 MVT VT2 = V2.getSimpleValueType();
27295 MVT RootVT = Root.getSimpleValueType();
27296 assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
27297 VT2.getSizeInBits() == RootVT.getSizeInBits() &&
27298 "Vector size mismatch");
27303 unsigned NumBaseMaskElts = BaseMask.size();
27304 if (NumBaseMaskElts == 1) {
27305 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
27306 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, V1),
27311 unsigned RootSizeInBits = RootVT.getSizeInBits();
27312 unsigned NumRootElts = RootVT.getVectorNumElements();
27313 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
27314 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
27315 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
27317 // Don't combine if we are an AVX512/EVEX target and the mask element size
27318 // is different from the root element size - this would prevent writemasks
27319 // from being reused.
27320 // TODO - this currently prevents all lane shuffles from occurring.
27321 // TODO - check for writemasks usage instead of always preventing combining.
27322 // TODO - attempt to narrow Mask back to writemask size.
27323 bool IsEVEXShuffle =
27324 RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
27325 if (IsEVEXShuffle && (RootVT.getScalarSizeInBits() != BaseMaskEltSizeInBits))
27328 // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
27330 // Handle 128-bit lane shuffles of 256-bit vectors.
27331 // TODO - this should support binary shuffles.
27332 if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
27333 !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
27334 if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128)
27335 return false; // Nothing to do!
27336 MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
27337 unsigned PermMask = 0;
27338 PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
27339 PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
27341 Res = DAG.getBitcast(ShuffleVT, V1);
27342 DCI.AddToWorklist(Res.getNode());
27343 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
27344 DAG.getUNDEF(ShuffleVT),
27345 DAG.getConstant(PermMask, DL, MVT::i8));
27346 DCI.AddToWorklist(Res.getNode());
27347 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27352 // For masks that have been widened to 128-bit elements or more,
27353 // narrow back down to 64-bit elements.
27354 SmallVector<int, 64> Mask;
27355 if (BaseMaskEltSizeInBits > 64) {
27356 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
27357 int MaskScale = BaseMaskEltSizeInBits / 64;
27358 scaleShuffleMask(MaskScale, BaseMask, Mask);
27360 Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
27363 unsigned NumMaskElts = Mask.size();
27364 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
27366 // Determine the effective mask value type.
27367 FloatDomain &= (32 <= MaskEltSizeInBits);
27368 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
27369 : MVT::getIntegerVT(MaskEltSizeInBits);
27370 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
27372 // Only allow legal mask types.
27373 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
27376 // Attempt to match the mask against known shuffle patterns.
27377 MVT ShuffleSrcVT, ShuffleVT;
27378 unsigned Shuffle, PermuteImm;
27380 // Which shuffle domains are permitted?
27381 // Permit domain crossing at higher combine depths.
27382 bool AllowFloatDomain = FloatDomain || (Depth > 3);
27383 bool AllowIntDomain = !FloatDomain || (Depth > 3);
27385 if (UnaryShuffle) {
27386 // If we are shuffling an X86ISD::VZEXT_LOAD then we can use the load
27387 // directly if we don't shuffle the lower element and we shuffle the upper
27388 // (zero) elements within themselves.
27389 if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
27390 (V1.getScalarValueSizeInBits() % MaskEltSizeInBits) == 0) {
27391 unsigned Scale = V1.getScalarValueSizeInBits() / MaskEltSizeInBits;
27392 ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
27393 if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
27394 isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
27395 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, V1),
27401 if (matchUnaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain,
27402 V1, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
27404 if (Depth == 1 && Root.getOpcode() == Shuffle)
27405 return false; // Nothing to do!
27406 if (IsEVEXShuffle && (NumRootElts != ShuffleVT.getVectorNumElements()))
27407 return false; // AVX512 Writemask clash.
27408 Res = DAG.getBitcast(ShuffleSrcVT, V1);
27409 DCI.AddToWorklist(Res.getNode());
27410 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
27411 DCI.AddToWorklist(Res.getNode());
27412 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27417 if (matchUnaryPermuteVectorShuffle(MaskVT, Mask, AllowFloatDomain,
27418 AllowIntDomain, Subtarget, Shuffle,
27419 ShuffleVT, PermuteImm)) {
27420 if (Depth == 1 && Root.getOpcode() == Shuffle)
27421 return false; // Nothing to do!
27422 if (IsEVEXShuffle && (NumRootElts != ShuffleVT.getVectorNumElements()))
27423 return false; // AVX512 Writemask clash.
27424 Res = DAG.getBitcast(ShuffleVT, V1);
27425 DCI.AddToWorklist(Res.getNode());
27426 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
27427 DAG.getConstant(PermuteImm, DL, MVT::i8));
27428 DCI.AddToWorklist(Res.getNode());
27429 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27435 if (matchBinaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain,
27436 V1, V2, DL, DAG, Subtarget, Shuffle, ShuffleVT,
27438 if (Depth == 1 && Root.getOpcode() == Shuffle)
27439 return false; // Nothing to do!
27440 if (IsEVEXShuffle && (NumRootElts != ShuffleVT.getVectorNumElements()))
27441 return false; // AVX512 Writemask clash.
27442 V1 = DAG.getBitcast(ShuffleVT, V1);
27443 DCI.AddToWorklist(V1.getNode());
27444 V2 = DAG.getBitcast(ShuffleVT, V2);
27445 DCI.AddToWorklist(V2.getNode());
27446 Res = DAG.getNode(Shuffle, DL, ShuffleVT, V1, V2);
27447 DCI.AddToWorklist(Res.getNode());
27448 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27453 if (matchBinaryPermuteVectorShuffle(MaskVT, Mask, AllowFloatDomain,
27454 AllowIntDomain, V1, V2, DL, DAG,
27455 Subtarget, Shuffle, ShuffleVT,
27457 if (Depth == 1 && Root.getOpcode() == Shuffle)
27458 return false; // Nothing to do!
27459 if (IsEVEXShuffle && (NumRootElts != ShuffleVT.getVectorNumElements()))
27460 return false; // AVX512 Writemask clash.
27461 V1 = DAG.getBitcast(ShuffleVT, V1);
27462 DCI.AddToWorklist(V1.getNode());
27463 V2 = DAG.getBitcast(ShuffleVT, V2);
27464 DCI.AddToWorklist(V2.getNode());
27465 Res = DAG.getNode(Shuffle, DL, ShuffleVT, V1, V2,
27466 DAG.getConstant(PermuteImm, DL, MVT::i8));
27467 DCI.AddToWorklist(Res.getNode());
27468 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27473 // Don't try to re-form single instruction chains under any circumstances now
27474 // that we've done encoding canonicalization for them.
27478 bool MaskContainsZeros =
27479 any_of(Mask, [](int M) { return M == SM_SentinelZero; });
27481 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
27482 // If we have a single input lane-crossing shuffle then lower to VPERMV.
27483 if (UnaryShuffle && (Depth >= 3 || HasVariableMask) && !MaskContainsZeros &&
27484 ((Subtarget.hasAVX2() &&
27485 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
27486 (Subtarget.hasAVX512() &&
27487 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
27488 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
27489 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
27490 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
27491 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
27492 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
27493 MVT VPermMaskSVT = MVT::getIntegerVT(MaskEltSizeInBits);
27494 MVT VPermMaskVT = MVT::getVectorVT(VPermMaskSVT, NumMaskElts);
27495 SDValue VPermMask = getConstVector(Mask, VPermMaskVT, DAG, DL, true);
27496 DCI.AddToWorklist(VPermMask.getNode());
27497 Res = DAG.getBitcast(MaskVT, V1);
27498 DCI.AddToWorklist(Res.getNode());
27499 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
27500 DCI.AddToWorklist(Res.getNode());
27501 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27506 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
27507 // vector as the second source.
27508 if (UnaryShuffle && (Depth >= 3 || HasVariableMask) &&
27509 ((Subtarget.hasAVX512() &&
27510 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
27511 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
27512 (Subtarget.hasVLX() &&
27513 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
27514 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
27515 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
27516 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
27517 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
27518 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
27519 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
27520 for (unsigned i = 0; i != NumMaskElts; ++i)
27521 if (Mask[i] == SM_SentinelZero)
27522 Mask[i] = NumMaskElts + i;
27524 MVT VPermMaskSVT = MVT::getIntegerVT(MaskEltSizeInBits);
27525 MVT VPermMaskVT = MVT::getVectorVT(VPermMaskSVT, NumMaskElts);
27526 SDValue VPermMask = getConstVector(Mask, VPermMaskVT, DAG, DL, true);
27527 DCI.AddToWorklist(VPermMask.getNode());
27528 Res = DAG.getBitcast(MaskVT, V1);
27529 DCI.AddToWorklist(Res.getNode());
27530 SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
27531 DCI.AddToWorklist(Zero.getNode());
27532 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
27533 DCI.AddToWorklist(Res.getNode());
27534 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27539 // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
27540 if ((Depth >= 3 || HasVariableMask) && !MaskContainsZeros &&
27541 ((Subtarget.hasAVX512() &&
27542 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
27543 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
27544 (Subtarget.hasVLX() &&
27545 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
27546 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
27547 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
27548 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
27549 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
27550 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
27551 MVT VPermMaskSVT = MVT::getIntegerVT(MaskEltSizeInBits);
27552 MVT VPermMaskVT = MVT::getVectorVT(VPermMaskSVT, NumMaskElts);
27553 SDValue VPermMask = getConstVector(Mask, VPermMaskVT, DAG, DL, true);
27554 DCI.AddToWorklist(VPermMask.getNode());
27555 V1 = DAG.getBitcast(MaskVT, V1);
27556 DCI.AddToWorklist(V1.getNode());
27557 V2 = DAG.getBitcast(MaskVT, V2);
27558 DCI.AddToWorklist(V2.getNode());
27559 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
27560 DCI.AddToWorklist(Res.getNode());
27561 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27568 // See if we can combine a single input shuffle with zeros to a bit-mask,
27569 // which is much simpler than any shuffle.
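// e.g. a v4i32 mask <0, ZERO, 2, ZERO> satisfies the in-place requirement
// checked below and becomes an AND with the constant vector <-1, 0, -1, 0>
// (illustrative example; undef mask elements stay undef in the bit mask).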
27570 if (UnaryShuffle && MaskContainsZeros && (Depth >= 3 || HasVariableMask) &&
27571 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
27572 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
27573 APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
27574 APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
27575 APInt UndefElts(NumMaskElts, 0);
27576 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
27577 for (unsigned i = 0; i != NumMaskElts; ++i) {
27579 if (M == SM_SentinelUndef) {
27580 UndefElts.setBit(i);
27583 if (M == SM_SentinelZero)
27585 EltBits[i] = AllOnes;
27587 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
27588 DCI.AddToWorklist(BitMask.getNode());
27589 Res = DAG.getBitcast(MaskVT, V1);
27590 DCI.AddToWorklist(Res.getNode());
27591 unsigned AndOpcode =
27592 FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
27593 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
27594 DCI.AddToWorklist(Res.getNode());
27595 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27600 // If we have a single input shuffle with different shuffle patterns in the
27601 // 128-bit lanes, use the variable mask to lower to VPERMILPS.
27602 // TODO: Combine other mask types at higher depths.
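// e.g. a v8f32 mask <1,0,3,2, 6,7,4,5> uses pattern <1,0,3,2> in lane 0 but
// <2,3,0,1> in lane 1, so it cannot be expressed with a single VPERMILPS
// immediate; the per-element (M % 4) index vector built below can encode it.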
27603 if (UnaryShuffle && HasVariableMask && !MaskContainsZeros &&
27604 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
27605 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
27606 SmallVector<SDValue, 16> VPermIdx;
27607 for (int M : Mask) {
27609 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
27610 VPermIdx.push_back(Idx);
27612 MVT VPermMaskVT = MVT::getVectorVT(MVT::i32, NumMaskElts);
27613 SDValue VPermMask = DAG.getBuildVector(VPermMaskVT, DL, VPermIdx);
27614 DCI.AddToWorklist(VPermMask.getNode());
27615 Res = DAG.getBitcast(MaskVT, V1);
27616 DCI.AddToWorklist(Res.getNode());
27617 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
27618 DCI.AddToWorklist(Res.getNode());
27619 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27624 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
27625 // to VPERMIL2PD/VPERMIL2PS.
27626 if ((Depth >= 3 || HasVariableMask) && Subtarget.hasXOP() &&
27627 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
27628 MaskVT == MVT::v8f32)) {
27629 // VPERMIL2 Operation.
27630 // Bits[3] - Match Bit.
27631 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
27632 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
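// In the encoding built below, adding 4 (setting bit 2) marks an element as
// coming from V2, and a selector value of 8 sets the match bit so that the
// element can be zeroed under control of the M2ZImm operand.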
27633 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
27634 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
27635 SmallVector<int, 8> VPerm2Idx;
27636 MVT MaskIdxSVT = MVT::getIntegerVT(MaskVT.getScalarSizeInBits());
27637 MVT MaskIdxVT = MVT::getVectorVT(MaskIdxSVT, NumMaskElts);
27638 unsigned M2ZImm = 0;
27639 for (int M : Mask) {
27640 if (M == SM_SentinelUndef) {
27641 VPerm2Idx.push_back(-1);
27644 if (M == SM_SentinelZero) {
27646 VPerm2Idx.push_back(8);
27649 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
27650 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
27651 VPerm2Idx.push_back(Index);
27653 V1 = DAG.getBitcast(MaskVT, V1);
27654 DCI.AddToWorklist(V1.getNode());
27655 V2 = DAG.getBitcast(MaskVT, V2);
27656 DCI.AddToWorklist(V2.getNode());
27657 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, MaskIdxVT, DAG, DL, true);
27658 DCI.AddToWorklist(VPerm2MaskOp.getNode());
27659 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
27660 DAG.getConstant(M2ZImm, DL, MVT::i8));
27661 DCI.AddToWorklist(Res.getNode());
27662 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27667 // If we have 3 or more shuffle instructions or a chain involving a variable
27668 // mask, we can replace them with a single PSHUFB instruction profitably.
27669 // Intel's manuals suggest only using PSHUFB if doing so replaces 5
27670 // instructions, but in practice PSHUFB tends to be *very* fast so we're
27671 // more aggressive.
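// Note: PSHUFB writes zero to a destination byte whenever bit 7 of its
// control byte is set, which is why zeroable elements are encoded as 255
// (0xFF) in the mask built below.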
27672 if (UnaryShuffle && (Depth >= 3 || HasVariableMask) &&
27673 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
27674 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
27675 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
27676 SmallVector<SDValue, 16> PSHUFBMask;
27677 int NumBytes = RootVT.getSizeInBits() / 8;
27678 int Ratio = NumBytes / NumMaskElts;
27679 for (int i = 0; i < NumBytes; ++i) {
27680 int M = Mask[i / Ratio];
27681 if (M == SM_SentinelUndef) {
27682 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
27685 if (M == SM_SentinelZero) {
27686 PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
27689 M = Ratio * M + i % Ratio;
27690 assert((M / 16) == (i / 16) && "Lane crossing detected");
27691 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
27693 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
27694 Res = DAG.getBitcast(ByteVT, V1);
27695 DCI.AddToWorklist(Res.getNode());
27696 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
27697 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
27698 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
27699 DCI.AddToWorklist(Res.getNode());
27700 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27705 // With XOP, if we have a 128-bit binary input shuffle we can always combine
27706 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
27707 // slower than PSHUFB on targets that support both.
27708 if ((Depth >= 3 || HasVariableMask) && RootVT.is128BitVector() &&
27709 Subtarget.hasXOP()) {
27710 // VPPERM Mask Operation
27711 // Bits[4:0] - Byte Index (0 - 31)
27712 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
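// e.g. a control byte of 128 (0b10000000) has op field 4 and therefore
// produces a zero byte, which is how SM_SentinelZero elements are encoded in
// the loop below.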
27713 SmallVector<SDValue, 16> VPPERMMask;
27715 int Ratio = NumBytes / NumMaskElts;
27716 for (int i = 0; i < NumBytes; ++i) {
27717 int M = Mask[i / Ratio];
27718 if (M == SM_SentinelUndef) {
27719 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
27722 if (M == SM_SentinelZero) {
27723 VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
27726 M = Ratio * M + i % Ratio;
27727 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
27729 MVT ByteVT = MVT::v16i8;
27730 V1 = DAG.getBitcast(ByteVT, V1);
27731 DCI.AddToWorklist(V1.getNode());
27732 V2 = DAG.getBitcast(ByteVT, V2);
27733 DCI.AddToWorklist(V2.getNode());
27734 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
27735 DCI.AddToWorklist(VPPERMMaskOp.getNode());
27736 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
27737 DCI.AddToWorklist(Res.getNode());
27738 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
27743 // Failed to find any combines.
27747 // Attempt to constant fold all of the constant source ops.
27748 // Returns true if the entire shuffle is folded to a constant.
27749 // TODO: Extend this to merge multiple constant Ops and update the mask.
27750 static bool combineX86ShufflesConstants(const SmallVectorImpl<SDValue> &Ops,
27751 ArrayRef<int> Mask, SDValue Root,
27752 bool HasVariableMask, SelectionDAG &DAG,
27753 TargetLowering::DAGCombinerInfo &DCI,
27754 const X86Subtarget &Subtarget) {
27755 MVT VT = Root.getSimpleValueType();
27757 unsigned SizeInBits = VT.getSizeInBits();
27758 unsigned NumMaskElts = Mask.size();
27759 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
27760 unsigned NumOps = Ops.size();
27762 // Extract constant bits from each source op.
27763 bool OneUseConstantOp = false;
27764 SmallVector<APInt, 16> UndefEltsOps(NumOps);
27765 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
27766 for (unsigned i = 0; i != NumOps; ++i) {
27767 SDValue SrcOp = Ops[i];
27768 OneUseConstantOp |= SrcOp.hasOneUse();
27769 if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
27774 // Only fold if at least one of the constants is only used once or
27775 // the combined shuffle has included a variable mask shuffle; this
27776 // is to avoid constant pool bloat.
27777 if (!OneUseConstantOp && !HasVariableMask)
27780 // Shuffle the constant bits according to the mask.
27781 APInt UndefElts(NumMaskElts, 0);
27782 APInt ZeroElts(NumMaskElts, 0);
27783 APInt ConstantElts(NumMaskElts, 0);
27784 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
27785 APInt::getNullValue(MaskSizeInBits));
27786 for (unsigned i = 0; i != NumMaskElts; ++i) {
27788 if (M == SM_SentinelUndef) {
27789 UndefElts.setBit(i);
27791 } else if (M == SM_SentinelZero) {
27792 ZeroElts.setBit(i);
27795 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
27797 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
27798 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
27800 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
27801 if (SrcUndefElts[SrcMaskIdx]) {
27802 UndefElts.setBit(i);
27806 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
27807 APInt &Bits = SrcEltBits[SrcMaskIdx];
27809 ZeroElts.setBit(i);
27813 ConstantElts.setBit(i);
27814 ConstantBitData[i] = Bits;
27816 assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
27818 // Create the constant data.
27820 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
27821 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
27823 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
27825 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
27828 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
27829 DCI.AddToWorklist(CstOp.getNode());
27830 DCI.CombineTo(Root.getNode(), DAG.getBitcast(VT, CstOp));
27834 /// \brief Fully generic combining of x86 shuffle instructions.
27836 /// This should be the last combine run over the x86 shuffle instructions. Once
27837 /// they have been fully optimized, this will recursively consider all chains
27838 /// of single-use shuffle instructions, build a generic model of the cumulative
27839 /// shuffle operation, and check for simpler instructions which implement this
27840 /// operation. We use this primarily for two purposes:
27842 /// 1) Collapse generic shuffles to specialized single instructions when
27843 /// equivalent. In most cases, this is just an encoding size win, but
27844 /// sometimes we will collapse multiple generic shuffles into a single
27845 /// special-purpose shuffle.
27846 /// 2) Look for sequences of shuffle instructions with 3 or more total
27847 /// instructions, and replace them with the slightly more expensive SSSE3
27848 /// PSHUFB instruction if available. We do this as the last combining step
27849 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
27850 /// a suitable short sequence of other instructions. The PSHUFB will either
27851 /// use a register or have to read from memory and so is slightly (but only
27852 /// slightly) more expensive than the other shuffle instructions.
27854 /// Because this is inherently a quadratic operation (for each shuffle in
27855 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
27856 /// This should never be an issue in practice as the shuffle lowering doesn't
27857 /// produce sequences of more than 8 instructions.
27859 /// FIXME: We will currently miss some cases where the redundant shuffling
27860 /// would simplify under the threshold for PSHUFB formation because of
27861 /// combine-ordering. To fix this, we should do the redundant instruction
27862 /// combining in this recursive walk.
27863 static bool combineX86ShufflesRecursively(ArrayRef<SDValue> SrcOps,
27864 int SrcOpIndex, SDValue Root,
27865 ArrayRef<int> RootMask,
27866 ArrayRef<const SDNode*> SrcNodes,
27867 int Depth, bool HasVariableMask,
27869 TargetLowering::DAGCombinerInfo &DCI,
27870 const X86Subtarget &Subtarget) {
27871 // Bound the depth of our recursive combine because this is ultimately
27872 // quadratic in nature.
27876 // Directly rip through bitcasts to find the underlying operand.
27877 SDValue Op = SrcOps[SrcOpIndex];
27878 Op = peekThroughOneUseBitcasts(Op);
27880 MVT VT = Op.getSimpleValueType();
27881 if (!VT.isVector())
27882 return false; // Bail if we hit a non-vector.
27884 assert(Root.getSimpleValueType().isVector() &&
27885 "Shuffles operate on vector types!");
27886 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
27887 "Can only combine shuffles of the same vector register size.");
27889 // Extract target shuffle mask and resolve sentinels and inputs.
27890 SmallVector<int, 64> OpMask;
27891 SmallVector<SDValue, 2> OpInputs;
27892 if (!resolveTargetShuffleInputs(Op, OpInputs, OpMask))
27895 assert(OpInputs.size() <= 2 && "Too many shuffle inputs");
27896 SDValue Input0 = (OpInputs.size() > 0 ? OpInputs[0] : SDValue());
27897 SDValue Input1 = (OpInputs.size() > 1 ? OpInputs[1] : SDValue());
27899 // Add the inputs to the Ops list, avoiding duplicates.
27900 SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end());
27902 int InputIdx0 = -1, InputIdx1 = -1;
27903 for (int i = 0, e = Ops.size(); i < e; ++i) {
27904 SDValue BC = peekThroughBitcasts(Ops[i]);
27905 if (Input0 && BC == peekThroughBitcasts(Input0))
27907 if (Input1 && BC == peekThroughBitcasts(Input1))
27911 if (Input0 && InputIdx0 < 0) {
27912 InputIdx0 = SrcOpIndex;
27913 Ops[SrcOpIndex] = Input0;
27915 if (Input1 && InputIdx1 < 0) {
27916 InputIdx1 = Ops.size();
27917 Ops.push_back(Input1);
27920 assert(((RootMask.size() > OpMask.size() &&
27921 RootMask.size() % OpMask.size() == 0) ||
27922 (OpMask.size() > RootMask.size() &&
27923 OpMask.size() % RootMask.size() == 0) ||
27924 OpMask.size() == RootMask.size()) &&
27925 "The smaller number of elements must divide the larger.");
27926 int MaskWidth = std::max<int>(OpMask.size(), RootMask.size());
27927 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
27928 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
27929 assert(((RootRatio == 1 && OpRatio == 1) ||
27930 (RootRatio == 1) != (OpRatio == 1)) &&
27931 "Must not have a ratio for both incoming and op masks!");
27933 SmallVector<int, 64> Mask((unsigned)MaskWidth, SM_SentinelUndef);
27935 // Merge this shuffle operation's mask into our accumulated mask. Note that
27936 // this shuffle's mask will be the first applied to the input, followed by the
27937 // root mask to get us all the way to the root value arrangement. The reason
27938 // for this order is that we are recursing up the operation chain.
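// For example (ignoring ratio scaling and sentinels), with a root mask of
// <2,3,0,1> and an op mask of <1,0,3,2>, the merged mask applied to the op's
// input is <3,2,1,0>, i.e. Mask[i] = OpMask[RootMask[i]].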
27939 for (int i = 0; i < MaskWidth; ++i) {
27940 int RootIdx = i / RootRatio;
27941 if (RootMask[RootIdx] < 0) {
27942 // This is a zero or undef lane, we're done.
27943 Mask[i] = RootMask[RootIdx];
27947 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
27949 // Just insert the scaled root mask value if it references an input other
27950 // than the SrcOp we're currently inserting.
27951 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
27952 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
27953 Mask[i] = RootMaskedIdx;
27957 RootMaskedIdx %= MaskWidth;
27959 int OpIdx = RootMaskedIdx / OpRatio;
27960 if (OpMask[OpIdx] < 0) {
27961 // The incoming lanes are zero or undef, it doesn't matter which ones we
27963 Mask[i] = OpMask[OpIdx];
27967 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
27968 int OpMaskedIdx = OpMask[OpIdx] * OpRatio + RootMaskedIdx % OpRatio;
27969 OpMaskedIdx %= MaskWidth;
27971 if (OpMask[OpIdx] < (int)OpMask.size()) {
27972 assert(0 <= InputIdx0 && "Unknown target shuffle input");
27973 OpMaskedIdx += InputIdx0 * MaskWidth;
27975 assert(0 <= InputIdx1 && "Unknown target shuffle input");
27976 OpMaskedIdx += InputIdx1 * MaskWidth;
27979 Mask[i] = OpMaskedIdx;
27982 // Handle the all undef/zero cases early.
27983 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
27984 DCI.CombineTo(Root.getNode(), DAG.getUNDEF(Root.getValueType()));
27987 if (all_of(Mask, [](int Idx) { return Idx < 0; })) {
27988 // TODO - should we handle the mixed zero/undef case as well? Just returning
27989 // a zero mask will lose information on undef elements, possibly reducing
27990 // future combine possibilities.
27991 DCI.CombineTo(Root.getNode(), getZeroVector(Root.getSimpleValueType(),
27992 Subtarget, DAG, SDLoc(Root)));
27996 // Remove unused shuffle source ops.
27997 resolveTargetShuffleInputsAndMask(Ops, Mask);
27998 assert(!Ops.empty() && "Shuffle with no inputs detected");
28000 HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
28002 // Update the list of shuffle nodes that have been combined so far.
28003 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
28005 CombinedNodes.push_back(Op.getNode());
28007 // See if we can recurse into each shuffle source op (if it's a target
28008 // shuffle). The source op should only be combined if it either has a
28009 // single use (i.e. current Op) or all its users have already been combined.
28010 for (int i = 0, e = Ops.size(); i < e; ++i)
28011 if (Ops[i].getNode()->hasOneUse() ||
28012 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
28013 if (combineX86ShufflesRecursively(Ops, i, Root, Mask, CombinedNodes,
28014 Depth + 1, HasVariableMask, DAG, DCI,
28018 // Attempt to constant fold all of the constant source ops.
28019 if (combineX86ShufflesConstants(Ops, Mask, Root, HasVariableMask, DAG, DCI,
28023 // We can only combine unary and binary shuffle mask cases.
28024 if (Ops.size() > 2)
28027 // Minor canonicalization of the accumulated shuffle mask to make it easier
28028 // to match below. All this does is detect masks with sequential pairs of
28029 // elements, and shrink them to the half-width mask. It does this in a loop
28030 // so it will reduce the size of the mask to the minimal width mask which
28031 // performs an equivalent shuffle.
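// e.g. a v8 mask <0,1, 6,7, 4,5, 2,3> consists of sequential pairs and widens
// to the v4 mask <0,3,2,1>; repeating until no further widening is possible
// yields the minimal equivalent mask.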
28032 SmallVector<int, 64> WidenedMask;
28033 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
28034 Mask = std::move(WidenedMask);
28037 // Canonicalization of binary shuffle masks to improve pattern matching by
28038 // commuting the inputs.
28039 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
28040 ShuffleVectorSDNode::commuteMask(Mask);
28041 std::swap(Ops[0], Ops[1]);
28044 return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask, DAG,
28048 /// \brief Get the PSHUF-style mask from PSHUF node.
28050 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
28051 /// PSHUF-style masks that can be reused with such instructions.
28052 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
28053 MVT VT = N.getSimpleValueType();
28054 SmallVector<int, 4> Mask;
28055 SmallVector<SDValue, 2> Ops;
28058 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
28062 // If we have more than 128-bits, only the low 128-bits of shuffle mask
28063 // matter. Check that the upper masks are repeats and remove them.
28064 if (VT.getSizeInBits() > 128) {
28065 int LaneElts = 128 / VT.getScalarSizeInBits();
28067 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
28068 for (int j = 0; j < LaneElts; ++j)
28069 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
28070 "Mask doesn't repeat in high 128-bit lanes!");
28072 Mask.resize(LaneElts);
28075 switch (N.getOpcode()) {
28076 case X86ISD::PSHUFD:
28078 case X86ISD::PSHUFLW:
28081 case X86ISD::PSHUFHW:
28082 Mask.erase(Mask.begin(), Mask.begin() + 4);
28083 for (int &M : Mask)
28087 llvm_unreachable("No valid shuffle instruction found!");
28091 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
28093 /// We walk up the chain and look for a combinable shuffle, skipping over
28094 /// shuffles that we could hoist this shuffle's transformation past without
28095 /// altering anything.
28097 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
28098 SelectionDAG &DAG) {
28099 assert(N.getOpcode() == X86ISD::PSHUFD &&
28100 "Called with something other than an x86 128-bit half shuffle!");
28103 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
28104 // of the shuffles in the chain so that we can form a fresh chain to replace this one.
28106 SmallVector<SDValue, 8> Chain;
28107 SDValue V = N.getOperand(0);
28108 for (; V.hasOneUse(); V = V.getOperand(0)) {
28109 switch (V.getOpcode()) {
28111 return SDValue(); // Nothing combined!
28114 // Skip bitcasts as we always know the type for the target specific
28118 case X86ISD::PSHUFD:
28119 // Found another dword shuffle.
28122 case X86ISD::PSHUFLW:
28123 // Check that the low words (being shuffled) are the identity in the
28124 // dword shuffle, and the high words are self-contained.
28125 if (Mask[0] != 0 || Mask[1] != 1 ||
28126 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
28129 Chain.push_back(V);
28132 case X86ISD::PSHUFHW:
28133 // Check that the high words (being shuffled) are the identity in the
28134 // dword shuffle, and the low words are self-contained.
28135 if (Mask[2] != 2 || Mask[3] != 3 ||
28136 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
28139 Chain.push_back(V);
28142 case X86ISD::UNPCKL:
28143 case X86ISD::UNPCKH:
28144 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
28145 // shuffle into a preceding word shuffle.
28146 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
28147 V.getSimpleValueType().getVectorElementType() != MVT::i16)
28150 // Search for a half-shuffle which we can combine with.
28151 unsigned CombineOp =
28152 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
28153 if (V.getOperand(0) != V.getOperand(1) ||
28154 !V->isOnlyUserOf(V.getOperand(0).getNode()))
28156 Chain.push_back(V);
28157 V = V.getOperand(0);
28159 switch (V.getOpcode()) {
28161 return SDValue(); // Nothing to combine.
28163 case X86ISD::PSHUFLW:
28164 case X86ISD::PSHUFHW:
28165 if (V.getOpcode() == CombineOp)
28168 Chain.push_back(V);
28172 V = V.getOperand(0);
28176 } while (V.hasOneUse());
28179 // Break out of the loop if we break out of the switch.
28183 if (!V.hasOneUse())
28184 // We fell out of the loop without finding a viable combining instruction.
28187 // Merge this node's mask and our incoming mask.
28188 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
28189 for (int &M : Mask)
28191 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
28192 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
28194 // Rebuild the chain around this new shuffle.
28195 while (!Chain.empty()) {
28196 SDValue W = Chain.pop_back_val();
28198 if (V.getValueType() != W.getOperand(0).getValueType())
28199 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
28201 switch (W.getOpcode()) {
28203 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
28205 case X86ISD::UNPCKL:
28206 case X86ISD::UNPCKH:
28207 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
28210 case X86ISD::PSHUFD:
28211 case X86ISD::PSHUFLW:
28212 case X86ISD::PSHUFHW:
28213 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
28217 if (V.getValueType() != N.getValueType())
28218 V = DAG.getBitcast(N.getValueType(), V);
28220 // Return the new chain to replace N.
28224 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
28227 /// We walk up the chain, skipping shuffles of the other half and looking
28228 /// through shuffles which switch halves trying to find a shuffle of the same
28229 /// pair of dwords.
28230 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
28232 TargetLowering::DAGCombinerInfo &DCI) {
28234 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
28235 "Called with something other than an x86 128-bit half shuffle!");
28237 unsigned CombineOpcode = N.getOpcode();
28239 // Walk up a single-use chain looking for a combinable shuffle.
28240 SDValue V = N.getOperand(0);
28241 for (; V.hasOneUse(); V = V.getOperand(0)) {
28242 switch (V.getOpcode()) {
28244 return false; // Nothing combined!
28247 // Skip bitcasts as we always know the type for the target specific
28251 case X86ISD::PSHUFLW:
28252 case X86ISD::PSHUFHW:
28253 if (V.getOpcode() == CombineOpcode)
28256 // Other-half shuffles are no-ops.
28259 // Break out of the loop if we break out of the switch.
28263 if (!V.hasOneUse())
28264 // We fell out of the loop without finding a viable combining instruction.
28267 // Combine away the bottom node as its shuffle will be accumulated into
28268 // a preceding shuffle.
28269 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
28271 // Record the old value.
28274 // Merge this node's mask and our incoming mask (adjusted to account for all
28275 // the pshufd instructions encountered).
28276 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
28277 for (int &M : Mask)
28279 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
28280 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
28282 // Check that the shuffles didn't cancel each other out. If not, we need to
28283 // combine to the new one.
28285 // Replace the combinable shuffle with the combined one, updating all users
28286 // so that we re-evaluate the chain here.
28287 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
28292 /// \brief Try to combine x86 target specific shuffles.
28293 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
28294 TargetLowering::DAGCombinerInfo &DCI,
28295 const X86Subtarget &Subtarget) {
28297 MVT VT = N.getSimpleValueType();
28298 SmallVector<int, 4> Mask;
28300 unsigned Opcode = N.getOpcode();
28302 case X86ISD::PSHUFD:
28303 case X86ISD::PSHUFLW:
28304 case X86ISD::PSHUFHW:
28305 Mask = getPSHUFShuffleMask(N);
28306 assert(Mask.size() == 4);
28308 case X86ISD::UNPCKL: {
28309 auto Op0 = N.getOperand(0);
28310 auto Op1 = N.getOperand(1);
28311 unsigned Opcode0 = Op0.getOpcode();
28312 unsigned Opcode1 = Op1.getOpcode();
28314 // Combine X86ISD::UNPCKL with 2 X86ISD::FHADD inputs into a single
28315 // X86ISD::FHADD. This is generated by UINT_TO_FP v2f64 scalarization.
28316 // TODO: Add other horizontal operations as required.
28317 if (VT == MVT::v2f64 && Opcode0 == Opcode1 && Opcode0 == X86ISD::FHADD)
28318 return DAG.getNode(Opcode0, DL, VT, Op0.getOperand(0), Op1.getOperand(0));
28320 // Combine X86ISD::UNPCKL and ISD::VECTOR_SHUFFLE into X86ISD::UNPCKH, in
28321 // which X86ISD::UNPCKL has an ISD::UNDEF operand, and ISD::VECTOR_SHUFFLE
28322 // moves the upper half elements into the lower half. For example:
28324 // t2: v16i8 = vector_shuffle<8,9,10,11,12,13,14,15,u,u,u,u,u,u,u,u> t1,
28326 // t3: v16i8 = X86ISD::UNPCKL undef:v16i8, t2
28328 // will be combined to:
28330 // t3: v16i8 = X86ISD::UNPCKH undef:v16i8, t1
28332 // This is only for 128-bit vectors. From SSE4.1 onward this combine may not
28333 // happen due to advanced instructions.
28334 if (!VT.is128BitVector())
28337 if (Op0.isUndef() && Opcode1 == ISD::VECTOR_SHUFFLE) {
28338 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op1.getNode())->getMask();
28340 unsigned NumElts = VT.getVectorNumElements();
28341 SmallVector<int, 8> ExpectedMask(NumElts, -1);
28342 std::iota(ExpectedMask.begin(), ExpectedMask.begin() + NumElts / 2,
28345 auto ShufOp = Op1.getOperand(0);
28346 if (isShuffleEquivalent(Op1, ShufOp, Mask, ExpectedMask))
28347 return DAG.getNode(X86ISD::UNPCKH, DL, VT, N.getOperand(0), ShufOp);
28351 case X86ISD::BLENDI: {
28352 SDValue V0 = N->getOperand(0);
28353 SDValue V1 = N->getOperand(1);
28354 assert(VT == V0.getSimpleValueType() && VT == V1.getSimpleValueType() &&
28355 "Unexpected input vector types");
28357 // Canonicalize a v2f64 blend with a mask of 2 by swapping the vector
28358 // operands and changing the mask to 1. This saves us a bunch of
28359 // pattern-matching possibilities related to scalar math ops in SSE/AVX.
28360 // x86InstrInfo knows how to commute this back after instruction selection
28361 // if it would help register allocation.
28363 // TODO: If optimizing for size or a processor that doesn't suffer from
28364 // partial register update stalls, this should be transformed into a MOVSD
28365 // instruction because a MOVSD is 1-2 bytes smaller than a BLENDPD.
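// e.g. (v2f64 BLENDI V0, V1, 2) and (v2f64 BLENDI V1, V0, 1) both produce
// <V0[0], V1[1]>, but the latter form matches the scalar-math patterns.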
28367 if (VT == MVT::v2f64)
28368 if (auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(2)))
28369 if (Mask->getZExtValue() == 2 && !isShuffleFoldableLoad(V0)) {
28370 SDValue NewMask = DAG.getConstant(1, DL, MVT::i8);
28371 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask);
28376 case X86ISD::MOVSD:
28377 case X86ISD::MOVSS: {
28378 SDValue V0 = peekThroughBitcasts(N->getOperand(0));
28379 SDValue V1 = peekThroughBitcasts(N->getOperand(1));
28380 bool isZero0 = ISD::isBuildVectorAllZeros(V0.getNode());
28381 bool isZero1 = ISD::isBuildVectorAllZeros(V1.getNode());
28382 if (isZero0 && isZero1)
28385 // We often lower to MOVSD/MOVSS from integer as well as native float
28386 // types; remove unnecessary domain-crossing bitcasts if we can to make it
28387 // easier to combine shuffles later on. We've already accounted for the
28388 // domain switching cost when we decided to lower with it.
28389 bool isFloat = VT.isFloatingPoint();
28390 bool isFloat0 = V0.getSimpleValueType().isFloatingPoint();
28391 bool isFloat1 = V1.getSimpleValueType().isFloatingPoint();
28392 if ((isFloat != isFloat0 || isZero0) && (isFloat != isFloat1 || isZero1)) {
28393 MVT NewVT = isFloat ? (X86ISD::MOVSD == Opcode ? MVT::v2i64 : MVT::v4i32)
28394 : (X86ISD::MOVSD == Opcode ? MVT::v2f64 : MVT::v4f32);
28395 V0 = DAG.getBitcast(NewVT, V0);
28396 V1 = DAG.getBitcast(NewVT, V1);
28397 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, NewVT, V0, V1));
28402 case X86ISD::INSERTPS: {
28403 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
28404 SDValue Op0 = N.getOperand(0);
28405 SDValue Op1 = N.getOperand(1);
28406 SDValue Op2 = N.getOperand(2);
28407 unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
28408 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
28409 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
28410 unsigned ZeroMask = InsertPSMask & 0xF;
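// (INSERTPS immediate layout: bits[7:6] select the source element, bits[5:4]
// select the destination element, and each set bit in bits[3:0] zeroes the
// corresponding destination element.)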
28412 // If we zero out all elements from Op0 then we don't need to reference it.
28413 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
28414 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
28415 DAG.getConstant(InsertPSMask, DL, MVT::i8));
28417 // If we zero out the element from Op1 then we don't need to reference it.
28418 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
28419 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
28420 DAG.getConstant(InsertPSMask, DL, MVT::i8));
28422 // Attempt to merge insertps Op1 with an inner target shuffle node.
28423 SmallVector<int, 8> TargetMask1;
28424 SmallVector<SDValue, 2> Ops1;
28425 if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) {
28426 int M = TargetMask1[SrcIdx];
28427 if (isUndefOrZero(M)) {
28428 // Zero/UNDEF insertion - zero out element and remove dependency.
28429 InsertPSMask |= (1u << DstIdx);
28430 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
28431 DAG.getConstant(InsertPSMask, DL, MVT::i8));
28433 // Update insertps mask srcidx and reference the source input directly.
28434 assert(0 <= M && M < 8 && "Shuffle index out of range");
28435 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
28436 Op1 = Ops1[M < 4 ? 0 : 1];
28437 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
28438 DAG.getConstant(InsertPSMask, DL, MVT::i8));
28441 // Attempt to merge insertps Op0 with an inner target shuffle node.
28442 SmallVector<int, 8> TargetMask0;
28443 SmallVector<SDValue, 2> Ops0;
28444 if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0))
28447 bool Updated = false;
28448 bool UseInput00 = false;
28449 bool UseInput01 = false;
28450 for (int i = 0; i != 4; ++i) {
28451 int M = TargetMask0[i];
28452 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
28453 // No change if element is already zero or the inserted element.
28455 } else if (isUndefOrZero(M)) {
28456 // If the target mask is undef/zero then we must zero the element.
28457 InsertPSMask |= (1u << i);
28462 // The input vector element must be inline.
28463 if (M != i && M != (i + 4))
28466 // Determine which inputs of the target shuffle we're using.
28467 UseInput00 |= (0 <= M && M < 4);
28468 UseInput01 |= (4 <= M);
28471 // If we're not using both inputs of the target shuffle then use the
28472 // referenced input directly.
28473 if (UseInput00 && !UseInput01) {
28476 } else if (!UseInput00 && UseInput01) {
28482 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
28483 DAG.getConstant(InsertPSMask, DL, MVT::i8));
28491 // Nuke no-op shuffles that show up after combining.
28492 if (isNoopShuffleMask(Mask))
28493 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
28495 // Look for simplifications involving one or two shuffle instructions.
28496 SDValue V = N.getOperand(0);
28497 switch (N.getOpcode()) {
28500 case X86ISD::PSHUFLW:
28501 case X86ISD::PSHUFHW:
28502 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
28504 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
28505 return SDValue(); // We combined away this shuffle, so we're done.
28507 // See if this reduces to a PSHUFD which is no more expensive and can
28508 // combine with more operations. Note that it has to at least flip the
28509 // dwords as otherwise it would have been removed as a no-op.
28510 if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
28511 int DMask[] = {0, 1, 2, 3};
28512 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
28513 DMask[DOffset + 0] = DOffset + 1;
28514 DMask[DOffset + 1] = DOffset + 0;
28515 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
28516 V = DAG.getBitcast(DVT, V);
28517 DCI.AddToWorklist(V.getNode());
28518 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
28519 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
28520 DCI.AddToWorklist(V.getNode());
28521 return DAG.getBitcast(VT, V);
28524 // Look for shuffle patterns which can be implemented as a single unpack.
28525 // FIXME: This doesn't handle the location of the PSHUFD generically, and
28526 // only works when we have a PSHUFD followed by two half-shuffles.
28527 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
28528 (V.getOpcode() == X86ISD::PSHUFLW ||
28529 V.getOpcode() == X86ISD::PSHUFHW) &&
28530 V.getOpcode() != N.getOpcode() &&
28532 SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
28533 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
28534 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
28535 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
28536 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
28537 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
28539 for (int i = 0; i < 4; ++i) {
28540 WordMask[i + NOffset] = Mask[i] + NOffset;
28541 WordMask[i + VOffset] = VMask[i] + VOffset;
28543 // Map the word mask through the DWord mask.
28545 for (int i = 0; i < 8; ++i)
28546 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
28547 if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
28548 makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
28549 // We can replace all three shuffles with an unpack.
28550 V = DAG.getBitcast(VT, D.getOperand(0));
28551 DCI.AddToWorklist(V.getNode());
28552 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
28561 case X86ISD::PSHUFD:
28562 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
28571 /// Returns true iff the shuffle node \p N can be replaced with ADDSUB
28572 /// operation. If true is returned then the operands of ADDSUB operation
28573 /// are written to the parameters \p Opnd0 and \p Opnd1.
28575 /// We combine shuffle to ADDSUB directly on the abstract vector shuffle nodes
28576 /// so it is easier to generically match. We also insert dummy vector shuffle
28577 /// nodes for the operands which explicitly discard the lanes which are unused
28578 /// by this operation so that the fact that they're unused flows through the
28579 /// rest of the combiner.
28580 static bool isAddSub(SDNode *N, const X86Subtarget &Subtarget,
28581 SDValue &Opnd0, SDValue &Opnd1) {
28583 EVT VT = N->getValueType(0);
28584 if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
28585 (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)) &&
28586 (!Subtarget.hasAVX512() || (VT != MVT::v16f32 && VT != MVT::v8f64)))
28589 // We only handle target-independent shuffles.
28590 // FIXME: It would be easy and harmless to use the target shuffle mask
28591 // extraction tool to support more.
28592 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
28595 ArrayRef<int> OrigMask = cast<ShuffleVectorSDNode>(N)->getMask();
28596 SmallVector<int, 16> Mask(OrigMask.begin(), OrigMask.end());
28598 SDValue V1 = N->getOperand(0);
28599 SDValue V2 = N->getOperand(1);
28601 // We require the first shuffle operand to be the FSUB node, and the second to
28602 // be the FADD node.
28603 if (V1.getOpcode() == ISD::FADD && V2.getOpcode() == ISD::FSUB) {
28604 ShuffleVectorSDNode::commuteMask(Mask);
28606 } else if (V1.getOpcode() != ISD::FSUB || V2.getOpcode() != ISD::FADD)
28609 // If there are other uses of these operations we can't fold them.
28610 if (!V1->hasOneUse() || !V2->hasOneUse())
28613 // Ensure that both operations have the same operands. Note that we can
28614 // commute the FADD operands.
28615 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
28616 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
28617 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
28620 // We're looking for blends between FADD and FSUB nodes. We insist on these
28621 // nodes being lined up in a specific expected pattern.
28622 if (!(isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
28623 isShuffleEquivalent(V1, V2, Mask, {0, 5, 2, 7}) ||
28624 isShuffleEquivalent(V1, V2, Mask, {0, 9, 2, 11, 4, 13, 6, 15}) ||
28625 isShuffleEquivalent(V1, V2, Mask, {0, 17, 2, 19, 4, 21, 6, 23,
28626 8, 25, 10, 27, 12, 29, 14, 31})))
28634 /// \brief Try to combine a shuffle into a target-specific add-sub or
28635 /// mul-add-sub node.
28636 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
28637 const X86Subtarget &Subtarget,
28638 SelectionDAG &DAG) {
28639 SDValue Opnd0, Opnd1;
28640 if (!isAddSub(N, Subtarget, Opnd0, Opnd1))
28643 EVT VT = N->getValueType(0);
28646 // Try to generate X86ISD::FMADDSUB node here.
28648 if (isFMAddSub(Subtarget, DAG, Opnd0, Opnd1, Opnd2))
28649 return DAG.getNode(X86ISD::FMADDSUB, DL, VT, Opnd0, Opnd1, Opnd2);
28651 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
28652 // the ADDSUB idiom has been successfully recognized. There are no known
28653 // X86 targets with 512-bit ADDSUB instructions!
28654 if (VT.is512BitVector())
28657 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
28660 // We are looking for a shuffle where both sources are concatenated with undef
28661 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
28662 // if we can express this as a single-source shuffle, that's preferable.
28663 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
28664 const X86Subtarget &Subtarget) {
28665 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
28668 EVT VT = N->getValueType(0);
28670 // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
28671 if (!VT.is128BitVector() && !VT.is256BitVector())
28674 if (VT.getVectorElementType() != MVT::i32 &&
28675 VT.getVectorElementType() != MVT::i64 &&
28676 VT.getVectorElementType() != MVT::f32 &&
28677 VT.getVectorElementType() != MVT::f64)
28680 SDValue N0 = N->getOperand(0);
28681 SDValue N1 = N->getOperand(1);
28683 // Check that both sources are concats with undef.
28684 if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
28685 N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
28686 N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
28687 !N1.getOperand(1).isUndef())
28690 // Construct the new shuffle mask. Elements from the first source retain their
28691 // index, but elements from the second source no longer need to skip an undef.
28692 SmallVector<int, 8> Mask;
28693 int NumElts = VT.getVectorNumElements();
28695 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
28696 for (int Elt : SVOp->getMask())
28697 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
28700 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
28702 return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
28705 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
28706 TargetLowering::DAGCombinerInfo &DCI,
28707 const X86Subtarget &Subtarget) {
28709 EVT VT = N->getValueType(0);
28710 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28711 // If we have legalized the vector types, look for blends of FADD and FSUB
28712 // nodes that we can fuse into an ADDSUB node.
28713 if (TLI.isTypeLegal(VT))
28714 if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
28717 // During Type Legalization, when promoting illegal vector types,
28718 // the backend might introduce new shuffle dag nodes and bitcasts.
28720 // This code performs the following transformation:
28721 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
28722 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
28724 // We do this only if both the bitcast and the BINOP dag nodes have
28725 // one use. Also, perform this transformation only if the new binary
28726 // operation is legal. This is to avoid introducing dag nodes that
28727 // potentially need to be further expanded (or custom lowered) into a
28728 // less optimal sequence of dag nodes.
28729 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
28730 N->getOpcode() == ISD::VECTOR_SHUFFLE &&
28731 N->getOperand(0).getOpcode() == ISD::BITCAST &&
28732 N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) {
28733 SDValue N0 = N->getOperand(0);
28734 SDValue N1 = N->getOperand(1);
28736 SDValue BC0 = N0.getOperand(0);
28737 EVT SVT = BC0.getValueType();
28738 unsigned Opcode = BC0.getOpcode();
28739 unsigned NumElts = VT.getVectorNumElements();
28741 if (BC0.hasOneUse() && SVT.isVector() &&
28742 SVT.getVectorNumElements() * 2 == NumElts &&
28743 TLI.isOperationLegal(Opcode, VT)) {
28744 bool CanFold = false;
28750 // isOperationLegal lies for integer ops on floating point types.
28751 CanFold = VT.isInteger();
28756 // isOperationLegal lies for floating point ops on integer types.
28757 CanFold = VT.isFloatingPoint();
28761 unsigned SVTNumElts = SVT.getVectorNumElements();
28762 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
28763 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
28764 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
28765 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
28766 CanFold = SVOp->getMaskElt(i) < 0;
28769 SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
28770 SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
28771 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
28772 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask());
28777 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
28778 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
28779 // consecutive, non-overlapping, and in the right order.
28780 SmallVector<SDValue, 16> Elts;
28781 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
28782 if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
28783 Elts.push_back(Elt);
28790 if (Elts.size() == VT.getVectorNumElements())
28791 if (SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true))
28794 // For AVX2, we sometimes want to combine
28795 // (vector_shuffle <mask> (concat_vectors t1, undef)
28796 // (concat_vectors t2, undef))
28798 // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
28799 // Since the latter can be efficiently lowered with VPERMD/VPERMQ
28800 if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
28803 if (isTargetShuffle(N->getOpcode())) {
28805 if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
28808 // Try recursively combining arbitrary sequences of x86 shuffle
28809 // instructions into higher-order shuffles. We do this after combining
28810 // specific PSHUF instruction sequences into their minimal form so that we
28811 // can evaluate how many specialized shuffle instructions are involved in
28812 // a particular chain.
28813 SmallVector<int, 1> NonceMask; // Just a placeholder.
28814 NonceMask.push_back(0);
28815 if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask, {},
28816 /*Depth*/ 1, /*HasVarMask*/ false, DAG,
28818 return SDValue(); // This routine will use CombineTo to replace N.
28824 /// Check if a vector extract from a target-specific shuffle of a load can be
28825 /// folded into a single element load.
28826 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
28827 /// shuffles have been custom lowered so we need to handle those here.
28828 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
28829 TargetLowering::DAGCombinerInfo &DCI) {
28830 if (DCI.isBeforeLegalizeOps())
28833 SDValue InVec = N->getOperand(0);
28834 SDValue EltNo = N->getOperand(1);
28835 EVT EltVT = N->getValueType(0);
28837 if (!isa<ConstantSDNode>(EltNo))
28840 EVT OriginalVT = InVec.getValueType();
28842 // Peek through bitcasts; don't duplicate a load with other uses.
28843 InVec = peekThroughOneUseBitcasts(InVec);
28845 EVT CurrentVT = InVec.getValueType();
28846 if (!CurrentVT.isVector() ||
28847 CurrentVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
28850 if (!isTargetShuffle(InVec.getOpcode()))
28853 // Don't duplicate a load with other uses.
28854 if (!InVec.hasOneUse())
28857 SmallVector<int, 16> ShuffleMask;
28858 SmallVector<SDValue, 2> ShuffleOps;
28860 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
28861 ShuffleOps, ShuffleMask, UnaryShuffle))
28864 // Select the input vector, guarding against an out-of-range extract index.
28865 unsigned NumElems = CurrentVT.getVectorNumElements();
28866 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
28867 int Idx = (Elt > (int)NumElems) ? SM_SentinelUndef : ShuffleMask[Elt];
28869 if (Idx == SM_SentinelZero)
28870 return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
28871 : DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
28872 if (Idx == SM_SentinelUndef)
28873 return DAG.getUNDEF(EltVT);
28875 assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range");
28876 SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0]
28879 // If inputs to shuffle are the same for both ops, then allow 2 uses
28880 unsigned AllowedUses =
28881 (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
28883 if (LdNode.getOpcode() == ISD::BITCAST) {
28884 // Don't duplicate a load with other uses.
28885 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
28888 AllowedUses = 1; // only allow 1 load use if we have a bitcast
28889 LdNode = LdNode.getOperand(0);
28892 if (!ISD::isNormalLoad(LdNode.getNode()))
28895 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
28897 if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
28900 // If there's a bitcast before the shuffle, check if the load type and
28901 // alignment are valid.
28902 unsigned Align = LN0->getAlignment();
28903 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28904 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
28905 EltVT.getTypeForEVT(*DAG.getContext()));
28907 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
28910 // All checks match so transform back to vector_shuffle so that DAG combiner
28911 // can finish the job
28914 // Create the shuffle node, taking into account the case that it's a unary shuffle.
28915 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1];
28916 Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle,
28918 Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
28919 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
28923 // Try to match patterns such as
28924 // (i16 bitcast (v16i1 x))
28926 // (i16 movmsk (v16i8 sext (v16i1 x)))
28927 // before the illegal vector is scalarized on subtargets that don't have legal
28929 static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
28930 const X86Subtarget &Subtarget) {
28931 EVT VT = BitCast.getValueType();
28932 SDValue N0 = BitCast.getOperand(0);
28933 EVT VecVT = N0->getValueType(0);
28935 if (!VT.isScalarInteger() || !VecVT.isSimple())
28938 // With AVX512 vxi1 types are legal and we prefer using k-regs.
28939 // MOVMSK is supported in SSE2 or later.
28940 if (Subtarget.hasAVX512() || !Subtarget.hasSSE2())
28943 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
28944 // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
28945 // v8i16 and v16i16.
28946 // For these two cases, we can shuffle the upper element bytes to a
28947 // consecutive sequence at the start of the vector and treat the results as
28948 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
28949 // for v16i16 this is not the case, because the shuffle is expensive, so we
28950 // avoid sign-extending to this type entirely.
28951 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
28952 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
28954 MVT FPCastVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
28955 switch (VecVT.getSimpleVT().SimpleTy) {
28959 SExtVT = MVT::v2i64;
28960 FPCastVT = MVT::v2f64;
28963 SExtVT = MVT::v4i32;
28964 FPCastVT = MVT::v4f32;
28965 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
28966 // sign-extend to a 256-bit operation to avoid truncation.
28967 if (N0->getOpcode() == ISD::SETCC &&
28968 N0->getOperand(0)->getValueType(0).is256BitVector() &&
28969 Subtarget.hasInt256()) {
28970 SExtVT = MVT::v4i64;
28971 FPCastVT = MVT::v4f64;
28975 SExtVT = MVT::v8i16;
28976 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
28977 // sign-extend to a 256-bit operation to match the compare.
28978 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
28979 // 256-bit because the shuffle is cheaper than sign extending the result of
28981 if (N0->getOpcode() == ISD::SETCC &&
28982 N0->getOperand(0)->getValueType(0).is256BitVector() &&
28983 Subtarget.hasInt256()) {
28984 SExtVT = MVT::v8i32;
28985 FPCastVT = MVT::v8f32;
28989 SExtVT = MVT::v16i8;
28990 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
28991 // it is not profitable to sign-extend to 256-bit because this will
28992 // require an extra cross-lane shuffle which is more expensive than
28993 // truncating the result of the compare to 128-bits.
28996 // TODO: Handle pre-AVX2 cases by splitting to two v16i1's.
28997 if (!Subtarget.hasInt256())
28999 SExtVT = MVT::v32i8;
29004 SDValue V = DAG.getSExtOrTrunc(N0, DL, SExtVT);
29005 if (SExtVT == MVT::v8i16) {
29006 V = DAG.getBitcast(MVT::v16i8, V);
29007 V = DAG.getVectorShuffle(
29008 MVT::v16i8, DL, V, DAG.getUNDEF(MVT::v16i8),
29009 {0, 2, 4, 6, 8, 10, 12, 14, -1, -1, -1, -1, -1, -1, -1, -1});
29011 assert(SExtVT.getScalarType() != MVT::i16 &&
29012 "Vectors of i16 must be shuffled");
29013 if (FPCastVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
29014 V = DAG.getBitcast(FPCastVT, V);
29015 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
29016 return DAG.getZExtOrTrunc(V, DL, VT);
29019 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
29020 TargetLowering::DAGCombinerInfo &DCI,
29021 const X86Subtarget &Subtarget) {
29022 SDValue N0 = N->getOperand(0);
29023 EVT VT = N->getValueType(0);
29024 EVT SrcVT = N0.getValueType();
29026 // Try to match patterns such as
29027 // (i16 bitcast (v16i1 x))
29029 // (i16 movmsk (v16i8 sext (v16i1 x)))
29030 // before the setcc result is scalarized on subtargets that don't have legal
29032 if (DCI.isBeforeLegalize())
29033 if (SDValue V = combineBitcastvxi1(DAG, SDValue(N, 0), Subtarget))
29035 // Since MMX types are special and don't usually play with other vector types,
29036 // it's better to handle them early to be sure we emit efficient code by
29037 // avoiding store-load conversions.
29039 // Detect bitcasts between i32 to x86mmx low word.
29040 if (VT == MVT::x86mmx && N0.getOpcode() == ISD::BUILD_VECTOR &&
29041 SrcVT == MVT::v2i32 && isNullConstant(N0.getOperand(1))) {
29042 SDValue N00 = N0->getOperand(0);
29043 if (N00.getValueType() == MVT::i32)
29044 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(N00), VT, N00);
29047 // Detect bitcasts between element or subvector extraction to x86mmx.
29048 if (VT == MVT::x86mmx &&
29049 (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
29050 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
29051 isNullConstant(N0.getOperand(1))) {
29052 SDValue N00 = N0->getOperand(0);
29053 if (N00.getValueType().is128BitVector())
29054 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
29055 DAG.getBitcast(MVT::v2i64, N00));
29058 // Detect bitcasts from FP_TO_SINT to x86mmx.
29059 if (VT == MVT::x86mmx && SrcVT == MVT::v2i32 &&
29060 N0.getOpcode() == ISD::FP_TO_SINT) {
29062 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
29063 DAG.getUNDEF(MVT::v2i32));
29064 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
29065 DAG.getBitcast(MVT::v2i64, Res));
29068 // Convert a bitcasted integer logic operation that has one bitcasted
29069 // floating-point operand into a floating-point logic operation. This may
29070 // create a load of a constant, but that is cheaper than materializing the
29071 // constant in an integer register and transferring it to an SSE register or
29072 // transferring the SSE operand to integer register and back.
29074 switch (N0.getOpcode()) {
29075 case ISD::AND: FPOpcode = X86ISD::FAND; break;
29076 case ISD::OR: FPOpcode = X86ISD::FOR; break;
29077 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
29078 default: return SDValue();
29081 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
29082 (Subtarget.hasSSE2() && VT == MVT::f64)))
29085 SDValue LogicOp0 = N0.getOperand(0);
29086 SDValue LogicOp1 = N0.getOperand(1);
29089 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
29090 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
29091 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
29092 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
29093 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
29094 return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
29096 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
29097 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
29098 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
29099 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
29100 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
29101 return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
29107 // Match a binop + shuffle pyramid that represents a horizontal reduction over
29108 // the elements of a vector.
29109 // Returns the vector that is being reduced on, or SDValue() if a reduction
29110 // was not matched.
29111 static SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType BinOp) {
29112 // The pattern must end in an extract from index 0.
29113 if ((Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) ||
29114 !isNullConstant(Extract->getOperand(1)))
29118 unsigned Stages = Log2_32(Extract->getOperand(0).getValueType().getVectorNumElements());
29120 SDValue Op = Extract->getOperand(0);
29121 // At each stage, we're looking for something that looks like:
29122 // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
29123 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
29124 // i32 undef, i32 undef, i32 undef, i32 undef>
29125 // %a = binop <8 x i32> %op, %s
29126 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
29127 // we expect something like:
29128 // <4,5,6,7,u,u,u,u>
29129 // <2,3,u,u,u,u,u,u>
29130 // <1,u,u,u,u,u,u,u>
29131 for (unsigned i = 0; i < Stages; ++i) {
29132 if (Op.getOpcode() != BinOp)
29135 ShuffleVectorSDNode *Shuffle =
29136 dyn_cast<ShuffleVectorSDNode>(Op.getOperand(0).getNode());
29138 Op = Op.getOperand(1);
29140 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op.getOperand(1).getNode());
29141 Op = Op.getOperand(0);
29144 // The first operand of the shuffle should be the same as the other operand
29146 if (!Shuffle || (Shuffle->getOperand(0) != Op))
29149 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
29150 for (int Index = 0, MaskEnd = 1 << i; Index < MaskEnd; ++Index)
29151 if (Shuffle->getMaskElt(Index) != MaskEnd + Index)
29158 // Given a select, detect the following pattern:
29159 // 1: %2 = zext <N x i8> %0 to <N x i32>
29160 // 2: %3 = zext <N x i8> %1 to <N x i32>
29161 // 3: %4 = sub nsw <N x i32> %2, %3
29162 // 4: %5 = icmp sgt <N x i32> %4, [0 x N] or [-1 x N]
29163 // 5: %6 = sub nsw <N x i32> zeroinitializer, %4
29164 // 6: %7 = select <N x i1> %5, <N x i32> %4, <N x i32> %6
29165 // This is useful as it is the input into a SAD pattern.
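// For illustration only, this shape typically comes from vectorizing source
// such as (names are hypothetical):
//   for (i = 0; i != n; ++i)
//     sum += abs((int)a[i] - (int)b[i]);
// where the abs() becomes the compare/negate/select sequence above.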
29166 static bool detectZextAbsDiff(const SDValue &Select, SDValue &Op0,
29168 // Check that the condition of the select instruction is greater-than (or less-than).
29169 SDValue SetCC = Select->getOperand(0);
29170 if (SetCC.getOpcode() != ISD::SETCC)
29172 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
29173 if (CC != ISD::SETGT && CC != ISD::SETLT)
29176 SDValue SelectOp1 = Select->getOperand(1);
29177 SDValue SelectOp2 = Select->getOperand(2);
29179 // The following instructions assume SelectOp1 is the subtraction operand
29180 // and SelectOp2 is the negation operand.
29181 // In the case of SETLT this is the other way around.
29182 if (CC == ISD::SETLT)
29183 std::swap(SelectOp1, SelectOp2);
29185 // The second operand of the select should be the negation of the first
29186 // operand, which is implemented as 0 - SelectOp1.
29187 if (!(SelectOp2.getOpcode() == ISD::SUB &&
29188 ISD::isBuildVectorAllZeros(SelectOp2.getOperand(0).getNode()) &&
29189 SelectOp2.getOperand(1) == SelectOp1))
29192 // The first operand of SetCC is the first operand of the select, which is the
29193 // difference between the two input vectors.
29194 if (SetCC.getOperand(0) != SelectOp1)
29197 // In the SETLT case, the second operand of the comparison can be either 1 or 0.
29199 if ((CC == ISD::SETLT) &&
29200 !((ISD::isConstantSplatVector(SetCC.getOperand(1).getNode(), SplatVal) &&
29202 (ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()))))
29205 // In the SETGT case, the second operand of the comparison can be either -1 or 0.
29206 if ((CC == ISD::SETGT) &&
29207 !(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()) ||
29208 ISD::isBuildVectorAllOnes(SetCC.getOperand(1).getNode())))
29211 // The first operand of the select is the difference between the two input vectors.
29213 if (SelectOp1.getOpcode() != ISD::SUB)
29216 Op0 = SelectOp1.getOperand(0);
29217 Op1 = SelectOp1.getOperand(1);
29219 // Check if the operands of the sub are zero-extended from vectors of i8.
29220 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
29221 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
29222 Op1.getOpcode() != ISD::ZERO_EXTEND ||
29223 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
29229 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs to these zexts.
29231 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
29232 const SDValue &Zext1, const SDLoc &DL) {
29234 // Find the appropriate width for the PSADBW.
29235 EVT InVT = Zext0.getOperand(0).getValueType();
29236 unsigned RegSize = std::max(128u, InVT.getSizeInBits());
29238 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
29239 // fill in the missing vector elements with 0.
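// For example (illustrative), with <4 x i8> inputs RegSize is 128 and
// NumConcat is 4, so Ops becomes { src, 0, 0, 0 } and the concatenation is a
// v16i8 whose upper 12 lanes are zero and therefore add nothing to the sums
// of absolute differences.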
29240 unsigned NumConcat = RegSize / InVT.getSizeInBits();
29241 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
29242 Ops[0] = Zext0.getOperand(0);
29243 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
29244 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
29245 Ops[0] = Zext1.getOperand(0);
29246 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
29248 // Actually build the SAD
29249 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
29250 return DAG.getNode(X86ISD::PSADBW, DL, SadVT, SadOp0, SadOp1);
29253 // Attempt to replace an all_of/any_of style horizontal reduction with a MOVMSK.
29254 static SDValue combineHorizontalPredicateResult(SDNode *Extract,
29256 const X86Subtarget &Subtarget) {
29257 // Bail without SSE2 or with AVX512VL (which uses predicate registers).
29258 if (!Subtarget.hasSSE2() || Subtarget.hasVLX())
29261 EVT ExtractVT = Extract->getValueType(0);
29262 unsigned BitWidth = ExtractVT.getSizeInBits();
29263 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
29264 ExtractVT != MVT::i8)
29267 // Check for OR(any_of) and AND(all_of) horizontal reduction patterns.
29268 for (ISD::NodeType Op : {ISD::OR, ISD::AND}) {
29269 SDValue Match = matchBinOpReduction(Extract, Op);
29273 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
29274 // which we can't support here for now.
29275 if (Match.getScalarValueSizeInBits() != BitWidth)
29278 // We require AVX2 for PMOVMSKB for v16i16/v32i8;
29279 unsigned MatchSizeInBits = Match.getValueSizeInBits();
29280 if (!(MatchSizeInBits == 128 ||
29281 (MatchSizeInBits == 256 &&
29282 ((Subtarget.hasAVX() && BitWidth >= 32) || Subtarget.hasAVX2()))))
29285 // Don't bother performing this for 2-element vectors.
29286 if (Match.getValueType().getVectorNumElements() <= 2)
29289 // Check that we are extracting a reduction of all sign bits.
29290 if (DAG.ComputeNumSignBits(Match) != BitWidth)
29293 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
29295 if (64 == BitWidth || 32 == BitWidth)
29296 MaskVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
29297 MatchSizeInBits / BitWidth);
29299 MaskVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
29302 ISD::CondCode CondCode;
29303 if (Op == ISD::OR) {
29304 // any_of -> MOVMSK != 0
29305 CompareBits = APInt::getNullValue(32);
29306 CondCode = ISD::CondCode::SETNE;
29308 // all_of -> MOVMSK == ((1 << NumElts) - 1)
29309 CompareBits = APInt::getLowBitsSet(32, MaskVT.getVectorNumElements());
29310 CondCode = ISD::CondCode::SETEQ;
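// For example (illustrative): an all_of reduction of v4i32 sign bits uses
// MOVMSKPS and compares the 4-bit mask against 0xF, while an any_of
// reduction of v16i8 uses PMOVMSKB and compares the 16-bit mask against 0.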
29313 // Perform the select as i32/i64 and then truncate to avoid partial register stalls.
29315 unsigned ResWidth = std::max(BitWidth, 32u);
29316 EVT ResVT = EVT::getIntegerVT(*DAG.getContext(), ResWidth);
29318 SDValue Zero = DAG.getConstant(0, DL, ResVT);
29319 SDValue Ones = DAG.getAllOnesConstant(DL, ResVT);
29320 SDValue Res = DAG.getBitcast(MaskVT, Match);
29321 Res = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Res);
29322 Res = DAG.getSelectCC(DL, Res, DAG.getConstant(CompareBits, DL, MVT::i32),
29323 Ones, Zero, CondCode);
29324 return DAG.getSExtOrTrunc(Res, DL, ExtractVT);
29330 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
29331 const X86Subtarget &Subtarget) {
29332 // PSADBW is only supported on SSE2 and up.
29333 if (!Subtarget.hasSSE2())
29336 // Verify the type we're extracting from is an integer type wider than i16.
29337 EVT VT = Extract->getOperand(0).getValueType();
29338 if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
29341 unsigned RegSize = 128;
29342 if (Subtarget.hasBWI())
29344 else if (Subtarget.hasAVX2())
29347 // We handle up to v16i* for SSE2 / v32i* for AVX2 / v64i* for AVX512.
29348 // TODO: We should be able to handle larger vectors by splitting them before
29349 // feeding them into several SADs, and then reducing over those.
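// As an illustrative example of the check below: with SSE2 (RegSize == 128) a
// v16i32 reduction gives 128 / 16 == 8 and is accepted, while a v32i32
// reduction gives 128 / 32 == 4 and is rejected until AVX2 raises RegSize.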
29350 if (RegSize / VT.getVectorNumElements() < 8)
29353 // Match shuffle + add pyramid.
29354 SDValue Root = matchBinOpReduction(Extract, ISD::ADD);
29356 // The operand is expected to be zero extended from i8
29357 // (verified in detectZextAbsDiff).
29358 // In order to convert to i64 and above, additional any/zero/sign
29359 // extend is expected.
29360 // A zero extend from 32 bits has no mathematical effect on the result.
29361 // Also, the sign extend behaves like a zero extend here
29362 // (the sign bit being extended is known to be zero).
29363 // So it is correct to skip the sign/zero extend instruction.
29364 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
29365 Root.getOpcode() == ISD::ZERO_EXTEND ||
29366 Root.getOpcode() == ISD::ANY_EXTEND))
29367 Root = Root.getOperand(0);
29369 // If there was a match, we want Root to be a select that is the root of an
29370 // abs-diff pattern.
29371 if (!Root || (Root.getOpcode() != ISD::VSELECT))
29374 // Check whether we have an abs-diff pattern feeding into the select.
29375 SDValue Zext0, Zext1;
29376 if (!detectZextAbsDiff(Root, Zext0, Zext1))
29379 // Create the SAD instruction.
29381 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL);
29383 // If the original vector was wider than 8 elements, sum over the results
29384 // in the SAD vector.
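// For example (illustrative): reducing a v16i32 vector (Stages == 4) builds a
// v2i64 SAD holding two partial sums, and the single loop iteration below
// shuffles the high i64 partial sum down and adds it into element 0.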
29385 unsigned Stages = Log2_32(VT.getVectorNumElements());
29386 MVT SadVT = SAD.getSimpleValueType();
29388 unsigned SadElems = SadVT.getVectorNumElements();
29390 for(unsigned i = Stages - 3; i > 0; --i) {
29391 SmallVector<int, 16> Mask(SadElems, -1);
29392 for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
29393 Mask[j] = MaskEnd + j;
29396 SDValue Shuffle = DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
29397 SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
29401 MVT Type = Extract->getSimpleValueType(0);
29402 unsigned TypeSizeInBits = Type.getSizeInBits();
29403 // Return the lowest TypeSizeInBits bits.
29404 MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
29405 SAD = DAG.getNode(ISD::BITCAST, DL, ResVT, SAD);
29406 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
29407 Extract->getOperand(1));
29410 // Attempt to peek through a target shuffle and extract the scalar from the source.
29412 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
29413 TargetLowering::DAGCombinerInfo &DCI,
29414 const X86Subtarget &Subtarget) {
29415 if (DCI.isBeforeLegalizeOps())
29418 SDValue Src = N->getOperand(0);
29419 SDValue Idx = N->getOperand(1);
29421 EVT VT = N->getValueType(0);
29422 EVT SrcVT = Src.getValueType();
29423 EVT SrcSVT = SrcVT.getVectorElementType();
29424 unsigned NumSrcElts = SrcVT.getVectorNumElements();
29426 // Don't attempt this for boolean mask vectors or unknown extraction indices.
29427 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
29430 // Resolve the target shuffle inputs and mask.
29431 SmallVector<int, 16> Mask;
29432 SmallVector<SDValue, 2> Ops;
29433 if (!resolveTargetShuffleInputs(peekThroughBitcasts(Src), Ops, Mask))
29436 // Attempt to narrow/widen the shuffle mask to the correct size.
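// For example (illustrative): extracting from a v8i16 source whose shuffle
// resolved to a 4-element (i32-granularity) mask {1, 0, -1, -1} scales it by
// 2 into {2, 3, 0, 1, -1, -1, -1, -1} so each entry indexes an i16 lane again.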
29437 if (Mask.size() != NumSrcElts) {
29438 if ((NumSrcElts % Mask.size()) == 0) {
29439 SmallVector<int, 16> ScaledMask;
29440 int Scale = NumSrcElts / Mask.size();
29441 scaleShuffleMask(Scale, Mask, ScaledMask);
29442 Mask = std::move(ScaledMask);
29443 } else if ((Mask.size() % NumSrcElts) == 0) {
29444 SmallVector<int, 16> WidenedMask;
29445 while (Mask.size() > NumSrcElts &&
29446 canWidenShuffleElements(Mask, WidenedMask))
29447 Mask = std::move(WidenedMask);
29448 // TODO - investigate support for wider shuffle masks with known upper
29449 // undef/zero elements for implicit zero-extension.
29453 // Check if narrowing/widening failed.
29454 if (Mask.size() != NumSrcElts)
29457 int SrcIdx = Mask[N->getConstantOperandVal(1)];
29460 // If the shuffle source element is undef/zero then we can just accept it.
29461 if (SrcIdx == SM_SentinelUndef)
29462 return DAG.getUNDEF(VT);
29464 if (SrcIdx == SM_SentinelZero)
29465 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
29466 : DAG.getConstant(0, dl, VT);
29468 SDValue SrcOp = Ops[SrcIdx / Mask.size()];
29469 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
29470 SrcIdx = SrcIdx % Mask.size();
29472 // We can only extract other elements from 128-bit vectors and in certain
29473 // circumstances, depending on SSE-level.
29474 // TODO: Investigate using extract_subvector for larger vectors.
29475 // TODO: Investigate float/double extraction if it will be just stored.
29476 if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
29477 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
29478 assert(SrcSVT == VT && "Unexpected extraction type");
29479 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
29480 DAG.getIntPtrConstant(SrcIdx, dl));
29483 if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
29484 (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
29485 assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
29486 "Unexpected extraction type");
29487 unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
29488 SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
29489 DAG.getIntPtrConstant(SrcIdx, dl));
29490 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, ExtOp,
29491 DAG.getValueType(SrcSVT));
29492 return DAG.getZExtOrTrunc(Assert, dl, VT);
29498 /// Detect vector gather/scatter index generation and convert it from being a
29499 /// bunch of shuffles and extracts into a somewhat faster sequence.
29500 /// For i686, the best sequence is apparently storing the value and loading
29501 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
29502 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
29503 TargetLowering::DAGCombinerInfo &DCI,
29504 const X86Subtarget &Subtarget) {
29505 if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
29508 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
29511 SDValue InputVector = N->getOperand(0);
29512 SDValue EltIdx = N->getOperand(1);
29514 EVT SrcVT = InputVector.getValueType();
29515 EVT VT = N->getValueType(0);
29516 SDLoc dl(InputVector);
29518 // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
29519 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
29520 VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
29521 SDValue MMXSrc = InputVector.getOperand(0);
29523 // The bitcast source is a direct mmx result.
29524 if (MMXSrc.getValueType() == MVT::x86mmx)
29525 return DAG.getBitcast(VT, InputVector);
29528 // Detect mmx to i32 conversion through a v2i32 elt extract.
29529 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
29530 VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
29531 SDValue MMXSrc = InputVector.getOperand(0);
29533 // The bitcast source is a direct mmx result.
29534 if (MMXSrc.getValueType() == MVT::x86mmx)
29535 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
29538 if (VT == MVT::i1 && InputVector.getOpcode() == ISD::BITCAST &&
29539 isa<ConstantSDNode>(EltIdx) &&
29540 isa<ConstantSDNode>(InputVector.getOperand(0))) {
29541 uint64_t ExtractedElt = N->getConstantOperandVal(1);
29542 uint64_t InputValue = InputVector.getConstantOperandVal(0);
29543 uint64_t Res = (InputValue >> ExtractedElt) & 1;
29544 return DAG.getConstant(Res, dl, MVT::i1);
29547 // Check whether this extract is the root of a sum of absolute differences
29548 // pattern. This has to be done here because we really want it to happen
29549 // pre-legalization.
29550 if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
29553 // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
29554 if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
29557 // Only operate on vectors of 4 elements, where the alternative shuffling
29558 // gets to be more expensive.
29559 if (SrcVT != MVT::v4i32)
29562 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
29563 // single use which is a sign-extend or zero-extend, and all elements are used.
29565 SmallVector<SDNode *, 4> Uses;
29566 unsigned ExtractedElements = 0;
29567 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
29568 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
29569 if (UI.getUse().getResNo() != InputVector.getResNo())
29572 SDNode *Extract = *UI;
29573 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
29576 if (Extract->getValueType(0) != MVT::i32)
29578 if (!Extract->hasOneUse())
29580 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
29581 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
29583 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
29586 // Record which element was extracted.
29587 ExtractedElements |= 1 << Extract->getConstantOperandVal(1);
29588 Uses.push_back(Extract);
29591 // If not all the elements were used, this may not be worthwhile.
29592 if (ExtractedElements != 15)
29595 // Ok, we've now decided to do the transformation.
29596 // If 64-bit shifts are legal, use the extract-shift sequence,
29597 // otherwise bounce the vector off the cache.
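// For example (illustrative), on x86-64 the four i32 lanes are recovered with
// two 64-bit extracts plus two right shifts by 32 bits and truncates, whereas
// on i686 the vector is spilled to a stack slot once and each lane is
// re-loaded from it.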
29598 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29601 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
29602 SDValue Cst = DAG.getBitcast(MVT::v2i64, InputVector);
29603 auto &DL = DAG.getDataLayout();
29604 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy(DL);
29605 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
29606 DAG.getConstant(0, dl, VecIdxTy));
29607 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
29608 DAG.getConstant(1, dl, VecIdxTy));
29610 SDValue ShAmt = DAG.getConstant(
29611 32, dl, DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64, DL));
29612 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
29613 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
29614 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
29615 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
29616 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
29617 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
29619 // Store the value to a temporary stack slot.
29620 SDValue StackPtr = DAG.CreateStackTemporary(SrcVT);
29621 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
29622 MachinePointerInfo());
29624 EVT ElementType = SrcVT.getVectorElementType();
29625 unsigned EltSize = ElementType.getSizeInBits() / 8;
29627 // Replace each use (extract) with a load of the appropriate element.
29628 for (unsigned i = 0; i < 4; ++i) {
29629 uint64_t Offset = EltSize * i;
29630 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
29631 SDValue OffsetVal = DAG.getConstant(Offset, dl, PtrVT);
29633 SDValue ScalarAddr =
29634 DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, OffsetVal);
29636 // Load the scalar.
29638 Vals[i] = DAG.getLoad(ElementType, dl, Ch, ScalarAddr, MachinePointerInfo());
29642 // Replace the extracts
29643 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
29644 UE = Uses.end(); UI != UE; ++UI) {
29645 SDNode *Extract = *UI;
29647 uint64_t IdxVal = Extract->getConstantOperandVal(1);
29648 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
29651 // The replacement was made in place; don't return anything.
29655 // TODO - merge with combineExtractVectorElt once it can handle the implicit
29656 // zero-extension of X86ISD::PINSRW/X86ISD::PINSRB in:
29657 // XFormVExtractWithShuffleIntoLoad, combineHorizontalPredicateResult and
29658 // combineBasicSADPattern.
29659 static SDValue combineExtractVectorElt_SSE(SDNode *N, SelectionDAG &DAG,
29660 TargetLowering::DAGCombinerInfo &DCI,
29661 const X86Subtarget &Subtarget) {
29662 return combineExtractWithShuffle(N, DAG, DCI, Subtarget);
29665 /// If a vector select has an operand that is -1 or 0, try to simplify the
29666 /// select to a bitwise logic operation.
29668 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
29669 TargetLowering::DAGCombinerInfo &DCI,
29670 const X86Subtarget &Subtarget) {
29671 SDValue Cond = N->getOperand(0);
29672 SDValue LHS = N->getOperand(1);
29673 SDValue RHS = N->getOperand(2);
29674 EVT VT = LHS.getValueType();
29675 EVT CondVT = Cond.getValueType();
29677 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29679 if (N->getOpcode() != ISD::VSELECT)
29682 assert(CondVT.isVector() && "Vector select expects a vector selector!");
29684 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
29685 // Check if the first operand is all zeros and Cond type is vXi1.
29686 // This situation only applies to avx512.
29687 if (FValIsAllZeros && Subtarget.hasAVX512() && Cond.hasOneUse() &&
29688 CondVT.getVectorElementType() == MVT::i1) {
29689 // Invert the cond to not(cond) : xor(op,allones)=not(op)
29690 SDValue CondNew = DAG.getNode(ISD::XOR, DL, CondVT, Cond,
29691 DAG.getAllOnesConstant(DL, CondVT));
29692 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
29693 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
29696 // To use the condition operand as a bitwise mask, it must have elements that
29697 // are the same size as the select elements. I.e., the condition operand must
29698 // have already been promoted from the IR select condition type <N x i1>.
29699 // Don't check if the types themselves are equal because that excludes
29700 // vector floating-point selects.
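// For example (illustrative), (vselect v4i32:$cond, (v4i32 all-ones), $x) can
// become (or $cond, $x) because each i32 condition element is as wide as the
// selected elements, whereas a v4i1 condition is too narrow to act as a mask.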
29701 if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
29704 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
29705 FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
29707 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
29709 if (!TValIsAllOnes && !FValIsAllZeros &&
29710 // Check if the selector will be produced by CMPP*/PCMP*.
29711 Cond.getOpcode() == ISD::SETCC &&
29712 // Check if SETCC has already been promoted.
29713 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
29715 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
29716 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
29718 if (TValIsAllZeros || FValIsAllOnes) {
29719 SDValue CC = Cond.getOperand(2);
29720 ISD::CondCode NewCC =
29721 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
29722 Cond.getOperand(0).getValueType().isInteger());
29723 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
29725 std::swap(LHS, RHS);
29726 TValIsAllOnes = FValIsAllOnes;
29727 FValIsAllZeros = TValIsAllZeros;
29731 // vselect Cond, 111..., 000... -> Cond
29732 if (TValIsAllOnes && FValIsAllZeros)
29733 return DAG.getBitcast(VT, Cond);
29735 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
29738 // vselect Cond, 111..., X -> or Cond, X
29739 if (TValIsAllOnes) {
29740 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
29741 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
29742 return DAG.getBitcast(VT, Or);
29745 // vselect Cond, X, 000... -> and Cond, X
29746 if (FValIsAllZeros) {
29747 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
29748 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
29749 return DAG.getBitcast(VT, And);
29755 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
29756 SDValue Cond = N->getOperand(0);
29757 SDValue LHS = N->getOperand(1);
29758 SDValue RHS = N->getOperand(2);
29761 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
29762 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
29763 if (!TrueC || !FalseC)
29766 // Don't do this for crazy integer types.
29767 if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType()))
29770 // If this is efficiently invertible, canonicalize the TrueC/FalseC values
29771 // so that TrueC (the true value) is larger than FalseC.
29772 bool NeedsCondInvert = false;
29773 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
29774 // Efficiently invertible.
29775 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
29776 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
29777 isa<ConstantSDNode>(Cond.getOperand(1))))) {
29778 NeedsCondInvert = true;
29779 std::swap(TrueC, FalseC);
29782 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
29783 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
29784 if (NeedsCondInvert) // Invert the condition if needed.
29785 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
29786 DAG.getConstant(1, DL, Cond.getValueType()));
29788 // Zero extend the condition if needed.
29789 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
29791 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
29792 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
29793 DAG.getConstant(ShAmt, DL, MVT::i8));
29796 // Optimize cases that will turn into an LEA instruction. This requires
29797 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
29798 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
29799 uint64_t Diff = TrueC->getZExtValue() - FalseC->getZExtValue();
29800 if (N->getValueType(0) == MVT::i32)
29801 Diff = (unsigned)Diff;
29803 bool isFastMultiplier = false;
29805 switch ((unsigned char)Diff) {
29808 case 1: // result = add base, cond
29809 case 2: // result = lea base( , cond*2)
29810 case 3: // result = lea base(cond, cond*2)
29811 case 4: // result = lea base( , cond*4)
29812 case 5: // result = lea base(cond, cond*4)
29813 case 8: // result = lea base( , cond*8)
29814 case 9: // result = lea base(cond, cond*8)
29815 isFastMultiplier = true;
29820 if (isFastMultiplier) {
29821 APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
29822 if (NeedsCondInvert) // Invert the condition if needed.
29823 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
29824 DAG.getConstant(1, DL, Cond.getValueType()));
29826 // Zero extend the condition if needed.
29827 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), Cond);
29828 // Scale the condition by the difference.
29830 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
29831 DAG.getConstant(Diff, DL, Cond.getValueType()));
29833 // Add the base if non-zero.
29834 if (FalseC->getAPIntValue() != 0)
29835 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
29836 SDValue(FalseC, 0));
29844 // If this is a bitcasted op that can be represented as another type, push the
29845 // bitcast to the inputs. This allows more opportunities for pattern
29846 // matching masked instructions. This is called when we know that the operation
29847 // is used as one of the inputs of a vselect.
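// For example (illustrative), if a masked select uses
//   (v16i32 bitcast (v8i64 X86ISD::VALIGN $a, $b, 1))
// the code below rewrites it as a v16i32 VALIGN of the bitcast inputs with
// the immediate rescaled to 2, so an AVX-512 masked VALIGND can be formed.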
29848 static bool combineBitcastForMaskedOp(SDValue OrigOp, SelectionDAG &DAG,
29849 TargetLowering::DAGCombinerInfo &DCI) {
29850 // Make sure we have a bitcast.
29851 if (OrigOp.getOpcode() != ISD::BITCAST)
29854 SDValue Op = OrigOp.getOperand(0);
29856 // If the operation is used by anything other than the bitcast, we shouldn't
29857 // do this combine as that would replicate the operation.
29858 if (!Op.hasOneUse())
29861 MVT VT = OrigOp.getSimpleValueType();
29862 MVT EltVT = VT.getVectorElementType();
29863 SDLoc DL(Op.getNode());
29865 auto BitcastAndCombineShuffle = [&](unsigned Opcode, SDValue Op0, SDValue Op1,
29867 Op0 = DAG.getBitcast(VT, Op0);
29868 DCI.AddToWorklist(Op0.getNode());
29869 Op1 = DAG.getBitcast(VT, Op1);
29870 DCI.AddToWorklist(Op1.getNode());
29871 DCI.CombineTo(OrigOp.getNode(),
29872 DAG.getNode(Opcode, DL, VT, Op0, Op1, Op2));
29876 unsigned Opcode = Op.getOpcode();
29878 case X86ISD::PALIGNR:
29879 // PALIGNR can be converted to VALIGND/Q for 128-bit vectors.
29880 if (!VT.is128BitVector())
29882 Opcode = X86ISD::VALIGN;
29884 case X86ISD::VALIGN: {
29885 if (EltVT != MVT::i32 && EltVT != MVT::i64)
29887 uint64_t Imm = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
29888 MVT OpEltVT = Op.getSimpleValueType().getVectorElementType();
29889 unsigned ShiftAmt = Imm * OpEltVT.getSizeInBits();
29890 unsigned EltSize = EltVT.getSizeInBits();
29891 // Make sure we can represent the same shift with the new VT.
29892 if ((ShiftAmt % EltSize) != 0)
29894 Imm = ShiftAmt / EltSize;
29895 return BitcastAndCombineShuffle(Opcode, Op.getOperand(0), Op.getOperand(1),
29896 DAG.getConstant(Imm, DL, MVT::i8));
29898 case X86ISD::SHUF128: {
29899 if (EltVT.getSizeInBits() != 32 && EltVT.getSizeInBits() != 64)
29901 // Only change element size, not type.
29902 if (VT.isInteger() != Op.getSimpleValueType().isInteger())
29904 return BitcastAndCombineShuffle(Opcode, Op.getOperand(0), Op.getOperand(1), Op.getOperand(2));
29907 case ISD::INSERT_SUBVECTOR: {
29908 unsigned EltSize = EltVT.getSizeInBits();
29909 if (EltSize != 32 && EltSize != 64)
29911 MVT OpEltVT = Op.getSimpleValueType().getVectorElementType();
29912 // Only change element size, not type.
29913 if (EltVT.isInteger() != OpEltVT.isInteger())
29915 uint64_t Imm = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
29916 Imm = (Imm * OpEltVT.getSizeInBits()) / EltSize;
29917 SDValue Op0 = DAG.getBitcast(VT, Op.getOperand(0));
29918 DCI.AddToWorklist(Op0.getNode());
29919 // Op1 needs to be bitcasted to a smaller vector with the same element type.
29920 SDValue Op1 = Op.getOperand(1);
29921 MVT Op1VT = MVT::getVectorVT(EltVT,
29922 Op1.getSimpleValueType().getSizeInBits() / EltSize);
29923 Op1 = DAG.getBitcast(Op1VT, Op1);
29924 DCI.AddToWorklist(Op1.getNode());
29925 DCI.CombineTo(OrigOp.getNode(),
29926 DAG.getNode(Opcode, DL, VT, Op0, Op1,
29927 DAG.getIntPtrConstant(Imm, DL)));
29930 case ISD::EXTRACT_SUBVECTOR: {
29931 unsigned EltSize = EltVT.getSizeInBits();
29932 if (EltSize != 32 && EltSize != 64)
29934 MVT OpEltVT = Op.getSimpleValueType().getVectorElementType();
29935 // Only change element size, not type.
29936 if (EltVT.isInteger() != OpEltVT.isInteger())
29938 uint64_t Imm = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
29939 Imm = (Imm * OpEltVT.getSizeInBits()) / EltSize;
29940 // Op0 needs to be bitcasted to a larger vector with the same element type.
29941 SDValue Op0 = Op.getOperand(0);
29942 MVT Op0VT = MVT::getVectorVT(EltVT,
29943 Op0.getSimpleValueType().getSizeInBits() / EltSize);
29944 Op0 = DAG.getBitcast(Op0VT, Op0);
29945 DCI.AddToWorklist(Op0.getNode());
29946 DCI.CombineTo(OrigOp.getNode(),
29947 DAG.getNode(Opcode, DL, VT, Op0,
29948 DAG.getIntPtrConstant(Imm, DL)));
29951 case X86ISD::SUBV_BROADCAST: {
29952 unsigned EltSize = EltVT.getSizeInBits();
29953 if (EltSize != 32 && EltSize != 64)
29955 // Only change element size, not type.
29956 if (VT.isInteger() != Op.getSimpleValueType().isInteger())
29958 SDValue Op0 = Op.getOperand(0);
29959 MVT Op0VT = MVT::getVectorVT(EltVT,
29960 Op0.getSimpleValueType().getSizeInBits() / EltSize);
29961 Op0 = DAG.getBitcast(Op0VT, Op.getOperand(0));
29962 DCI.AddToWorklist(Op0.getNode());
29963 DCI.CombineTo(OrigOp.getNode(),
29964 DAG.getNode(Opcode, DL, VT, Op0));
29972 /// Do target-specific dag combines on SELECT and VSELECT nodes.
29973 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
29974 TargetLowering::DAGCombinerInfo &DCI,
29975 const X86Subtarget &Subtarget) {
29977 SDValue Cond = N->getOperand(0);
29978 // Get the LHS/RHS of the select.
29979 SDValue LHS = N->getOperand(1);
29980 SDValue RHS = N->getOperand(2);
29981 EVT VT = LHS.getValueType();
29982 EVT CondVT = Cond.getValueType();
29983 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29985 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
29986 // instructions match the semantics of the common C idiom x<y?x:y but not
29987 // x<=y?x:y, because of how they handle negative zero (which can be
29988 // ignored in unsafe-math mode).
29989 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
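// For example (illustrative), the idiom (a < b) ? a : b on f32 can map to
// (X86ISD::FMIN a, b): MINSS returns its second operand when the operands are
// unordered or compare equal (e.g. +0.0 vs -0.0), which is why several of the
// predicates below only match after an operand swap or under no-NaNs /
// no-signed-zeros assumptions.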
29990 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
29991 VT != MVT::f80 && VT != MVT::f128 &&
29992 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
29993 (Subtarget.hasSSE2() ||
29994 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
29995 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
29997 unsigned Opcode = 0;
29998 // Check for x CC y ? x : y.
29999 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
30000 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
30004 // Converting this to a min would handle NaNs incorrectly, and swapping
30005 // the operands would cause it to handle comparisons between positive
30006 // and negative zero incorrectly.
30007 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
30008 if (!DAG.getTarget().Options.UnsafeFPMath &&
30009 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
30011 std::swap(LHS, RHS);
30013 Opcode = X86ISD::FMIN;
30016 // Converting this to a min would handle comparisons between positive
30017 // and negative zero incorrectly.
30018 if (!DAG.getTarget().Options.UnsafeFPMath &&
30019 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
30021 Opcode = X86ISD::FMIN;
30024 // Converting this to a min would handle both negative zeros and NaNs
30025 // incorrectly, but we can swap the operands to fix both.
30026 std::swap(LHS, RHS);
30031 Opcode = X86ISD::FMIN;
30035 // Converting this to a max would handle comparisons between positive
30036 // and negative zero incorrectly.
30037 if (!DAG.getTarget().Options.UnsafeFPMath &&
30038 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
30040 Opcode = X86ISD::FMAX;
30043 // Converting this to a max would handle NaNs incorrectly, and swapping
30044 // the operands would cause it to handle comparisons between positive
30045 // and negative zero incorrectly.
30046 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
30047 if (!DAG.getTarget().Options.UnsafeFPMath &&
30048 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
30050 std::swap(LHS, RHS);
30052 Opcode = X86ISD::FMAX;
30055 // Converting this to a max would handle both negative zeros and NaNs
30056 // incorrectly, but we can swap the operands to fix both.
30057 std::swap(LHS, RHS);
30062 Opcode = X86ISD::FMAX;
30065 // Check for x CC y ? y : x -- a min/max with reversed arms.
30066 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
30067 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
30071 // Converting this to a min would handle comparisons between positive
30072 // and negative zero incorrectly, and swapping the operands would
30073 // cause it to handle NaNs incorrectly.
30074 if (!DAG.getTarget().Options.UnsafeFPMath &&
30075 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
30076 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
30078 std::swap(LHS, RHS);
30080 Opcode = X86ISD::FMIN;
30083 // Converting this to a min would handle NaNs incorrectly.
30084 if (!DAG.getTarget().Options.UnsafeFPMath &&
30085 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
30087 Opcode = X86ISD::FMIN;
30090 // Converting this to a min would handle both negative zeros and NaNs
30091 // incorrectly, but we can swap the operands to fix both.
30092 std::swap(LHS, RHS);
30097 Opcode = X86ISD::FMIN;
30101 // Converting this to a max would handle NaNs incorrectly.
30102 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
30104 Opcode = X86ISD::FMAX;
30107 // Converting this to a max would handle comparisons between positive
30108 // and negative zero incorrectly, and swapping the operands would
30109 // cause it to handle NaNs incorrectly.
30110 if (!DAG.getTarget().Options.UnsafeFPMath &&
30111 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
30112 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
30114 std::swap(LHS, RHS);
30116 Opcode = X86ISD::FMAX;
30119 // Converting this to a max would handle both negative zeros and NaNs
30120 // incorrectly, but we can swap the operands to fix both.
30121 std::swap(LHS, RHS);
30126 Opcode = X86ISD::FMAX;
30132 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
30135 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
30136 // lowering on KNL. In this case we convert it to
30137 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
30138 // The same applies to all 128-bit and 256-bit vectors of i8 and i16.
30139 // Since SKX, these selects have a proper lowering.
30140 if (Subtarget.hasAVX512() && CondVT.isVector() &&
30141 CondVT.getVectorElementType() == MVT::i1 &&
30142 (VT.is128BitVector() || VT.is256BitVector()) &&
30143 (VT.getVectorElementType() == MVT::i8 ||
30144 VT.getVectorElementType() == MVT::i16) &&
30145 !(Subtarget.hasBWI() && Subtarget.hasVLX())) {
30146 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
30147 DCI.AddToWorklist(Cond.getNode());
30148 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
30151 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
30154 // Canonicalize max and min:
30155 // (x > y) ? x : y -> (x >= y) ? x : y
30156 // (x < y) ? x : y -> (x <= y) ? x : y
30157 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
30158 // the need for an extra compare
30159 // against zero. e.g.
30160 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
30162 // testl %edi, %edi
30164 // cmovgl %edi, %eax
30168 // cmovsl %eax, %edi
30169 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
30170 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
30171 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
30172 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
30177 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
30178 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
30179 Cond.getOperand(0), Cond.getOperand(1), NewCC);
30180 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
30185 // Early exit check
30186 if (!TLI.isTypeLegal(VT))
30189 // Match VSELECTs into subs with unsigned saturation.
30190 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
30191 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
30192 ((Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
30193 (Subtarget.hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
30194 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
30196 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
30197 // left side invert the predicate to simplify logic below.
30199 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
30201 CC = ISD::getSetCCInverse(CC, true);
30202 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
30206 if (Other.getNode() && Other->getNumOperands() == 2 &&
30207 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
30208 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
30209 SDValue CondRHS = Cond->getOperand(1);
30211 // Look for a general sub with unsigned saturation first.
30212 // x >= y ? x-y : 0 --> subus x, y
30213 // x > y ? x-y : 0 --> subus x, y
30214 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
30215 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
30216 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
30218 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
30219 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
30220 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
30221 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
30222 // If the RHS is a constant we have to reverse the const
30223 // canonicalization.
30224 // x > C-1 ? x+-C : 0 --> subus x, C
30225 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
30226 CondRHSConst->getAPIntValue() ==
30227 (-OpRHSConst->getAPIntValue() - 1))
30228 return DAG.getNode(
30229 X86ISD::SUBUS, DL, VT, OpLHS,
30230 DAG.getConstant(-OpRHSConst->getAPIntValue(), DL, VT));
30232 // Another special case: If C was a sign bit, the sub has been
30233 // canonicalized into a xor.
30234 // FIXME: Would it be better to use computeKnownBits to determine
30235 // whether it's safe to decanonicalize the xor?
30236 // x s< 0 ? x^C : 0 --> subus x, C
30237 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
30238 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
30239 OpRHSConst->getAPIntValue().isSignMask())
30240 // Note that we have to rebuild the RHS constant here to ensure we
30241 // don't rely on particular values of undef lanes.
30242 return DAG.getNode(
30243 X86ISD::SUBUS, DL, VT, OpLHS,
30244 DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT));
30249 if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
30252 // If this is a *dynamic* select (non-constant condition) and we can match
30253 // this node with one of the variable blend instructions, restructure the
30254 // condition so that blends can use the high (sign) bit of each element and
30255 // use SimplifyDemandedBits to simplify the condition operand.
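// For example (illustrative), BLENDVPS / PBLENDVB select each lane purely on
// the most significant bit of the corresponding mask element, so a condition
// that was sign-extended to all-ones/all-zeros can have everything but the
// per-element sign bit simplified away.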
30256 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
30257 !DCI.isBeforeLegalize() &&
30258 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
30259 unsigned BitWidth = Cond.getScalarValueSizeInBits();
30261 // Don't optimize vector selects that map to mask-registers.
30265 // We can only handle the cases where VSELECT is directly legal on the
30266 // subtarget. We custom lower VSELECT nodes with constant conditions and
30267 // this makes it hard to see whether a dynamic VSELECT will correctly
30268 // lower, so we both check the operation's status and explicitly handle the
30269 // cases where a *dynamic* blend will fail even though a constant-condition
30270 // blend could be custom lowered.
30271 // FIXME: We should find a better way to handle this class of problems.
30272 // Potentially, we should combine constant-condition vselect nodes
30273 // pre-legalization into shuffles and not mark as many types as custom
30275 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
30277 // FIXME: We don't support i16-element blends currently. We could and
30278 // should support them by making *all* the bits in the condition be set
30279 // rather than just the high bit and using an i8-element blend.
30280 if (VT.getVectorElementType() == MVT::i16)
30282 // Dynamic blending was only available from SSE4.1 onward.
30283 if (VT.is128BitVector() && !Subtarget.hasSSE41())
30285 // Byte blends are only available in AVX2
30286 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
30289 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
30290 APInt DemandedMask(APInt::getSignMask(BitWidth));
30292 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
30293 DCI.isBeforeLegalizeOps());
30294 if (TLI.ShrinkDemandedConstant(Cond, DemandedMask, TLO) ||
30295 TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO)) {
30296 // If we changed the computation somewhere in the DAG, this change will
30297 // affect all users of Cond. Make sure it is fine and update all the nodes
30298 // so that we do not use the generic VSELECT anymore. Otherwise, we may
30299 // perform wrong optimizations as we messed with the actual expectation
30300 // for the vector boolean values.
30301 if (Cond != TLO.Old) {
30302 // Check all uses of the condition operand to check whether it will be
30303 // consumed by non-BLEND instructions. Those may require that all bits
30304 // are set properly.
30305 for (SDNode *U : Cond->uses()) {
30306 // TODO: Add other opcodes eventually lowered into BLEND.
30307 if (U->getOpcode() != ISD::VSELECT)
30311 // Update all users of the condition before committing the change, so
30312 // that the VSELECT optimizations that expect the correct vector boolean
30313 // value will not be triggered.
30314 for (SDNode *U : Cond->uses()) {
30315 SDValue SB = DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(U),
30316 U->getValueType(0), Cond, U->getOperand(1),
30318 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
30320 DCI.CommitTargetLoweringOpt(TLO);
30323 // Only Cond (rather than other nodes in the computation chain) was
30324 // changed. Change the condition just for N to keep the opportunity to
30325 // optimize all other users their own way.
30326 SDValue SB = DAG.getNode(X86ISD::SHRUNKBLEND, DL, VT, TLO.New, LHS, RHS);
30327 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), SB);
30332 // Look for vselects with LHS/RHS being bitcasted from an operation that
30333 // can be executed on another type. Push the bitcast to the inputs of
30334 // the operation. This exposes opportunities for using masking instructions.
30335 if (N->getOpcode() == ISD::VSELECT && DCI.isAfterLegalizeVectorOps() &&
30336 CondVT.getVectorElementType() == MVT::i1) {
30337 if (combineBitcastForMaskedOp(LHS, DAG, DCI))
30338 return SDValue(N, 0);
30339 if (combineBitcastForMaskedOp(RHS, DAG, DCI))
30340 return SDValue(N, 0);
30347 /// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
30349 /// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
30350 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
30351 /// Note that this is only legal for some op/cc combinations.
30352 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
30353 SelectionDAG &DAG) {
30354 // This combine only operates on CMP-like nodes.
30355 if (!(Cmp.getOpcode() == X86ISD::CMP ||
30356 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
30359 // Can't replace the cmp if it has more uses than the one we're looking at.
30360 // FIXME: We would like to be able to handle this, but would need to make sure
30361 // all uses were updated.
30362 if (!Cmp.hasOneUse())
30365 // This only applies to variations of the common case:
30366 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
30367 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
30368 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
30369 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
30370 // Using the proper condcodes (see below), overflow is checked for.
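// For example (illustrative, hypothetical source): for
//   if ((int)__atomic_fetch_add(&x, 1, __ATOMIC_SEQ_CST) < 0) ...
// the separate test of the returned value can be dropped and the flags of the
// LOCK ADD reused by branching on COND_LE (old + 1 <= 0) instead of COND_S on
// the old value.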
30372 // FIXME: We can generalize both constraints:
30373 // - XOR/OR/AND (if they were made to survive AtomicExpand)
30375 // if the result is compared.
30377 SDValue CmpLHS = Cmp.getOperand(0);
30378 SDValue CmpRHS = Cmp.getOperand(1);
30380 if (!CmpLHS.hasOneUse())
30383 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
30384 if (!CmpRHSC || CmpRHSC->getZExtValue() != 0)
30387 const unsigned Opc = CmpLHS.getOpcode();
30389 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
30392 SDValue OpRHS = CmpLHS.getOperand(2);
30393 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
30397 APInt Addend = OpRHSC->getAPIntValue();
30398 if (Opc == ISD::ATOMIC_LOAD_SUB)
30401 if (CC == X86::COND_S && Addend == 1)
30403 else if (CC == X86::COND_NS && Addend == 1)
30405 else if (CC == X86::COND_G && Addend == -1)
30407 else if (CC == X86::COND_LE && Addend == -1)
30412 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG);
30413 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
30414 DAG.getUNDEF(CmpLHS.getValueType()));
30415 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
30419 // Check whether a boolean test is testing a boolean value generated by
30420 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition flag.
30423 // Simplify the following patterns:
30424 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
30425 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
30426 // to (Op EFLAGS Cond)
30428 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
30429 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
30430 // to (Op EFLAGS !Cond)
30432 // where Op could be BRCOND or CMOV.
30434 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
30435 // This combine only operates on CMP-like nodes.
30436 if (!(Cmp.getOpcode() == X86ISD::CMP ||
30437 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
30440 // Quit if not used as a boolean value.
30441 if (CC != X86::COND_E && CC != X86::COND_NE)
30444 // Check CMP operands. One of them should be 0 or 1 and the other should be
30445 // an SetCC or extended from it.
30446 SDValue Op1 = Cmp.getOperand(0);
30447 SDValue Op2 = Cmp.getOperand(1);
30450 const ConstantSDNode* C = nullptr;
30451 bool needOppositeCond = (CC == X86::COND_E);
30452 bool checkAgainstTrue = false; // Is it a comparison against 1?
30454 if ((C = dyn_cast<ConstantSDNode>(Op1)))
30456 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
30458 else // Quit if neither operand is a constant.
30461 if (C->getZExtValue() == 1) {
30462 needOppositeCond = !needOppositeCond;
30463 checkAgainstTrue = true;
30464 } else if (C->getZExtValue() != 0)
30465 // Quit if the constant is neither 0 nor 1.
30468 bool truncatedToBoolWithAnd = false;
30469 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
30470 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
30471 SetCC.getOpcode() == ISD::TRUNCATE ||
30472 SetCC.getOpcode() == ISD::AND) {
30473 if (SetCC.getOpcode() == ISD::AND) {
30475 if (isOneConstant(SetCC.getOperand(0)))
30477 if (isOneConstant(SetCC.getOperand(1)))
30481 SetCC = SetCC.getOperand(OpIdx);
30482 truncatedToBoolWithAnd = true;
30484 SetCC = SetCC.getOperand(0);
30487 switch (SetCC.getOpcode()) {
30488 case X86ISD::SETCC_CARRY:
30489 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
30490 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
30491 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
30492 // truncated to i1 using 'and'.
30493 if (checkAgainstTrue && !truncatedToBoolWithAnd)
30495 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
30496 "Invalid use of SETCC_CARRY!");
30498 case X86ISD::SETCC:
30499 // Set the condition code or opposite one if necessary.
30500 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
30501 if (needOppositeCond)
30502 CC = X86::GetOppositeBranchCondition(CC);
30503 return SetCC.getOperand(1);
30504 case X86ISD::CMOV: {
30505 // Check whether the false/true values are canonical, i.e. 0 or 1.
30506 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
30507 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
30508 // Quit if true value is not a constant.
30511 // Quit if false value is not a constant.
30513 SDValue Op = SetCC.getOperand(0);
30514 // Skip 'zext' or 'trunc' node.
30515 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
30516 Op.getOpcode() == ISD::TRUNCATE)
30517 Op = Op.getOperand(0);
30518 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
30520 if ((Op.getOpcode() != X86ISD::RDRAND &&
30521 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
30524 // Quit if false value is not the constant 0 or 1.
30525 bool FValIsFalse = true;
30526 if (FVal && FVal->getZExtValue() != 0) {
30527 if (FVal->getZExtValue() != 1)
30529 // If FVal is 1, opposite cond is needed.
30530 needOppositeCond = !needOppositeCond;
30531 FValIsFalse = false;
30533 // Quit if TVal is not the constant opposite of FVal.
30534 if (FValIsFalse && TVal->getZExtValue() != 1)
30536 if (!FValIsFalse && TVal->getZExtValue() != 0)
30538 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
30539 if (needOppositeCond)
30540 CC = X86::GetOppositeBranchCondition(CC);
30541 return SetCC.getOperand(3);
30548 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
30550 /// (X86or (X86setcc) (X86setcc))
30551 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
30552 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
30553 X86::CondCode &CC1, SDValue &Flags,
30555 if (Cond->getOpcode() == X86ISD::CMP) {
30556 if (!isNullConstant(Cond->getOperand(1)))
30559 Cond = Cond->getOperand(0);
30564 SDValue SetCC0, SetCC1;
30565 switch (Cond->getOpcode()) {
30566 default: return false;
30573 SetCC0 = Cond->getOperand(0);
30574 SetCC1 = Cond->getOperand(1);
30578 // Make sure we have SETCC nodes, using the same flags value.
30579 if (SetCC0.getOpcode() != X86ISD::SETCC ||
30580 SetCC1.getOpcode() != X86ISD::SETCC ||
30581 SetCC0->getOperand(1) != SetCC1->getOperand(1))
30584 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
30585 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
30586 Flags = SetCC0->getOperand(1);
30590 /// Optimize an EFLAGS definition used according to the condition code \p CC
30591 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
30592 /// uses of chain values.
30593 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
30594 SelectionDAG &DAG) {
30595 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
30597 return combineSetCCAtomicArith(EFLAGS, CC, DAG);
30600 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
30601 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
30602 TargetLowering::DAGCombinerInfo &DCI,
30603 const X86Subtarget &Subtarget) {
30606 // If the flag operand isn't dead, don't touch this CMOV.
30607 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
30610 SDValue FalseOp = N->getOperand(0);
30611 SDValue TrueOp = N->getOperand(1);
30612 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
30613 SDValue Cond = N->getOperand(3);
30615 if (CC == X86::COND_E || CC == X86::COND_NE) {
30616 switch (Cond.getOpcode()) {
30620 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
30621 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
30622 return (CC == X86::COND_E) ? FalseOp : TrueOp;
30626 // Try to simplify the EFLAGS and condition code operands.
30627 // We can't always do this as FCMOV only supports a subset of X86 cond.
30628 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG)) {
30629 if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
30630 SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
30632 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
30636 // If this is a select between two integer constants, try to do some
30637 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
30639 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
30640 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
30641 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
30642 // larger than FalseC (the false value).
30643 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
30644 CC = X86::GetOppositeBranchCondition(CC);
30645 std::swap(TrueC, FalseC);
30646 std::swap(TrueOp, FalseOp);
30649 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
30650 // This is efficient for any integer data type (including i8/i16) and shift amount.
30652 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
30653 Cond = getSETCC(CC, Cond, DL, DAG);
30655 // Zero extend the condition if needed.
30656 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
30658 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
30659 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
30660 DAG.getConstant(ShAmt, DL, MVT::i8));
30661 if (N->getNumValues() == 2) // Dead flag value?
30662 return DCI.CombineTo(N, Cond, SDValue());
30666 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
30667 // for any integer data type, including i8/i16.
30668 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
30669 Cond = getSETCC(CC, Cond, DL, DAG);
30671 // Zero extend the condition if needed.
30672 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
30673 FalseC->getValueType(0), Cond);
30674 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
30675 SDValue(FalseC, 0));
30677 if (N->getNumValues() == 2) // Dead flag value?
30678 return DCI.CombineTo(N, Cond, SDValue());
30682 // Optimize cases that will turn into an LEA instruction. This requires
30683 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
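// Illustrative example: C ? 14 : 10 has Diff == 4, so it can be emitted as
// roughly: setcc; zero-extend; leal 10(,%cond,4), %eax
// i.e. scale the 0/1 condition by the difference and add the false value.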
30684 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
30685 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
30686 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
30688 bool isFastMultiplier = false;
30690 switch ((unsigned char)Diff) {
30692 case 1: // result = add base, cond
30693 case 2: // result = lea base( , cond*2)
30694 case 3: // result = lea base(cond, cond*2)
30695 case 4: // result = lea base( , cond*4)
30696 case 5: // result = lea base(cond, cond*4)
30697 case 8: // result = lea base( , cond*8)
30698 case 9: // result = lea base(cond, cond*8)
30699 isFastMultiplier = true;
30704 if (isFastMultiplier) {
30705 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
30706 Cond = getSETCC(CC, Cond, DL, DAG);
30707 // Zero extend the condition if needed.
30708 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
30710 // Scale the condition by the difference.
30712 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
30713 DAG.getConstant(Diff, DL, Cond.getValueType()));
30715 // Add the base if non-zero.
30716 if (FalseC->getAPIntValue() != 0)
30717 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
30718 SDValue(FalseC, 0));
30719 if (N->getNumValues() == 2) // Dead flag value?
30720 return DCI.CombineTo(N, Cond, SDValue());
30727 // Handle these cases:
30728 // (select (x != c), e, c) -> (select (x != c), e, x),
30729 // (select (x == c), c, e) -> (select (x == c), x, e)
30730 // where the c is an integer constant, and the "select" is the combination
30731 // of CMOV and CMP.
30733 // The rationale for this change is that a conditional-move from a constant
30734 // needs two instructions; a conditional-move from a register, however, needs
30735 // only one instruction.
30737 // CAVEAT: Replacing a constant with a symbolic value may obscure
30738 // some instruction-combining opportunities, so this optimization needs to be
30739 // postponed as late as possible.
30741 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
30742 // The DCI.xxxx conditions are provided to postpone the optimization as
30743 // late as possible.
30745 ConstantSDNode *CmpAgainst = nullptr;
30746 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
30747 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
30748 !isa<ConstantSDNode>(Cond.getOperand(0))) {
30750 if (CC == X86::COND_NE &&
30751 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
30752 CC = X86::GetOppositeBranchCondition(CC);
30753 std::swap(TrueOp, FalseOp);
30756 if (CC == X86::COND_E &&
30757 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
30758 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
30759 DAG.getConstant(CC, DL, MVT::i8), Cond };
30760 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
30765 // Fold and/or of setcc's to double CMOV:
30766 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
30767 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
30769 // This combine lets us generate:
30770 // cmovcc1 (jcc1 if we don't have CMOV)
30771 // cmovcc2 (same)
30772 // instead of:
30773 // setcc1
30774 // setcc2
30775 // and/or
30776 // cmovne (jne if we don't have CMOV)
30777 // When we can't use the CMOV instruction, it might increase branch mispredicts.
30779 // When we can use CMOV, or when there is no mispredict, this improves
30780 // throughput and reduces register pressure.
30782 if (CC == X86::COND_NE) {
30784 X86::CondCode CC0, CC1;
30786 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
30788 std::swap(FalseOp, TrueOp);
30789 CC0 = X86::GetOppositeBranchCondition(CC0);
30790 CC1 = X86::GetOppositeBranchCondition(CC1);
30793 SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
30795 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), LOps);
30796 SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
30797 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
30798 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(CMOV.getNode(), 1));
30806 /// Different mul shrinking modes.
30807 enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
30809 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
30810 EVT VT = N->getOperand(0).getValueType();
30811 if (VT.getScalarSizeInBits() != 32)
30814 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
30815 unsigned SignBits[2] = {1, 1};
30816 bool IsPositive[2] = {false, false};
30817 for (unsigned i = 0; i < 2; i++) {
30818 SDValue Opd = N->getOperand(i);
30820 // DAG.ComputeNumSignBits returns 1 for ISD::ANY_EXTEND, so we need to
30821 // compute the sign bits for it separately.
30822 if (Opd.getOpcode() == ISD::ANY_EXTEND) {
30823 // For anyextend, it is safe to assume an appropriate number of leading sign/zero bits.
30825 if (Opd.getOperand(0).getValueType().getVectorElementType() == MVT::i8)
30827 else if (Opd.getOperand(0).getValueType().getVectorElementType() ==
30832 IsPositive[i] = true;
30833 } else if (Opd.getOpcode() == ISD::BUILD_VECTOR) {
30834 // All the operands of BUILD_VECTOR need to be integer constants.
30835 // Find the smallest value range which all the operands belong to.
30837 IsPositive[i] = true;
30838 for (const SDValue &SubOp : Opd.getNode()->op_values()) {
30839 if (SubOp.isUndef())
30841 auto *CN = dyn_cast<ConstantSDNode>(SubOp);
30844 APInt IntVal = CN->getAPIntValue();
30845 if (IntVal.isNegative())
30846 IsPositive[i] = false;
30847 SignBits[i] = std::min(SignBits[i], IntVal.getNumSignBits());
30850 SignBits[i] = DAG.ComputeNumSignBits(Opd);
30851 if (Opd.getOpcode() == ISD::ZERO_EXTEND)
30852 IsPositive[i] = true;
30856 bool AllPositive = IsPositive[0] && IsPositive[1];
30857 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
30858 // When ranges are from -128 ~ 127, use MULS8 mode.
30859 if (MinSignBits >= 25)
30861 // When ranges are from 0 ~ 255, use MULU8 mode.
30862 else if (AllPositive && MinSignBits >= 24)
30864 // When ranges are from -32768 ~ 32767, use MULS16 mode.
30865 else if (MinSignBits >= 17)
30867 // When ranges are from 0 ~ 65535, use MULU16 mode.
30868 else if (AllPositive && MinSignBits >= 16)
30875 /// When the operands of vector mul are extended from smaller size values,
30876 /// like i8 and i16, the type of mul may be shrunk to generate more
30877 /// efficient code. Two typical patterns are handled:
30879 /// %2 = sext/zext <N x i8> %1 to <N x i32>
30880 /// %4 = sext/zext <N x i8> %3 to <N x i32>
30881 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
30882 /// %5 = mul <N x i32> %2, %4
30885 /// %2 = zext/sext <N x i16> %1 to <N x i32>
30886 /// %4 = zext/sext <N x i16> %3 to <N x i32>
30887 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
30888 /// %5 = mul <N x i32> %2, %4
30890 /// There are four mul shrinking modes:
30891 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
30892 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
30893 /// generate pmullw+sext32 for it (MULS8 mode).
30894 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
30895 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
30896 /// generate pmullw+zext32 for it (MULU8 mode).
30897 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
30898 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
30899 /// generate pmullw+pmulhw for it (MULS16 mode).
30900 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
30901 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
30902 /// generate pmullw+pmulhuw for it (MULU16 mode).
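/// Illustrative example: a v8i32 multiply whose operands are each known to fit
/// in 16 signed bits (MULS16 mode) can be emitted as one pmullw plus one
/// pmulhw on the truncated v8i16 operands, with the 16-bit low/high halves
/// re-interleaved by punpcklwd/punpckhwd to rebuild the v8i32 result.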
30903 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
30904 const X86Subtarget &Subtarget) {
30905 // Check for legality
30906 // pmullw/pmulhw on XMM registers require SSE2.
30907 if (!Subtarget.hasSSE2())
30910 // Check for profitability
30911 // pmulld is supported since SSE4.1. It is better to use pmulld
30912 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than pmullw+pmulhw.
30914 bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
30915 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
30919 if (!canReduceVMulWidth(N, DAG, Mode))
30923 SDValue N0 = N->getOperand(0);
30924 SDValue N1 = N->getOperand(1);
30925 EVT VT = N->getOperand(0).getValueType();
30926 unsigned RegSize = 128;
30927 MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
30929 EVT::getVectorVT(*DAG.getContext(), MVT::i16, VT.getVectorNumElements());
30930 // Shrink the operands of mul.
30931 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
30932 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
30934 if (VT.getVectorNumElements() >= OpsVT.getVectorNumElements()) {
30935 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
30936 // lower part is needed.
30937 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
30938 if (Mode == MULU8 || Mode == MULS8) {
30939 return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
30942 MVT ResVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
30943 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
30944 // the higher part is also needed.
30945 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
30946 ReducedVT, NewN0, NewN1);
30948 // Repack the lower part and higher part result of mul into a wider result.
30950 // Generate shuffle functioning as punpcklwd.
30951 SmallVector<int, 16> ShuffleMask(VT.getVectorNumElements());
30952 for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) {
30953 ShuffleMask[2 * i] = i;
30954 ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements();
30957 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
30958 ResLo = DAG.getNode(ISD::BITCAST, DL, ResVT, ResLo);
30959 // Generate shuffle functioning as punpckhwd.
30960 for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) {
30961 ShuffleMask[2 * i] = i + VT.getVectorNumElements() / 2;
30962 ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements() * 3 / 2;
30965 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
30966 ResHi = DAG.getNode(ISD::BITCAST, DL, ResVT, ResHi);
30967 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
30970 // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
30971 // to legalize the mul explicitly because implicit legalization for type
30972 // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
30973 // instructions which will not exist when we explicitly legalize it by
30974 // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
30975 // <4 x i16> undef).
30977 // Legalize the operands of mul.
30978 // FIXME: We may be able to handle non-concatenated vectors by insertion.
30979 unsigned ReducedSizeInBits = ReducedVT.getSizeInBits();
30980 if ((RegSize % ReducedSizeInBits) != 0)
30983 SmallVector<SDValue, 16> Ops(RegSize / ReducedSizeInBits,
30984 DAG.getUNDEF(ReducedVT));
30986 NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
30988 NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
30990 if (Mode == MULU8 || Mode == MULS8) {
30991 // Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower part is needed.
30993 SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
30995 // convert the type of mul result to VT.
30996 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
30997 SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
30998 : ISD::SIGN_EXTEND_VECTOR_INREG,
31000 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
31001 DAG.getIntPtrConstant(0, DL));
31003 // Generate the lower (pmullw) and higher (pmulhw/pmulhuw) parts of the mul. For
31004 // MULU16/MULS16, both parts are needed.
31005 SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
31006 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
31007 OpsVT, NewN0, NewN1);
31009 // Repack the lower part and higher part result of mul into a wider
31010 // result. Make sure the type of mul result is VT.
31011 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
31012 SDValue Res = DAG.getNode(X86ISD::UNPCKL, DL, OpsVT, MulLo, MulHi);
31013 Res = DAG.getNode(ISD::BITCAST, DL, ResVT, Res);
31014 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
31015 DAG.getIntPtrConstant(0, DL));
31020 /// Optimize a single multiply with constant into two operations in order to
31021 /// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
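/// Illustrative example: x * 45 decomposes as (x * 9) * 5, i.e. two LEAs:
///   leaq (%rdi,%rdi,8), %rax
///   leaq (%rax,%rax,4), %rax
/// (AT&T syntax; exact registers depend on allocation).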
31022 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
31023 TargetLowering::DAGCombinerInfo &DCI,
31024 const X86Subtarget &Subtarget) {
31025 EVT VT = N->getValueType(0);
31026 if (DCI.isBeforeLegalize() && VT.isVector())
31027 return reduceVMULWidth(N, DAG, Subtarget);
31029 // An imul is usually smaller than the alternative sequence.
31030 if (DAG.getMachineFunction().getFunction()->optForMinSize())
31033 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
31036 if (VT != MVT::i64 && VT != MVT::i32)
31039 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
31042 uint64_t MulAmt = C->getZExtValue();
31043 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
31046 uint64_t MulAmt1 = 0;
31047 uint64_t MulAmt2 = 0;
31048 if ((MulAmt % 9) == 0) {
31050 MulAmt2 = MulAmt / 9;
31051 } else if ((MulAmt % 5) == 0) {
31053 MulAmt2 = MulAmt / 5;
31054 } else if ((MulAmt % 3) == 0) {
31056 MulAmt2 = MulAmt / 3;
31062 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
31064 if (isPowerOf2_64(MulAmt2) &&
31065 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
31066 // If the second multiplier is pow2, issue it first. We want the multiply by
31067 // 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
31069 std::swap(MulAmt1, MulAmt2);
31071 if (isPowerOf2_64(MulAmt1))
31072 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
31073 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
31075 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
31076 DAG.getConstant(MulAmt1, DL, VT));
31078 if (isPowerOf2_64(MulAmt2))
31079 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
31080 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
31082 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
31083 DAG.getConstant(MulAmt2, DL, VT));
31087 assert(MulAmt != 0 &&
31088 MulAmt != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
31089 "Both cases that could cause potential overflows should have "
31090 "already been handled.");
31091 int64_t SignMulAmt = C->getSExtValue();
31092 if ((SignMulAmt != INT64_MIN) && (SignMulAmt != INT64_MAX) &&
31093 (SignMulAmt != -INT64_MAX)) {
31094 int NumSign = SignMulAmt > 0 ? 1 : -1;
31095 bool IsPowerOf2_64PlusOne = isPowerOf2_64(NumSign * SignMulAmt - 1);
31096 bool IsPowerOf2_64MinusOne = isPowerOf2_64(NumSign * SignMulAmt + 1);
31097 if (IsPowerOf2_64PlusOne) {
31098 // (mul x, 2^N + 1) => (add (shl x, N), x)
31099 NewMul = DAG.getNode(
31100 ISD::ADD, DL, VT, N->getOperand(0),
31101 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
31102 DAG.getConstant(Log2_64(NumSign * SignMulAmt - 1), DL,
31104 } else if (IsPowerOf2_64MinusOne) {
31105 // (mul x, 2^N - 1) => (sub (shl x, N), x)
31106 NewMul = DAG.getNode(
31108 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
31109 DAG.getConstant(Log2_64(NumSign * SignMulAmt + 1), DL,
31113 // To negate, subtract the number from zero
31114 if ((IsPowerOf2_64PlusOne || IsPowerOf2_64MinusOne) && NumSign == -1)
31116 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
31121 // Do not add new nodes to DAG combiner worklist.
31122 DCI.CombineTo(N, NewMul, false);
31127 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
31128 SDValue N0 = N->getOperand(0);
31129 SDValue N1 = N->getOperand(1);
31130 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
31131 EVT VT = N0.getValueType();
31133 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
31134 // since the result of setcc_c is all zeros or all ones.
31135 if (VT.isInteger() && !VT.isVector() &&
31136 N1C && N0.getOpcode() == ISD::AND &&
31137 N0.getOperand(1).getOpcode() == ISD::Constant) {
31138 SDValue N00 = N0.getOperand(0);
31139 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
31140 Mask <<= N1C->getAPIntValue();
31141 bool MaskOK = false;
31142 // We can handle cases concerning bit-widening nodes containing setcc_c if
31143 // we carefully interrogate the mask to make sure we are semantics preserving.
31145 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
31146 // of the underlying setcc_c operation if the setcc_c was zero extended.
31147 // Consider the following example:
31148 // zext(setcc_c) -> i32 0x0000FFFF
31149 // c1 -> i32 0x0000FFFF
31150 // c2 -> i32 0x00000001
31151 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
31152 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
31153 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
31155 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
31156 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
31158 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
31159 N00.getOpcode() == ISD::ANY_EXTEND) &&
31160 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
31161 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
31163 if (MaskOK && Mask != 0) {
31165 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
31169 // Hardware support for vector shifts is sparse, which makes us scalarize the
31170 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL:
31172 // (shl V, 1) -> add V,V
31173 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
31174 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
31175 assert(N0.getValueType().isVector() && "Invalid vector shift type");
31176 // We shift all of the values by one. In many cases we do not have
31177 // hardware support for this operation. This is better expressed as an ADD of two values.
31179 if (N1SplatC->getAPIntValue() == 1)
31180 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
31186 static SDValue combineShiftRightAlgebraic(SDNode *N, SelectionDAG &DAG) {
31187 SDValue N0 = N->getOperand(0);
31188 SDValue N1 = N->getOperand(1);
31189 EVT VT = N0.getValueType();
31190 unsigned Size = VT.getSizeInBits();
31192 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
31193 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
31194 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
31195 // depending on sign of (SarConst - [56,48,32,24,16])
31197 // sexts on X86 are MOVs. The MOVs have the same code size
31198 // as the SHIFTs above (only a SHIFT by 1 has a shorter encoding).
31199 // However, the MOVs have two advantages over a SHIFT:
31200 // 1. MOVs can write to a register that differs from the source.
31201 // 2. MOVs accept memory operands.
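// Illustrative example on i64: (sra (shl x, 56), 48) matches the i8 sub-type
// (64 - 8 == 56), and SarConst - (Size - ShiftSize) == 48 - 56 == -8 is
// negative, so it becomes (shl (sext_inreg x, i8), 8), i.e. a movsbq-style
// sign extension followed by a single shift.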
31203 if (!VT.isInteger() || VT.isVector() || N1.getOpcode() != ISD::Constant ||
31204 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
31205 N0.getOperand(1).getOpcode() != ISD::Constant)
31208 SDValue N00 = N0.getOperand(0);
31209 SDValue N01 = N0.getOperand(1);
31210 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
31211 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
31212 EVT CVT = N1.getValueType();
31214 if (SarConst.isNegative())
31217 for (MVT SVT : MVT::integer_valuetypes()) {
31218 unsigned ShiftSize = SVT.getSizeInBits();
31219 // Skip types without a corresponding sext/zext and
31220 // ShlConst values that are not one of [56,48,32,24,16].
31221 if (ShiftSize < 8 || ShiftSize > 64 || ShlConst != Size - ShiftSize)
31225 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
31226 SarConst = SarConst - (Size - ShiftSize);
31229 else if (SarConst.isNegative())
31230 return DAG.getNode(ISD::SHL, DL, VT, NN,
31231 DAG.getConstant(-SarConst, DL, CVT));
31233 return DAG.getNode(ISD::SRA, DL, VT, NN,
31234 DAG.getConstant(SarConst, DL, CVT));
31239 /// \brief Returns a vector of 0s if the input node is a vector logical
31240 /// shift by a constant amount which is known to be bigger than or equal
31241 /// to the vector element size in bits.
31242 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
31243 const X86Subtarget &Subtarget) {
31244 EVT VT = N->getValueType(0);
31246 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
31247 (!Subtarget.hasInt256() ||
31248 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
31251 SDValue Amt = N->getOperand(1);
31253 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
31254 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
31255 const APInt &ShiftAmt = AmtSplat->getAPIntValue();
31256 unsigned MaxAmount =
31257 VT.getSimpleVT().getScalarSizeInBits();
31259 // SSE2/AVX2 logical shifts always return a vector of 0s
31260 // if the shift amount is bigger than or equal to
31261 // the element size. The constant shift amount will be
31262 // encoded as an 8-bit immediate.
31263 if (ShiftAmt.trunc(8).uge(MaxAmount))
31264 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, DL);
31270 static SDValue combineShift(SDNode* N, SelectionDAG &DAG,
31271 TargetLowering::DAGCombinerInfo &DCI,
31272 const X86Subtarget &Subtarget) {
31273 if (N->getOpcode() == ISD::SHL)
31274 if (SDValue V = combineShiftLeft(N, DAG))
31277 if (N->getOpcode() == ISD::SRA)
31278 if (SDValue V = combineShiftRightAlgebraic(N, DAG))
31281 // Try to fold this logical shift into a zero vector.
31282 if (N->getOpcode() != ISD::SRA)
31283 if (SDValue V = performShiftToAllZeros(N, DAG, Subtarget))
31289 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
31290 TargetLowering::DAGCombinerInfo &DCI,
31291 const X86Subtarget &Subtarget) {
31292 unsigned Opcode = N->getOpcode();
31293 assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
31294 X86ISD::VSRLI == Opcode) &&
31295 "Unexpected shift opcode");
31296 bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
31297 EVT VT = N->getValueType(0);
31298 SDValue N0 = N->getOperand(0);
31299 SDValue N1 = N->getOperand(1);
31300 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
31301 assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
31302 "Unexpected value type");
31304 // Out of range logical bit shifts are guaranteed to be zero.
31305 // Out of range arithmetic bit shifts splat the sign bit.
31306 APInt ShiftVal = cast<ConstantSDNode>(N1)->getAPIntValue();
31307 if (ShiftVal.zextOrTrunc(8).uge(NumBitsPerElt)) {
31309 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(N));
31311 ShiftVal = NumBitsPerElt - 1;
31314 // Shift N0 by zero -> N0.
31318 // Shift zero -> zero.
31319 if (ISD::isBuildVectorAllZeros(N0.getNode()))
31320 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(N));
31322 // fold (VSRLI (VSRAI X, Y), 31) -> (VSRLI X, 31).
31323 // This VSRLI only looks at the sign bit, which is unmodified by VSRAI.
31324 // TODO - support other sra opcodes as needed.
31325 if (Opcode == X86ISD::VSRLI && (ShiftVal + 1) == NumBitsPerElt &&
31326 N0.getOpcode() == X86ISD::VSRAI)
31327 return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, N0.getOperand(0), N1);
31329 // We can decode 'whole byte' logical bit shifts as shuffles.
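// For example, (VSRLI v2i64 X, 16) is the per-lane byte shuffle
// <2,3,4,5,6,7,zero,zero> on each 64-bit lane, which the recursive shuffle
// combine below may be able to merge with neighbouring shuffles.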
31330 if (LogicalShift && (ShiftVal.getZExtValue() % 8) == 0) {
31332 SmallVector<int, 1> NonceMask; // Just a placeholder.
31333 NonceMask.push_back(0);
31334 if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask, {},
31335 /*Depth*/ 1, /*HasVarMask*/ false, DAG,
31337 return SDValue(); // This routine will use CombineTo to replace N.
31340 // Constant Folding.
31342 SmallVector<APInt, 32> EltBits;
31343 if (N->isOnlyUserOf(N0.getNode()) &&
31344 getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
31345 assert(EltBits.size() == VT.getVectorNumElements() &&
31346 "Unexpected shift value type");
31347 unsigned ShiftImm = ShiftVal.getZExtValue();
31348 for (APInt &Elt : EltBits) {
31349 if (X86ISD::VSHLI == Opcode)
31351 else if (X86ISD::VSRAI == Opcode)
31352 Elt.ashrInPlace(ShiftImm);
31354 Elt.lshrInPlace(ShiftImm);
31356 return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
31362 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
31363 TargetLowering::DAGCombinerInfo &DCI,
31364 const X86Subtarget &Subtarget) {
31366 ((N->getOpcode() == X86ISD::PINSRB && N->getValueType(0) == MVT::v16i8) ||
31367 (N->getOpcode() == X86ISD::PINSRW &&
31368 N->getValueType(0) == MVT::v8i16)) &&
31369 "Unexpected vector insertion");
31371 // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
31373 SmallVector<int, 1> NonceMask; // Just a placeholder.
31374 NonceMask.push_back(0);
31375 combineX86ShufflesRecursively({Op}, 0, Op, NonceMask, {},
31376 /*Depth*/ 1, /*HasVarMask*/ false, DAG,
31381 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
31382 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
31383 /// OR -> CMPNEQSS.
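/// For example, an ordered float equality like (a == b) is normally lowered
/// to one ucomiss feeding both sete and setnp, ANDed together; this combine
/// rewrites that pair into a single cmpeqss whose result is masked with 1.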
31384 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
31385 TargetLowering::DAGCombinerInfo &DCI,
31386 const X86Subtarget &Subtarget) {
31389 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
31390 // we're requiring SSE2 for both.
31391 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
31392 SDValue N0 = N->getOperand(0);
31393 SDValue N1 = N->getOperand(1);
31394 SDValue CMP0 = N0->getOperand(1);
31395 SDValue CMP1 = N1->getOperand(1);
31398 // The SETCCs should both refer to the same CMP.
31399 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
31402 SDValue CMP00 = CMP0->getOperand(0);
31403 SDValue CMP01 = CMP0->getOperand(1);
31404 EVT VT = CMP00.getValueType();
31406 if (VT == MVT::f32 || VT == MVT::f64) {
31407 bool ExpectingFlags = false;
31408 // Check for any users that want flags:
31409 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
31410 !ExpectingFlags && UI != UE; ++UI)
31411 switch (UI->getOpcode()) {
31416 ExpectingFlags = true;
31418 case ISD::CopyToReg:
31419 case ISD::SIGN_EXTEND:
31420 case ISD::ZERO_EXTEND:
31421 case ISD::ANY_EXTEND:
31425 if (!ExpectingFlags) {
31426 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
31427 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
31429 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
31430 X86::CondCode tmp = cc0;
31435 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
31436 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
31437 // FIXME: need symbolic constants for these magic numbers.
31438 // See X86ATTInstPrinter.cpp:printSSECC().
31439 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
31440 if (Subtarget.hasAVX512()) {
31442 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
31443 DAG.getConstant(x86cc, DL, MVT::i8));
31444 return DAG.getNode(X86ISD::VEXTRACT, DL, N->getSimpleValueType(0),
31445 FSetCC, DAG.getIntPtrConstant(0, DL));
31447 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
31448 CMP00.getValueType(), CMP00, CMP01,
31449 DAG.getConstant(x86cc, DL,
31452 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
31453 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
31455 if (is64BitFP && !Subtarget.is64Bit()) {
31456 // On a 32-bit target, we cannot bitcast the 64-bit float to a
31457 // 64-bit integer, since that's not a legal type. Since
31458 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
31459 // bits, but can do this little dance to extract the lowest 32 bits
31460 // and work with those going forward.
31461 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
31463 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
31464 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
31465 Vector32, DAG.getIntPtrConstant(0, DL));
31469 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
31470 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
31471 DAG.getConstant(1, DL, IntVT));
31472 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
31474 return OneBitOfTruth;
31482 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
31483 static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
31484 assert(N->getOpcode() == ISD::AND);
31486 EVT VT = N->getValueType(0);
31487 SDValue N0 = N->getOperand(0);
31488 SDValue N1 = N->getOperand(1);
31491 if (VT != MVT::v2i64 && VT != MVT::v4i64 && VT != MVT::v8i64)
31494 if (N0.getOpcode() == ISD::XOR &&
31495 ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
31496 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
31498 if (N1.getOpcode() == ISD::XOR &&
31499 ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
31500 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
31505 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
31506 // register. In most cases we actually compare or select YMM-sized registers
31507 // and mixing the two types creates horrible code. This method optimizes
31508 // some of the transition sequences.
31509 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
31510 TargetLowering::DAGCombinerInfo &DCI,
31511 const X86Subtarget &Subtarget) {
31512 EVT VT = N->getValueType(0);
31513 if (!VT.is256BitVector())
31516 assert((N->getOpcode() == ISD::ANY_EXTEND ||
31517 N->getOpcode() == ISD::ZERO_EXTEND ||
31518 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
31520 SDValue Narrow = N->getOperand(0);
31521 EVT NarrowVT = Narrow->getValueType(0);
31522 if (!NarrowVT.is128BitVector())
31525 if (Narrow->getOpcode() != ISD::XOR &&
31526 Narrow->getOpcode() != ISD::AND &&
31527 Narrow->getOpcode() != ISD::OR)
31530 SDValue N0 = Narrow->getOperand(0);
31531 SDValue N1 = Narrow->getOperand(1);
31534 // The left side has to be a trunc.
31535 if (N0.getOpcode() != ISD::TRUNCATE)
31538 // The type of the truncated inputs.
31539 EVT WideVT = N0->getOperand(0)->getValueType(0);
31543 // The right side has to be a 'trunc' or a constant vector.
31544 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
31545 ConstantSDNode *RHSConstSplat = nullptr;
31546 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
31547 RHSConstSplat = RHSBV->getConstantSplatNode();
31548 if (!RHSTrunc && !RHSConstSplat)
31551 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31553 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
31556 // Set N0 and N1 to hold the inputs to the new wide operation.
31557 N0 = N0->getOperand(0);
31558 if (RHSConstSplat) {
31559 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getVectorElementType(),
31560 SDValue(RHSConstSplat, 0));
31561 N1 = DAG.getSplatBuildVector(WideVT, DL, N1);
31562 } else if (RHSTrunc) {
31563 N1 = N1->getOperand(0);
31566 // Generate the wide operation.
31567 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
31568 unsigned Opcode = N->getOpcode();
31570 case ISD::ANY_EXTEND:
31572 case ISD::ZERO_EXTEND: {
31573 unsigned InBits = NarrowVT.getScalarSizeInBits();
31574 APInt Mask = APInt::getAllOnesValue(InBits);
31575 Mask = Mask.zext(VT.getScalarSizeInBits());
31576 return DAG.getNode(ISD::AND, DL, VT,
31577 Op, DAG.getConstant(Mask, DL, VT));
31579 case ISD::SIGN_EXTEND:
31580 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
31581 Op, DAG.getValueType(NarrowVT));
31583 llvm_unreachable("Unexpected opcode");
31587 /// If both input operands of a logic op are being cast from floating point
31588 /// types, try to convert this into a floating point logic node to avoid
31589 /// unnecessary moves from SSE to integer registers.
31590 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
31591 const X86Subtarget &Subtarget) {
31592 unsigned FPOpcode = ISD::DELETED_NODE;
31593 if (N->getOpcode() == ISD::AND)
31594 FPOpcode = X86ISD::FAND;
31595 else if (N->getOpcode() == ISD::OR)
31596 FPOpcode = X86ISD::FOR;
31597 else if (N->getOpcode() == ISD::XOR)
31598 FPOpcode = X86ISD::FXOR;
31600 assert(FPOpcode != ISD::DELETED_NODE &&
31601 "Unexpected input node for FP logic conversion");
31603 EVT VT = N->getValueType(0);
31604 SDValue N0 = N->getOperand(0);
31605 SDValue N1 = N->getOperand(1);
31607 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
31608 ((Subtarget.hasSSE1() && VT == MVT::i32) ||
31609 (Subtarget.hasSSE2() && VT == MVT::i64))) {
31610 SDValue N00 = N0.getOperand(0);
31611 SDValue N10 = N1.getOperand(0);
31612 EVT N00Type = N00.getValueType();
31613 EVT N10Type = N10.getValueType();
31614 if (N00Type.isFloatingPoint() && N10Type.isFloatingPoint()) {
31615 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
31616 return DAG.getBitcast(VT, FPLogic);
31622 /// If this is a zero/all-bits result that is bitwise-anded with a low bits
31623 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
31624 /// with a shift-right to eliminate loading the vector constant mask value.
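/// For example, on v4i32, (and (pcmpgt a, b), (splat 1)) can instead be
/// emitted as (psrld (pcmpgt a, b), 31): every lane of the compare is all
/// ones or all zeroes, so shifting the sign bit down recreates the 0/1 mask
/// without loading a constant-pool value.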
31625 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
31626 const X86Subtarget &Subtarget) {
31627 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
31628 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
31629 EVT VT0 = Op0.getValueType();
31630 EVT VT1 = Op1.getValueType();
31632 if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
31636 if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
31637 !SplatVal.isMask())
31640 if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
31643 unsigned EltBitWidth = VT0.getScalarSizeInBits();
31644 if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
31648 unsigned ShiftVal = SplatVal.countTrailingOnes();
31649 SDValue ShAmt = DAG.getConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
31650 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
31651 return DAG.getBitcast(N->getValueType(0), Shift);
31654 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
31655 TargetLowering::DAGCombinerInfo &DCI,
31656 const X86Subtarget &Subtarget) {
31657 if (DCI.isBeforeLegalizeOps())
31660 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
31663 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
31666 if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
31669 if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
31672 EVT VT = N->getValueType(0);
31673 SDValue N0 = N->getOperand(0);
31674 SDValue N1 = N->getOperand(1);
31677 // Attempt to recursively combine a bitmask AND with shuffles.
31678 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
31680 SmallVector<int, 1> NonceMask; // Just a placeholder.
31681 NonceMask.push_back(0);
31682 if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask, {},
31683 /*Depth*/ 1, /*HasVarMask*/ false, DAG,
31685 return SDValue(); // This routine will use CombineTo to replace N.
31688 // Create BEXTR instructions
31689 // BEXTR is ((X >> imm) & (2**size-1))
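// For example, (and (srl x, 4), 0xFFF) extracts 12 bits starting at bit 4,
// i.e. BEXTR with a control value of (12 << 8) | 4 == 0x0C04 (length in bits
// [15:8], start bit in bits [7:0]), matching the constant built below.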
31690 if (VT != MVT::i32 && VT != MVT::i64)
31693 if (!Subtarget.hasBMI() && !Subtarget.hasTBM())
31695 if (N0.getOpcode() != ISD::SRA && N0.getOpcode() != ISD::SRL)
31698 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
31699 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
31700 if (MaskNode && ShiftNode) {
31701 uint64_t Mask = MaskNode->getZExtValue();
31702 uint64_t Shift = ShiftNode->getZExtValue();
31703 if (isMask_64(Mask)) {
31704 uint64_t MaskSize = countPopulation(Mask);
31705 if (Shift + MaskSize <= VT.getSizeInBits())
31706 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
31707 DAG.getConstant(Shift | (MaskSize << 8), DL,
31714 // Try to fold:
31715 // (or (and (m, y), (pandn m, x)))
31716 // into
31717 // (vselect m, x, y)
31718 // As a special case, try to fold:
31719 // (or (and (m, (sub 0, x)), (pandn m, x)))
31720 // into
31721 // (sub (xor X, M), M)
31722 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
31723 const X86Subtarget &Subtarget) {
31724 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
31726 SDValue N0 = N->getOperand(0);
31727 SDValue N1 = N->getOperand(1);
31728 EVT VT = N->getValueType(0);
31730 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
31731 (VT.is256BitVector() && Subtarget.hasInt256())))
31734 // Canonicalize AND to LHS.
31735 if (N1.getOpcode() == ISD::AND)
31738 // TODO: Attempt to match against AND(XOR(-1,X),Y) as well, waiting for
31739 // ANDNP combine allows other combines to happen that prevent matching.
31740 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
31743 SDValue Mask = N1.getOperand(0);
31744 SDValue X = N1.getOperand(1);
31746 if (N0.getOperand(0) == Mask)
31747 Y = N0.getOperand(1);
31748 if (N0.getOperand(1) == Mask)
31749 Y = N0.getOperand(0);
31751 // Check to see if the mask appeared in both the AND and ANDNP.
31755 // Validate that X, Y, and Mask are bitcasts, and see through them.
31756 Mask = peekThroughBitcasts(Mask);
31757 X = peekThroughBitcasts(X);
31758 Y = peekThroughBitcasts(Y);
31760 EVT MaskVT = Mask.getValueType();
31761 unsigned EltBits = MaskVT.getScalarSizeInBits();
31763 // TODO: Attempt to handle floating point cases as well?
31764 if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
31770 // (or (and (M, (sub 0, X)), (pandn M, X)))
31771 // which is a special case of vselect:
31772 // (vselect M, (sub 0, X), X)
31774 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
31775 // We know that, if fNegate is 0 or 1:
31776 // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
31778 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
31779 // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
31780 // ( M ? -X : X) == ((X ^ M ) + (M & 1))
31781 // This lets us transform our vselect to:
31782 // (add (xor X, M), (and M, 1))
31784 // (sub (xor X, M), M)
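// Sanity check of the identity: with M == all-ones, (xor X, M) - M ==
// ~X - (-1) == ~X + 1 == -X, and with M == 0, (xor X, 0) - 0 == X.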
31785 if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT &&
31786 DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT)) {
31787 auto IsNegV = [](SDNode *N, SDValue V) {
31788 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
31789 ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
31792 if (IsNegV(Y.getNode(), X))
31794 else if (IsNegV(X.getNode(), Y))
31798 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
31799 SDValue SubOp2 = Mask;
31801 // If the negate was on the false side of the select, then
31802 // the operands of the SUB need to be swapped. PR 27251.
31803 // This is because the pattern being matched above is
31804 // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
31805 // but if the pattern matched was
31806 // (vselect M, X, (sub (0, X))), that is really negation of the pattern
31807 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
31808 // pattern also needs to be a negation of the replacement pattern above.
31809 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
31810 // sub accomplishes the negation of the replacement pattern.
31812 std::swap(SubOp1, SubOp2);
31814 SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
31815 return DAG.getBitcast(VT, Res);
31819 // PBLENDVB is only available on SSE 4.1.
31820 if (!Subtarget.hasSSE41())
31823 MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
31825 X = DAG.getBitcast(BlendVT, X);
31826 Y = DAG.getBitcast(BlendVT, Y);
31827 Mask = DAG.getBitcast(BlendVT, Mask);
31828 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
31829 return DAG.getBitcast(VT, Mask);
31832 // Helper function for combineOrCmpEqZeroToCtlzSrl
31833 // Transforms:
31834 // seteq(cmp x, 0)
31835 // into:
31836 // srl(ctlz x), log2(bitsize(x))
31837 // Input pattern is checked by caller.
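// For i32 with LZCNT, lzcnt(x) == 32 only when x == 0, so
//   (x == 0) ? 1 : 0  ==  lzcnt(x) >> 5,
// which is why the caller gates this on isCtlzFast().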
31838 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
31839 SelectionDAG &DAG) {
31840 SDValue Cmp = Op.getOperand(1);
31841 EVT VT = Cmp.getOperand(0).getValueType();
31842 unsigned Log2b = Log2_32(VT.getSizeInBits());
31844 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
31845 // The result of the shift is true or false, and on X86, the 32-bit
31846 // encoding of shr and lzcnt is more desirable.
31847 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
31848 SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
31849 DAG.getConstant(Log2b, dl, VT));
31850 return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
31853 // Try to transform:
31854 // zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
31855 // into:
31856 // srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
31857 // Will also attempt to match more generic cases, eg:
31858 // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
31859 // Only applies if the target supports the FastLZCNT feature.
31860 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
31861 TargetLowering::DAGCombinerInfo &DCI,
31862 const X86Subtarget &Subtarget) {
31863 if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
31866 auto isORCandidate = [](SDValue N) {
31867 return (N->getOpcode() == ISD::OR && N->hasOneUse());
31870 // Check that the zero extend is extending to 32 bits or more. The code generated by
31871 // srl(ctlz) for 16-bit or less variants of the pattern would require extra
31872 // instructions to clear the upper bits.
31873 if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
31874 !isORCandidate(N->getOperand(0)))
31877 // Check the node matches: setcc(eq, cmp 0)
31878 auto isSetCCCandidate = [](SDValue N) {
31879 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
31880 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
31881 N->getOperand(1).getOpcode() == X86ISD::CMP &&
31882 isNullConstant(N->getOperand(1).getOperand(1)) &&
31883 N->getOperand(1).getValueType().bitsGE(MVT::i32);
31886 SDNode *OR = N->getOperand(0).getNode();
31887 SDValue LHS = OR->getOperand(0);
31888 SDValue RHS = OR->getOperand(1);
31890 // Save nodes matching or(or, setcc(eq, cmp 0)).
31891 SmallVector<SDNode *, 2> ORNodes;
31892 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
31893 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
31894 ORNodes.push_back(OR);
31895 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
31896 LHS = OR->getOperand(0);
31897 RHS = OR->getOperand(1);
31900 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
31901 if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
31902 !isORCandidate(SDValue(OR, 0)))
31905 // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower it
31906 // into:
31907 // or(srl(ctlz),srl(ctlz)).
31908 // The dag combiner can then fold it into:
31909 // srl(or(ctlz, ctlz)).
31910 EVT VT = OR->getValueType(0);
31911 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
31912 SDValue Ret, NewRHS;
31913 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
31914 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
31919 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
31920 while (ORNodes.size() > 0) {
31921 OR = ORNodes.pop_back_val();
31922 LHS = OR->getOperand(0);
31923 RHS = OR->getOperand(1);
31924 // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
31925 if (RHS->getOpcode() == ISD::OR)
31926 std::swap(LHS, RHS);
31927 EVT VT = OR->getValueType(0);
31928 SDValue NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
31931 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
31935 Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
31940 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
31941 TargetLowering::DAGCombinerInfo &DCI,
31942 const X86Subtarget &Subtarget) {
31943 if (DCI.isBeforeLegalizeOps())
31946 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
31949 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
31952 if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
31955 SDValue N0 = N->getOperand(0);
31956 SDValue N1 = N->getOperand(1);
31957 EVT VT = N->getValueType(0);
31959 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
31962 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
31963 bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
31965 // SHLD/SHRD instructions have lower register pressure, but on some
31966 // platforms they have higher latency than the equivalent
31967 // series of shifts/or that would otherwise be generated.
31968 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
31969 // have higher latencies and we are not optimizing for size.
31970 if (!OptForSize && Subtarget.isSHLDSlow())
31973 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
31975 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
31977 if (!N0.hasOneUse() || !N1.hasOneUse())
31980 SDValue ShAmt0 = N0.getOperand(1);
31981 if (ShAmt0.getValueType() != MVT::i8)
31983 SDValue ShAmt1 = N1.getOperand(1);
31984 if (ShAmt1.getValueType() != MVT::i8)
31986 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
31987 ShAmt0 = ShAmt0.getOperand(0);
31988 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
31989 ShAmt1 = ShAmt1.getOperand(0);
31992 unsigned Opc = X86ISD::SHLD;
31993 SDValue Op0 = N0.getOperand(0);
31994 SDValue Op1 = N1.getOperand(0);
31995 if (ShAmt0.getOpcode() == ISD::SUB ||
31996 ShAmt0.getOpcode() == ISD::XOR) {
31997 Opc = X86ISD::SHRD;
31998 std::swap(Op0, Op1);
31999 std::swap(ShAmt0, ShAmt1);
32002 // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> SHLD( X, Y, C )
32003 // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> SHRD( X, Y, C )
32004 // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> SHLD( X, Y, C )
32005 // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> SHRD( X, Y, C )
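// As a reminder of the semantics being matched: SHLD(X, Y, C) shifts X left
// by C bits and fills the vacated low bits from the high bits of Y, so it
// computes (X << C) | (Y >> (Bits - C)); SHRD(X, Y, C) likewise computes
// (X >> C) | (Y << (Bits - C)).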
32006 unsigned Bits = VT.getSizeInBits();
32007 if (ShAmt1.getOpcode() == ISD::SUB) {
32008 SDValue Sum = ShAmt1.getOperand(0);
32009 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
32010 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
32011 if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
32012 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
32013 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
32014 return DAG.getNode(Opc, DL, VT,
32016 DAG.getNode(ISD::TRUNCATE, DL,
32019 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
32020 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
32021 if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
32022 return DAG.getNode(Opc, DL, VT,
32023 N0.getOperand(0), N1.getOperand(0),
32024 DAG.getNode(ISD::TRUNCATE, DL,
32026 } else if (ShAmt1.getOpcode() == ISD::XOR) {
32027 SDValue Mask = ShAmt1.getOperand(1);
32028 if (ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
32029 unsigned InnerShift = (X86ISD::SHLD == Opc ? ISD::SRL : ISD::SHL);
32030 SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
32031 if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
32032 ShAmt1Op0 = ShAmt1Op0.getOperand(0);
32033 if (MaskC->getSExtValue() == (Bits - 1) && ShAmt1Op0 == ShAmt0) {
32034 if (Op1.getOpcode() == InnerShift &&
32035 isa<ConstantSDNode>(Op1.getOperand(1)) &&
32036 Op1.getConstantOperandVal(1) == 1) {
32037 return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0),
32038 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
32040 // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
32041 if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
32042 Op1.getOperand(0) == Op1.getOperand(1)) {
32043 return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0),
32044 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
32053 /// Generate NEG and CMOV for integer abs.
32054 static SDValue combineIntegerAbs(SDNode *N, SelectionDAG &DAG) {
32055 EVT VT = N->getValueType(0);
32057 // Since X86 does not have CMOV for 8-bit integer, we don't convert
32058 // 8-bit integer abs to NEG and CMOV.
32059 if (VT.isInteger() && VT.getSizeInBits() == 8)
32062 SDValue N0 = N->getOperand(0);
32063 SDValue N1 = N->getOperand(1);
32066 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
32067 // and change it to SUB and CMOV.
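// This matches the classic branchless abs: with s = X >> (size(X)-1) taken
// arithmetically, (X + s) ^ s yields |X| (INT_MIN maps to itself, just as it
// does with NEG); the code below rewrites that chain into SUB + CMOV(GE).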
32068 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
32069 N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1 &&
32070 N1.getOpcode() == ISD::SRA && N1.getOperand(0) == N0.getOperand(0)) {
32071 auto *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
32072 if (Y1C && Y1C->getAPIntValue() == VT.getSizeInBits() - 1) {
32073 // Generate SUB & CMOV.
32074 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
32075 DAG.getConstant(0, DL, VT), N0.getOperand(0));
32076 SDValue Ops[] = {N0.getOperand(0), Neg,
32077 DAG.getConstant(X86::COND_GE, DL, MVT::i8),
32078 SDValue(Neg.getNode(), 1)};
32079 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
32085 /// Try to turn tests against the signbit in the form of:
32086 /// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
32087 /// into:
32088 /// SETGT(X, -1)
32089 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
32090 // This is only worth doing if the output type is i8 or i1.
32091 EVT ResultType = N->getValueType(0);
32092 if (ResultType != MVT::i8 && ResultType != MVT::i1)
32095 SDValue N0 = N->getOperand(0);
32096 SDValue N1 = N->getOperand(1);
32098 // We should be performing an xor against a truncated shift.
32099 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
32102 // Make sure we are performing an xor against one.
32103 if (!isOneConstant(N1))
32106 // SetCC on x86 zero extends so only act on this if it's a logical shift.
32107 SDValue Shift = N0.getOperand(0);
32108 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
32111 // Make sure we are truncating from one of i16, i32 or i64.
32112 EVT ShiftTy = Shift.getValueType();
32113 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
32116 // Make sure the shift amount extracts the sign bit.
32117 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
32118 Shift.getConstantOperandVal(1) != ShiftTy.getSizeInBits() - 1)
32121 // Create a greater-than comparison against -1.
32122 // N.B. Using SETGE against 0 works but we want a canonical looking
32123 // comparison; using SETGT matches up with what TranslateX86CC does.
32125 SDValue ShiftOp = Shift.getOperand(0);
32126 EVT ShiftOpTy = ShiftOp.getValueType();
32127 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32128 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
32129 *DAG.getContext(), ResultType);
32130 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
32131 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
32132 if (SetCCResultType != ResultType)
32133 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
32137 /// Turn vector tests of the signbit in the form of:
32138 /// xor (sra X, elt_size(X)-1), -1
32139 /// into:
32140 /// pcmpgt X, -1
32142 /// This should be called before type legalization because the pattern may not
32143 /// persist after that.
32144 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
32145 const X86Subtarget &Subtarget) {
32146 EVT VT = N->getValueType(0);
32147 if (!VT.isSimple())
32150 switch (VT.getSimpleVT().SimpleTy) {
32151 default: return SDValue();
32154 case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
32155 case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
32159 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
32162 // There must be a shift right algebraic before the xor, and the xor must be a
32163 // 'not' operation.
32164 SDValue Shift = N->getOperand(0);
32165 SDValue Ones = N->getOperand(1);
32166 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
32167 !ISD::isBuildVectorAllOnes(Ones.getNode()))
32170 // The shift should be smearing the sign bit across each vector element.
32171 auto *ShiftBV = dyn_cast<BuildVectorSDNode>(Shift.getOperand(1));
32175 EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
32176 auto *ShiftAmt = ShiftBV->getConstantSplatNode();
32177 if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
32180 // Create a greater-than comparison against -1. We don't use the more obvious
32181 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
32182 return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
32185 /// Check if truncation with saturation from type \p SrcVT to \p DstVT
32186 /// is valid for the given \p Subtarget.
32187 static bool isSATValidOnAVX512Subtarget(EVT SrcVT, EVT DstVT,
32188 const X86Subtarget &Subtarget) {
32189 if (!Subtarget.hasAVX512())
32192 // FIXME: Scalar type may be supported if we move it to vector register.
32193 if (!SrcVT.isVector() || !SrcVT.isSimple() || SrcVT.getSizeInBits() > 512)
32196 EVT SrcElVT = SrcVT.getScalarType();
32197 EVT DstElVT = DstVT.getScalarType();
32198 if (SrcElVT.getSizeInBits() < 16 || SrcElVT.getSizeInBits() > 64)
32200 if (DstElVT.getSizeInBits() < 8 || DstElVT.getSizeInBits() > 32)
32202 if (SrcVT.is512BitVector() || Subtarget.hasVLX())
32203 return SrcElVT.getSizeInBits() >= 32 || Subtarget.hasBWI();
32207 /// Detect a pattern of truncation with saturation:
32208 /// (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
32209 /// Return the source value to be truncated or SDValue() if the pattern was not matched.
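/// For example, for VT == <16 x i16>:
///   (truncate (umin X, splat(65535)) to <16 x i16>)
/// is detected, and X (a <16 x i32> value) is returned so that the caller can
/// emit a single saturating truncation (e.g. vpmovusdw) instead.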
32211 static SDValue detectUSatPattern(SDValue In, EVT VT) {
32212 if (In.getOpcode() != ISD::UMIN)
32215 // Saturation with truncation. We truncate from InVT to VT.
32216 assert(In.getScalarValueSizeInBits() > VT.getScalarSizeInBits() &&
32217 "Unexpected types for truncate operation");
32220 if (ISD::isConstantSplatVector(In.getOperand(1).getNode(), C)) {
32221 // C should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according
32222 // to the element size of the destination type.
32223 return C.isMask(VT.getScalarSizeInBits()) ? In.getOperand(0) :
32229 /// Detect a pattern of truncation with saturation:
32230 /// (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
32231 /// The types should allow using the VPMOVUS* instructions on AVX512.
32232 /// Return the source value to be truncated or SDValue() if the pattern was not matched.
32234 static SDValue detectAVX512USatPattern(SDValue In, EVT VT,
32235 const X86Subtarget &Subtarget) {
32236 if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
32238 return detectUSatPattern(In, VT);
32241 static SDValue
32242 combineTruncateWithUSat(SDValue In, EVT VT, SDLoc &DL, SelectionDAG &DAG,
32243 const X86Subtarget &Subtarget) {
32244 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32245 if (!TLI.isTypeLegal(In.getValueType()) || !TLI.isTypeLegal(VT))
32247 if (auto USatVal = detectUSatPattern(In, VT))
32248 if (isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
32249 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
32253 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
32254 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
32255 /// X86ISD::AVG instruction.
32256 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
32257 const X86Subtarget &Subtarget,
32259 if (!VT.isVector() || !VT.isSimple())
32261 EVT InVT = In.getValueType();
32262 unsigned NumElems = VT.getVectorNumElements();
32264 EVT ScalarVT = VT.getVectorElementType();
32265 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
32266 isPowerOf2_32(NumElems)))
32270 // InScalarVT is the intermediate type in the AVG pattern and it should be wider
32270 // than the original input type (i8/i16).
32271 EVT InScalarVT = InVT.getVectorElementType();
32272 if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
32275 if (!Subtarget.hasSSE2())
32277 if (Subtarget.hasBWI()) {
32278 if (VT.getSizeInBits() > 512)
32280 } else if (Subtarget.hasAVX2()) {
32281 if (VT.getSizeInBits() > 256)
32284 if (VT.getSizeInBits() > 128)
32288 // Detect the following pattern:
32290 // %1 = zext <N x i8> %a to <N x i32>
32291 // %2 = zext <N x i8> %b to <N x i32>
32292 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
32293 // %4 = add nuw nsw <N x i32> %3, %2
32294 // %5 = lshr <N x i32> %4, <i32 1 x N>
32295 // %6 = trunc <N x i32> %5 to <N x i8>
32297 // In AVX512, the last instruction can also be a trunc store.
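// All of which is just c = (a + b + 1) >> 1 computed without overflow in the
// wider type; that is exactly the rounding average that pavgb/pavgw compute,
// e.g. a single "pavgb %xmm1, %xmm0" for the <16 x i8> case.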
32299 if (In.getOpcode() != ISD::SRL)
32302 // A lambda checking that the given SDValue is a constant vector and each element
32303 // is in the range [Min, Max].
32304 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
32305 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
32306 if (!BV || !BV->isConstant())
32308 for (SDValue Op : V->ops()) {
32309 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
32312 uint64_t Val = C->getZExtValue();
32313 if (Val < Min || Val > Max)
32319 // Check if each element of the vector is left-shifted by one.
32320 auto LHS = In.getOperand(0);
32321 auto RHS = In.getOperand(1);
32322 if (!IsConstVectorInRange(RHS, 1, 1))
32324 if (LHS.getOpcode() != ISD::ADD)
32327 // Detect a pattern of a + b + 1 where the order doesn't matter.
32328 SDValue Operands[3];
32329 Operands[0] = LHS.getOperand(0);
32330 Operands[1] = LHS.getOperand(1);
32332 // Take care of the case when one of the operands is a constant vector whose
32333 // element is in the range [1, 256].
32334 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
32335 Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
32336 Operands[0].getOperand(0).getValueType() == VT) {
32337 // The pattern is detected. Subtract one from the constant vector, then
32338 // demote it and emit X86ISD::AVG instruction.
32339 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
32340 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
32341 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
32342 return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0),
32346 if (Operands[0].getOpcode() == ISD::ADD)
32347 std::swap(Operands[0], Operands[1]);
32348 else if (Operands[1].getOpcode() != ISD::ADD)
32350 Operands[2] = Operands[1].getOperand(0);
32351 Operands[1] = Operands[1].getOperand(1);
32353 // Now we have three operands of two additions. Check that one of them is a
32354 // constant vector with ones, and the other two are promoted from i8/i16.
32355 for (int i = 0; i < 3; ++i) {
32356 if (!IsConstVectorInRange(Operands[i], 1, 1))
32358 std::swap(Operands[i], Operands[2]);
32360 // Check if Operands[0] and Operands[1] are results of type promotion.
32361 for (int j = 0; j < 2; ++j)
32362 if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
32363 Operands[j].getOperand(0).getValueType() != VT)
32366 // The pattern is detected, emit X86ISD::AVG instruction.
32367 return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0),
32368 Operands[1].getOperand(0));
32374 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
32375 TargetLowering::DAGCombinerInfo &DCI,
32376 const X86Subtarget &Subtarget) {
32377 LoadSDNode *Ld = cast<LoadSDNode>(N);
32378 EVT RegVT = Ld->getValueType(0);
32379 EVT MemVT = Ld->getMemoryVT();
32381 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32383 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
32384 // into two 16-byte operations.
32385 ISD::LoadExtType Ext = Ld->getExtensionType();
32387 unsigned AddressSpace = Ld->getAddressSpace();
32388 unsigned Alignment = Ld->getAlignment();
32389 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
32390 Ext == ISD::NON_EXTLOAD &&
32391 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
32392 AddressSpace, Alignment, &Fast) && !Fast) {
32393 unsigned NumElems = RegVT.getVectorNumElements();
32397 SDValue Ptr = Ld->getBasePtr();
32399 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
32402 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
32403 Alignment, Ld->getMemOperand()->getFlags());
32405 Ptr = DAG.getMemBasePlusOffset(Ptr, 16, dl);
32407 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
32408 std::min(16U, Alignment), Ld->getMemOperand()->getFlags());
32409 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32411 Load2.getValue(1));
32413 SDValue NewVec = DAG.getUNDEF(RegVT);
32414 NewVec = insert128BitVector(NewVec, Load1, 0, DAG, dl);
32415 NewVec = insert128BitVector(NewVec, Load2, NumElems / 2, DAG, dl);
32416 return DCI.CombineTo(N, NewVec, TF, true);
32422 /// If V is a build vector of boolean constants and exactly one of those
32423 /// constants is true, return the operand index of that true element.
32424 /// Otherwise, return -1.
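/// For example, <i1 0, i1 0, i1 1, i1 0> yields 2, while <i1 1, i1 0, i1 1, i1 0>
/// and <i1 0, i1 0, i1 0, i1 0> both yield -1.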
32425 static int getOneTrueElt(SDValue V) {
32426 // This needs to be a build vector of booleans.
32427 // TODO: Checking for the i1 type matches the IR definition for the mask,
32428 // but the mask check could be loosened to i8 or other types. That might
32429 // also require checking more than 'allOnesValue'; eg, the x86 HW
32430 // instructions only require that the MSB is set for each mask element.
32431 // The ISD::MSTORE comments/definition do not specify how the mask operand
32433 auto *BV = dyn_cast<BuildVectorSDNode>(V);
32434 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
32437 int TrueIndex = -1;
32438 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
32439 for (unsigned i = 0; i < NumElts; ++i) {
32440 const SDValue &Op = BV->getOperand(i);
32443 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
32446 if (ConstNode->getAPIntValue().isAllOnesValue()) {
32447 // If we already found a one, this is too many.
32448 if (TrueIndex >= 0)
32456 /// Given a masked memory load/store operation, return true if it has one mask
32457 /// bit set. If it has one mask bit set, then also return the memory address of
32458 /// the scalar element to load/store, the vector index to insert/extract that
32459 /// scalar element, and the alignment for the scalar memory access.
32460 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
32461 SelectionDAG &DAG, SDValue &Addr,
32462 SDValue &Index, unsigned &Alignment) {
32463 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
32464 if (TrueMaskElt < 0)
32467 // Get the address of the one scalar element that is specified by the mask
32468 // using the appropriate offset from the base pointer.
32469 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
32470 Addr = MaskedOp->getBasePtr();
32471 if (TrueMaskElt != 0) {
32472 unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
32473 Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
32476 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
32477 Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
32481 /// If exactly one element of the mask is set for a non-extending masked load,
32482 /// it can be reduced to a scalar load and a vector insert.
32483 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
32484 /// mask have already been optimized in IR, so we don't bother with those here.
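/// A sketch of the transform, assuming a v4f32 load whose mask is
/// <i1 0, i1 0, i1 1, i1 0>:
///   (v4f32 masked_load %ptr, mask, %passthru)
///   --> (insert_vector_elt %passthru, (load (%ptr + 8)), 2)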
32486 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
32487 TargetLowering::DAGCombinerInfo &DCI) {
32488 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
32489 // However, some target hooks may need to be added to know when the transform
32490 // is profitable. Endianness would also have to be considered.
32492 SDValue Addr, VecIndex;
32493 unsigned Alignment;
32494 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
32497 // Load the one scalar element that is specified by the mask using the
32498 // appropriate offset from the base pointer.
32500 EVT VT = ML->getValueType(0);
32501 EVT EltVT = VT.getVectorElementType();
32503 DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
32504 Alignment, ML->getMemOperand()->getFlags());
32506 // Insert the loaded element into the appropriate place in the vector.
32507 SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, ML->getSrc0(),
32509 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
32513 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
32514 TargetLowering::DAGCombinerInfo &DCI) {
32515 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
32519 EVT VT = ML->getValueType(0);
32521 // If we are loading the first and last elements of a vector, it is safe and
32522 // always faster to load the whole vector. Replace the masked load with a
32523 // vector load and select.
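// For example, with a constant mask such as <i1 1, i1 0, i1 0, i1 1> the
// masked load becomes a full vector load followed by a select against the
// pass-through value.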
32524 unsigned NumElts = VT.getVectorNumElements();
32525 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
32526 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
32527 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
32528 if (LoadFirstElt && LoadLastElt) {
32529 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
32530 ML->getMemOperand());
32531 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd, ML->getSrc0());
32532 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
32535 // Convert a masked load with a constant mask into a masked load and a select.
32536 // This allows the select operation to use a faster kind of select instruction
32537 // (for example, vblendvps -> vblendps).
32539 // Don't try this if the pass-through operand is already undefined. That would
32540 // cause an infinite loop because that's what we're about to create.
32541 if (ML->getSrc0().isUndef())
32544 // The new masked load has an undef pass-through operand. The select uses the
32545 // original pass-through operand.
32546 SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
32547 ML->getMask(), DAG.getUNDEF(VT),
32548 ML->getMemoryVT(), ML->getMemOperand(),
32549 ML->getExtensionType());
32550 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML, ML->getSrc0());
32552 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
32555 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
32556 TargetLowering::DAGCombinerInfo &DCI,
32557 const X86Subtarget &Subtarget) {
32558 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
32560 // TODO: Expanding load with constant mask may be optimized as well.
32561 if (Mld->isExpandingLoad())
32564 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
32565 if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
32567 // TODO: Do some AVX512 subsets benefit from this transform?
32568 if (!Subtarget.hasAVX512())
32569 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
32573 if (Mld->getExtensionType() != ISD::SEXTLOAD)
32576 // Resolve extending loads.
32577 EVT VT = Mld->getValueType(0);
32578 unsigned NumElems = VT.getVectorNumElements();
32579 EVT LdVT = Mld->getMemoryVT();
32582 assert(LdVT != VT && "Cannot extend to the same type");
32583 unsigned ToSz = VT.getScalarSizeInBits();
32584 unsigned FromSz = LdVT.getScalarSizeInBits();
32585 // From/To sizes and ElemCount must be pow of two.
32586 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
32587 "Unexpected size for extending masked load");
32589 unsigned SizeRatio = ToSz / FromSz;
32590 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
32592 // Create a type on which we perform the shuffle.
32593 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
32594 LdVT.getScalarType(), NumElems*SizeRatio);
32595 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
32597 // Convert Src0 value.
32598 SDValue WideSrc0 = DAG.getBitcast(WideVecVT, Mld->getSrc0());
32599 if (!Mld->getSrc0().isUndef()) {
32600 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
32601 for (unsigned i = 0; i != NumElems; ++i)
32602 ShuffleVec[i] = i * SizeRatio;
32604 // Can't shuffle using an illegal type.
32605 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
32606 "WideVecVT should be legal");
32607 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
32608 DAG.getUNDEF(WideVecVT), ShuffleVec);
32610 // Prepare the new mask.
32612 SDValue Mask = Mld->getMask();
32613 if (Mask.getValueType() == VT) {
32614 // Mask and original value have the same type.
32615 NewMask = DAG.getBitcast(WideVecVT, Mask);
32616 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
32617 for (unsigned i = 0; i != NumElems; ++i)
32618 ShuffleVec[i] = i * SizeRatio;
32619 for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i)
32620 ShuffleVec[i] = NumElems * SizeRatio;
32621 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
32622 DAG.getConstant(0, dl, WideVecVT),
32625 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
32626 unsigned WidenNumElts = NumElems*SizeRatio;
32627 unsigned MaskNumElts = VT.getVectorNumElements();
32628 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
32631 unsigned NumConcat = WidenNumElts / MaskNumElts;
32632 SmallVector<SDValue, 16> Ops(NumConcat);
32633 SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
32635 for (unsigned i = 1; i != NumConcat; ++i)
32638 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
32641 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
32642 Mld->getBasePtr(), NewMask, WideSrc0,
32643 Mld->getMemoryVT(), Mld->getMemOperand(),
32645 SDValue NewVec = getExtendInVec(X86ISD::VSEXT, dl, VT, WideLd, DAG);
32646 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
32649 /// If exactly one element of the mask is set for a non-truncating masked store,
32650 /// it can be reduced to a vector extract and a scalar store.
32651 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
32652 /// mask have already been optimized in IR, so we don't bother with those here.
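/// A sketch of the transform, assuming a v4i32 store whose mask is
/// <i1 0, i1 1, i1 0, i1 0>:
///   (masked_store %val, %ptr, mask)
///   --> (store (extract_vector_elt %val, 1), (%ptr + 4))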
32653 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
32654 SelectionDAG &DAG) {
32655 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
32656 // However, some target hooks may need to be added to know when the transform
32657 // is profitable. Endianness would also have to be considered.
32659 SDValue Addr, VecIndex;
32660 unsigned Alignment;
32661 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
32664 // Extract the one scalar element that is actually being stored.
32666 EVT VT = MS->getValue().getValueType();
32667 EVT EltVT = VT.getVectorElementType();
32668 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
32669 MS->getValue(), VecIndex);
32671 // Store that element at the appropriate offset from the base pointer.
32672 return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
32673 Alignment, MS->getMemOperand()->getFlags());
32676 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
32677 const X86Subtarget &Subtarget) {
32678 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
32680 if (Mst->isCompressingStore())
32683 if (!Mst->isTruncatingStore())
32684 return reduceMaskedStoreToScalarStore(Mst, DAG);
32686 // Resolve truncating stores.
32687 EVT VT = Mst->getValue().getValueType();
32688 unsigned NumElems = VT.getVectorNumElements();
32689 EVT StVT = Mst->getMemoryVT();
32692 assert(StVT != VT && "Cannot truncate to the same type");
32693 unsigned FromSz = VT.getScalarSizeInBits();
32694 unsigned ToSz = StVT.getScalarSizeInBits();
32696 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32698 // The truncating store is legal in some cases. For example
32699 // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
32700 // are dedicated to truncating stores.
32701 // In this case we don't need any further transformations.
32702 if (TLI.isTruncStoreLegal(VT, StVT))
32705 // From/To sizes and ElemCount must be pow of two.
32706 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
32707 "Unexpected size for truncating masked store");
32708 // We are going to use the original vector elt for storing.
32709 // Accumulated smaller vector elements must be a multiple of the store size.
32710 assert (((NumElems * FromSz) % ToSz) == 0 &&
32711 "Unexpected ratio for truncating masked store");
32713 unsigned SizeRatio = FromSz / ToSz;
32714 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
32716 // Create a type on which we perform the shuffle.
32717 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
32718 StVT.getScalarType(), NumElems*SizeRatio);
32720 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
32722 SDValue WideVec = DAG.getBitcast(WideVecVT, Mst->getValue());
32723 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
32724 for (unsigned i = 0; i != NumElems; ++i)
32725 ShuffleVec[i] = i * SizeRatio;
32727 // Can't shuffle using an illegal type.
32728 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
32729 "WideVecVT should be legal");
32731 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
32732 DAG.getUNDEF(WideVecVT),
32736 SDValue Mask = Mst->getMask();
32737 if (Mask.getValueType() == VT) {
32738 // Mask and original value have the same type.
32739 NewMask = DAG.getBitcast(WideVecVT, Mask);
32740 for (unsigned i = 0; i != NumElems; ++i)
32741 ShuffleVec[i] = i * SizeRatio;
32742 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
32743 ShuffleVec[i] = NumElems*SizeRatio;
32744 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
32745 DAG.getConstant(0, dl, WideVecVT),
32748 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
32749 unsigned WidenNumElts = NumElems*SizeRatio;
32750 unsigned MaskNumElts = VT.getVectorNumElements();
32751 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
32754 unsigned NumConcat = WidenNumElts / MaskNumElts;
32755 SmallVector<SDValue, 16> Ops(NumConcat);
32756 SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
32758 for (unsigned i = 1; i != NumConcat; ++i)
32761 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
32764 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
32765 Mst->getBasePtr(), NewMask, StVT,
32766 Mst->getMemOperand(), false);
32769 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
32770 const X86Subtarget &Subtarget) {
32771 StoreSDNode *St = cast<StoreSDNode>(N);
32772 EVT VT = St->getValue().getValueType();
32773 EVT StVT = St->getMemoryVT();
32775 SDValue StoredVal = St->getOperand(1);
32776 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32778 // If we are saving a concatenation of two XMM registers and 32-byte stores
32779 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
32781 unsigned AddressSpace = St->getAddressSpace();
32782 unsigned Alignment = St->getAlignment();
32783 if (VT.is256BitVector() && StVT == VT &&
32784 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
32785 AddressSpace, Alignment, &Fast) &&
32787 unsigned NumElems = VT.getVectorNumElements();
32791 SDValue Value0 = extract128BitVector(StoredVal, 0, DAG, dl);
32792 SDValue Value1 = extract128BitVector(StoredVal, NumElems / 2, DAG, dl);
32794 SDValue Ptr0 = St->getBasePtr();
32795 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 16, dl);
32798 DAG.getStore(St->getChain(), dl, Value0, Ptr0, St->getPointerInfo(),
32799 Alignment, St->getMemOperand()->getFlags());
32801 DAG.getStore(St->getChain(), dl, Value1, Ptr1, St->getPointerInfo(),
32802 std::min(16U, Alignment), St->getMemOperand()->getFlags());
32803 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
32806 // Optimize trunc store (of multiple scalars) to shuffle and store.
32807 // First, pack all of the elements in one place. Next, store to memory
32808 // in fewer chunks.
32809 if (St->isTruncatingStore() && VT.isVector()) {
32810 // Check if we can detect an AVG pattern from the truncation. If yes,
32811 // replace the trunc store with a normal store of the X86ISD::AVG result.
32813 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
32815 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
32816 St->getPointerInfo(), St->getAlignment(),
32817 St->getMemOperand()->getFlags());
32820 detectAVX512USatPattern(St->getValue(), St->getMemoryVT(), Subtarget))
32821 return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
32822 dl, Val, St->getBasePtr(),
32823 St->getMemoryVT(), St->getMemOperand(), DAG);
32825 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32826 unsigned NumElems = VT.getVectorNumElements();
32827 assert(StVT != VT && "Cannot truncate to the same type");
32828 unsigned FromSz = VT.getScalarSizeInBits();
32829 unsigned ToSz = StVT.getScalarSizeInBits();
32831 // The truncating store is legal in some cases. For example
32832 // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
32833 // are dedicated to truncating stores.
32834 // In this case we don't need any further transformations.
32835 if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
32838 // From, To sizes and ElemCount must be pow of two
32839 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
32840 // We are going to use the original vector elt for storing.
32841 // Accumulated smaller vector elements must be a multiple of the store size.
32842 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
32844 unsigned SizeRatio = FromSz / ToSz;
32846 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
32848 // Create a type on which we perform the shuffle
32849 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
32850 StVT.getScalarType(), NumElems*SizeRatio);
32852 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
32854 SDValue WideVec = DAG.getBitcast(WideVecVT, St->getValue());
32855 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
32856 for (unsigned i = 0; i != NumElems; ++i)
32857 ShuffleVec[i] = i * SizeRatio;
32859 // Can't shuffle using an illegal type.
32860 if (!TLI.isTypeLegal(WideVecVT))
32863 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
32864 DAG.getUNDEF(WideVecVT),
32866 // At this point all of the data is stored at the bottom of the
32867 // register. We now need to store it to memory.
32869 // Find the largest store unit
32870 MVT StoreType = MVT::i8;
32871 for (MVT Tp : MVT::integer_valuetypes()) {
32872 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
32876 // On 32-bit systems, we can't store 64-bit integers. Try bitcasting to f64.
32877 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
32878 (64 <= NumElems * ToSz))
32879 StoreType = MVT::f64;
32881 // Bitcast the original vector into a vector of store-size units
32882 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
32883 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
32884 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
32885 SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
32886 SmallVector<SDValue, 8> Chains;
32887 SDValue Ptr = St->getBasePtr();
32889 // Perform one or more big stores into memory.
32890 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
32891 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
32892 StoreType, ShuffWide,
32893 DAG.getIntPtrConstant(i, dl));
32895 DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
32896 St->getAlignment(), St->getMemOperand()->getFlags());
32897 Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
32898 Chains.push_back(Ch);
32901 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
32904 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
32905 // the FP state in cases where an emms may be missing.
32906 // A preferable solution to the general problem is to figure out the right
32907 // places to insert EMMS. This qualifies as a quick hack.
32909 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
32910 if (VT.getSizeInBits() != 64)
32913 const Function *F = DAG.getMachineFunction().getFunction();
32914 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
32916 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
32917 if ((VT.isVector() ||
32918 (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
32919 isa<LoadSDNode>(St->getValue()) &&
32920 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
32921 St->getChain().hasOneUse() && !St->isVolatile()) {
32922 SDNode* LdVal = St->getValue().getNode();
32923 LoadSDNode *Ld = nullptr;
32924 int TokenFactorIndex = -1;
32925 SmallVector<SDValue, 8> Ops;
32926 SDNode* ChainVal = St->getChain().getNode();
32927 // Must be a store of a load. We currently handle two cases: the load
32928 // is a direct child, and it's under an intervening TokenFactor. It is
32929 // possible to dig deeper under nested TokenFactors.
32930 if (ChainVal == LdVal)
32931 Ld = cast<LoadSDNode>(St->getChain());
32932 else if (St->getValue().hasOneUse() &&
32933 ChainVal->getOpcode() == ISD::TokenFactor) {
32934 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
32935 if (ChainVal->getOperand(i).getNode() == LdVal) {
32936 TokenFactorIndex = i;
32937 Ld = cast<LoadSDNode>(St->getValue());
32939 Ops.push_back(ChainVal->getOperand(i));
32943 if (!Ld || !ISD::isNormalLoad(Ld))
32946 // If this is not the MMX case, i.e. we are just turning i64 load/store
32947 // into f64 load/store, avoid the transformation if there are multiple
32948 // uses of the loaded value.
32949 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
32954 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
32955 // Otherwise, if it's legal to use f64 SSE instructions, use an f64 load/store pair instead.
32957 if (Subtarget.is64Bit() || F64IsLegal) {
32958 MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
32959 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
32960 Ld->getPointerInfo(), Ld->getAlignment(),
32961 Ld->getMemOperand()->getFlags());
32962 SDValue NewChain = NewLd.getValue(1);
32963 if (TokenFactorIndex >= 0) {
32964 Ops.push_back(NewChain);
32965 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
32967 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
32968 St->getPointerInfo(), St->getAlignment(),
32969 St->getMemOperand()->getFlags());
32972 // Otherwise, lower to two pairs of 32-bit loads / stores.
32973 SDValue LoAddr = Ld->getBasePtr();
32974 SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
32976 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
32977 Ld->getPointerInfo(), Ld->getAlignment(),
32978 Ld->getMemOperand()->getFlags());
32979 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
32980 Ld->getPointerInfo().getWithOffset(4),
32981 MinAlign(Ld->getAlignment(), 4),
32982 Ld->getMemOperand()->getFlags());
32984 SDValue NewChain = LoLd.getValue(1);
32985 if (TokenFactorIndex >= 0) {
32986 Ops.push_back(LoLd);
32987 Ops.push_back(HiLd);
32988 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
32991 LoAddr = St->getBasePtr();
32992 HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
32995 DAG.getStore(NewChain, StDL, LoLd, LoAddr, St->getPointerInfo(),
32996 St->getAlignment(), St->getMemOperand()->getFlags());
32997 SDValue HiSt = DAG.getStore(
32998 NewChain, StDL, HiLd, HiAddr, St->getPointerInfo().getWithOffset(4),
32999 MinAlign(St->getAlignment(), 4), St->getMemOperand()->getFlags());
33000 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
33003 // This is similar to the above case, but here we handle a scalar 64-bit
33004 // integer store that is extracted from a vector on a 32-bit target.
33005 // If we have SSE2, then we can treat it like a floating-point double
33006 // to get past legalization. The execution dependencies fixup pass will
33007 // choose the optimal machine instruction for the store if this really is
33008 // an integer or v2f32 rather than an f64.
33009 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
33010 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
33011 SDValue OldExtract = St->getOperand(1);
33012 SDValue ExtOp0 = OldExtract.getOperand(0);
33013 unsigned VecSize = ExtOp0.getValueSizeInBits();
33014 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
33015 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
33016 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
33017 BitCast, OldExtract.getOperand(1));
33018 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
33019 St->getPointerInfo(), St->getAlignment(),
33020 St->getMemOperand()->getFlags());
33026 /// Return 'true' if this vector operation is "horizontal"
33027 /// and return the operands for the horizontal operation in LHS and RHS. A
33028 /// horizontal operation performs the binary operation on successive elements
33029 /// of its first operand, then on successive elements of its second operand,
33030 /// returning the resulting values in a vector. For example, if
33031 /// A = < float a0, float a1, float a2, float a3 >
33033 /// B = < float b0, float b1, float b2, float b3 >
33034 /// then the result of doing a horizontal operation on A and B is
33035 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
33036 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
33037 /// A horizontal-op B, for some already available A and B, and if so then LHS is
33038 /// set to A, RHS to B, and the routine returns 'true'.
33039 /// Note that the binary operation should have the property that if one of the
33040 /// operands is UNDEF then the result is UNDEF.
33041 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
33042 // Look for the following pattern: if
33043 // A = < float a0, float a1, float a2, float a3 >
33044 // B = < float b0, float b1, float b2, float b3 >
33046 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
33047 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
33048 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
33049 // which is A horizontal-op B.
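// On SSE3, for example, an FADD of those two shuffles can then be emitted as
// a single HADDPS of A and B instead of shuffle/shuffle/add.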
33051 // At least one of the operands should be a vector shuffle.
33052 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
33053 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
33056 MVT VT = LHS.getSimpleValueType();
33058 assert((VT.is128BitVector() || VT.is256BitVector()) &&
33059 "Unsupported vector type for horizontal add/sub");
33061 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
33062 // operate independently on 128-bit lanes.
33063 unsigned NumElts = VT.getVectorNumElements();
33064 unsigned NumLanes = VT.getSizeInBits()/128;
33065 unsigned NumLaneElts = NumElts / NumLanes;
33066 assert((NumLaneElts % 2 == 0) &&
33067 "Vector type should have an even number of elements in each lane");
33068 unsigned HalfLaneElts = NumLaneElts/2;
33070 // View LHS in the form
33071 // LHS = VECTOR_SHUFFLE A, B, LMask
33072 // If LHS is not a shuffle then pretend it is the shuffle
33073 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
33074 // NOTE: in what follows a default initialized SDValue represents an UNDEF of type VT.
33077 SmallVector<int, 16> LMask(NumElts);
33078 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
33079 if (!LHS.getOperand(0).isUndef())
33080 A = LHS.getOperand(0);
33081 if (!LHS.getOperand(1).isUndef())
33082 B = LHS.getOperand(1);
33083 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
33084 std::copy(Mask.begin(), Mask.end(), LMask.begin());
33086 if (!LHS.isUndef())
33088 for (unsigned i = 0; i != NumElts; ++i)
33092 // Likewise, view RHS in the form
33093 // RHS = VECTOR_SHUFFLE C, D, RMask
33095 SmallVector<int, 16> RMask(NumElts);
33096 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
33097 if (!RHS.getOperand(0).isUndef())
33098 C = RHS.getOperand(0);
33099 if (!RHS.getOperand(1).isUndef())
33100 D = RHS.getOperand(1);
33101 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
33102 std::copy(Mask.begin(), Mask.end(), RMask.begin());
33104 if (!RHS.isUndef())
33106 for (unsigned i = 0; i != NumElts; ++i)
33110 // Check that the shuffles are both shuffling the same vectors.
33111 if (!(A == C && B == D) && !(A == D && B == C))
33114 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
33115 if (!A.getNode() && !B.getNode())
33118 // If A and B occur in reverse order in RHS, then "swap" them (which means
33119 // rewriting the mask).
33121 ShuffleVectorSDNode::commuteMask(RMask);
33123 // At this point LHS and RHS are equivalent to
33124 // LHS = VECTOR_SHUFFLE A, B, LMask
33125 // RHS = VECTOR_SHUFFLE A, B, RMask
33126 // Check that the masks correspond to performing a horizontal operation.
33127 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
33128 for (unsigned i = 0; i != NumLaneElts; ++i) {
33129 int LIdx = LMask[i+l], RIdx = RMask[i+l];
33131 // Ignore any UNDEF components.
33132 if (LIdx < 0 || RIdx < 0 ||
33133 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
33134 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
33137 // Check that successive elements are being operated on. If not, this is
33138 // not a horizontal operation.
33139 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
33140 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
33141 if (!(LIdx == Index && RIdx == Index + 1) &&
33142 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
33147 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
33148 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
33152 /// Do target-specific dag combines on floating-point adds/subs.
33153 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
33154 const X86Subtarget &Subtarget) {
33155 EVT VT = N->getValueType(0);
33156 SDValue LHS = N->getOperand(0);
33157 SDValue RHS = N->getOperand(1);
33158 bool IsFadd = N->getOpcode() == ISD::FADD;
33159 assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
33161 // Try to synthesize horizontal add/sub from adds/subs of shuffles.
33162 if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
33163 (Subtarget.hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
33164 isHorizontalBinOp(LHS, RHS, IsFadd)) {
33165 auto NewOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
33166 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
33171 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify the codegen.
33173 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
33174 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
33175 const X86Subtarget &Subtarget,
33177 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
33178 SDValue Src = N->getOperand(0);
33179 unsigned Opcode = Src.getOpcode();
33180 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33182 EVT VT = N->getValueType(0);
33183 EVT SrcVT = Src.getValueType();
33185 auto IsRepeatedOpOrFreeTruncation = [VT](SDValue Op0, SDValue Op1) {
33186 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
33188 // Repeated operand, so we are only trading one output truncation for
33189 // one input truncation.
33193 // See if either operand has been extended from a smaller/equal size to
33194 // the truncation size, allowing a truncation to combine with the extend.
33195 unsigned Opcode0 = Op0.getOpcode();
33196 if ((Opcode0 == ISD::ANY_EXTEND || Opcode0 == ISD::SIGN_EXTEND ||
33197 Opcode0 == ISD::ZERO_EXTEND) &&
33198 Op0.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
33201 unsigned Opcode1 = Op1.getOpcode();
33202 if ((Opcode1 == ISD::ANY_EXTEND || Opcode1 == ISD::SIGN_EXTEND ||
33203 Opcode1 == ISD::ZERO_EXTEND) &&
33204 Op1.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
33207 // See if either operand is a single-use constant which can be constant folded.
33209 SDValue BC0 = peekThroughOneUseBitcasts(Op0);
33210 SDValue BC1 = peekThroughOneUseBitcasts(Op1);
33211 return ISD::isBuildVectorOfConstantSDNodes(BC0.getNode()) ||
33212 ISD::isBuildVectorOfConstantSDNodes(BC1.getNode());
33215 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
33216 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
33217 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
33218 return DAG.getNode(Opcode, DL, VT, Trunc0, Trunc1);
33221 // Don't combine if the operation has other uses.
33222 if (!N->isOnlyUserOf(Src.getNode()))
33225 // Only support vector truncation for now.
33226 // TODO: i64 scalar math would benefit as well.
33227 if (!VT.isVector())
33230 // In most cases it's only worth pre-truncating if we're only facing the cost
33231 // of one truncation.
33232 // i.e. if one of the inputs will constant fold or the input is repeated.
33237 SDValue Op0 = Src.getOperand(0);
33238 SDValue Op1 = Src.getOperand(1);
33239 if (TLI.isOperationLegalOrPromote(Opcode, VT) &&
33240 IsRepeatedOpOrFreeTruncation(Op0, Op1))
33241 return TruncateArithmetic(Op0, Op1);
33246 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
33247 // better to truncate if we have the chance.
33248 if (SrcVT.getScalarType() == MVT::i64 && TLI.isOperationLegal(Opcode, VT) &&
33249 !TLI.isOperationLegal(Opcode, SrcVT))
33250 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
33253 SDValue Op0 = Src.getOperand(0);
33254 SDValue Op1 = Src.getOperand(1);
33255 if (TLI.isOperationLegal(Opcode, VT) &&
33256 IsRepeatedOpOrFreeTruncation(Op0, Op1))
33257 return TruncateArithmetic(Op0, Op1);
33265 /// Truncate a group of v4i32 into v16i8/v8i16 using X86ISD::PACKUS.
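/// For example, an i32 -> i8 truncation proceeds in multiple PACKUS rounds,
/// with the bits that will not appear in the result masked off first so that
/// the unsigned saturation never changes the values.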
33267 combineVectorTruncationWithPACKUS(SDNode *N, SelectionDAG &DAG,
33268 SmallVector<SDValue, 8> &Regs) {
33269 assert(Regs.size() > 0 && (Regs[0].getValueType() == MVT::v4i32 ||
33270 Regs[0].getValueType() == MVT::v2i64));
33271 EVT OutVT = N->getValueType(0);
33272 EVT OutSVT = OutVT.getVectorElementType();
33273 EVT InVT = Regs[0].getValueType();
33274 EVT InSVT = InVT.getVectorElementType();
33277 // First, use mask to unset all bits that won't appear in the result.
33278 assert((OutSVT == MVT::i8 || OutSVT == MVT::i16) &&
33279 "OutSVT can only be either i8 or i16.");
33281 APInt::getLowBitsSet(InSVT.getSizeInBits(), OutSVT.getSizeInBits());
33282 SDValue MaskVal = DAG.getConstant(Mask, DL, InVT);
33283 for (auto &Reg : Regs)
33284 Reg = DAG.getNode(ISD::AND, DL, InVT, MaskVal, Reg);
33286 MVT UnpackedVT, PackedVT;
33287 if (OutSVT == MVT::i8) {
33288 UnpackedVT = MVT::v8i16;
33289 PackedVT = MVT::v16i8;
33291 UnpackedVT = MVT::v4i32;
33292 PackedVT = MVT::v8i16;
33295 // In each iteration, halve the element size of the type.
33296 auto RegNum = Regs.size();
33297 for (unsigned j = 1, e = InSVT.getSizeInBits() / OutSVT.getSizeInBits();
33298 j < e; j *= 2, RegNum /= 2) {
33299 for (unsigned i = 0; i < RegNum; i++)
33300 Regs[i] = DAG.getBitcast(UnpackedVT, Regs[i]);
33301 for (unsigned i = 0; i < RegNum / 2; i++)
33302 Regs[i] = DAG.getNode(X86ISD::PACKUS, DL, PackedVT, Regs[i * 2],
33306 // If the type of the result is v8i8, we need to do one more X86ISD::PACKUS, and
33307 // then extract a subvector as the result since v8i8 is not a legal type.
33308 if (OutVT == MVT::v8i8) {
33309 Regs[0] = DAG.getNode(X86ISD::PACKUS, DL, PackedVT, Regs[0], Regs[0]);
33310 Regs[0] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, Regs[0],
33311 DAG.getIntPtrConstant(0, DL));
33313 } else if (RegNum > 1) {
33314 Regs.resize(RegNum);
33315 return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Regs);
33320 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
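/// Each v4i32 input is first sign-extended in place (shift left by 16, then
/// arithmetic shift right by 16) so that PACKSS, which saturates on the signed
/// range, produces exactly the low 16 bits of every element.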
33322 combineVectorTruncationWithPACKSS(SDNode *N, const X86Subtarget &Subtarget,
33324 SmallVector<SDValue, 8> &Regs) {
33325 assert(Regs.size() > 0 && Regs[0].getValueType() == MVT::v4i32);
33326 EVT OutVT = N->getValueType(0);
33329 // Shift left by 16 bits, then arithmetic-shift right by 16 bits.
33330 SDValue ShAmt = DAG.getConstant(16, DL, MVT::i32);
33331 for (auto &Reg : Regs) {
33332 Reg = getTargetVShiftNode(X86ISD::VSHLI, DL, MVT::v4i32, Reg, ShAmt,
33334 Reg = getTargetVShiftNode(X86ISD::VSRAI, DL, MVT::v4i32, Reg, ShAmt,
33338 for (unsigned i = 0, e = Regs.size() / 2; i < e; i++)
33339 Regs[i] = DAG.getNode(X86ISD::PACKSS, DL, MVT::v8i16, Regs[i * 2],
33342 if (Regs.size() > 2) {
33343 Regs.resize(Regs.size() / 2);
33344 return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Regs);
33349 /// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
33350 /// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
33351 /// legalization the truncation will be translated into a BUILD_VECTOR whose
33352 /// elements are each extracted from a vector and then truncated, and it is
33353 /// difficult to do this optimization on that form.
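/// For example, a v8i32 -> v8i16 truncation is split into two v4i32 halves
/// that are then packed back together, keeping the whole operation in vector
/// registers instead of going through scalar extract/truncate/insert.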
33354 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
33355 const X86Subtarget &Subtarget) {
33356 EVT OutVT = N->getValueType(0);
33357 if (!OutVT.isVector())
33360 SDValue In = N->getOperand(0);
33361 if (!In.getValueType().isSimple())
33364 EVT InVT = In.getValueType();
33365 unsigned NumElems = OutVT.getVectorNumElements();
33367 // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
33368 // SSE2, and we need to take care of it specially.
33369 // AVX512 provides vpmovdb.
33370 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
33373 EVT OutSVT = OutVT.getVectorElementType();
33374 EVT InSVT = InVT.getVectorElementType();
33375 if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
33376 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
33380 // SSSE3's pshufb results in fewer instructions in the cases below.
33381 if (Subtarget.hasSSSE3() && NumElems == 8 &&
33382 ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
33383 (InSVT == MVT::i32 && OutSVT == MVT::i16)))
33388 // Split a long vector into vectors of legal type.
33389 unsigned RegNum = InVT.getSizeInBits() / 128;
33390 SmallVector<SDValue, 8> SubVec(RegNum);
33391 unsigned NumSubRegElts = 128 / InSVT.getSizeInBits();
33392 EVT SubRegVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubRegElts);
33394 for (unsigned i = 0; i < RegNum; i++)
33395 SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubRegVT, In,
33396 DAG.getIntPtrConstant(i * NumSubRegElts, DL));
33398 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
33399 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
33400 // truncate 2 x v4i32 to v8i16.
33401 if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
33402 return combineVectorTruncationWithPACKUS(N, DAG, SubVec);
33403 else if (InSVT == MVT::i32)
33404 return combineVectorTruncationWithPACKSS(N, Subtarget, DAG, SubVec);
33409 /// This function transforms vector truncation of 'all or none' bits values,
33410 /// i.e. vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32, into X86ISD::PACKSS operations.
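/// For example, a v8i32 comparison result (each element all-ones or all-zeros)
/// truncated to v8i16 can be emitted as a PACKSS without losing any bits.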
33411 static SDValue combineVectorSignBitsTruncation(SDNode *N, SDLoc &DL,
33413 const X86Subtarget &Subtarget) {
33414 // Requires SSE2 but AVX512 has fast truncate.
33415 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
33418 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
33421 SDValue In = N->getOperand(0);
33422 if (!In.getValueType().isSimple())
33425 MVT VT = N->getValueType(0).getSimpleVT();
33426 MVT SVT = VT.getScalarType();
33428 MVT InVT = In.getValueType().getSimpleVT();
33429 MVT InSVT = InVT.getScalarType();
33431 // Use PACKSS if the input is a splatted sign bit.
33432 // e.g. Comparison result, sext_in_reg, etc.
33433 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
33434 if (NumSignBits != InSVT.getSizeInBits())
33437 // Check we have a truncation suited for PACKSS.
33438 if (!VT.is128BitVector() && !VT.is256BitVector())
33440 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
33442 if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
33445 return truncateVectorCompareWithPACKSS(VT, In, DL, DAG, Subtarget);
33448 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
33449 const X86Subtarget &Subtarget) {
33450 EVT VT = N->getValueType(0);
33451 SDValue Src = N->getOperand(0);
33454 // Attempt to pre-truncate inputs to arithmetic ops instead.
33455 if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
33458 // Try to detect AVG pattern first.
33459 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
33462 // Try to combine truncation with unsigned saturation.
33463 if (SDValue Val = combineTruncateWithUSat(Src, VT, DL, DAG, Subtarget))
33466 // The bitcast source is a direct mmx result.
33468 // Detect bitcasts between i32 and x86mmx.
33468 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
33469 SDValue BCSrc = Src.getOperand(0);
33470 if (BCSrc.getValueType() == MVT::x86mmx)
33471 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
33474 // Try to truncate extended sign bits with PACKSS.
33475 if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
33478 return combineVectorTruncation(N, DAG, Subtarget);
33481 /// Returns the negated value if the node \p N flips the sign of an FP value.
33483 /// An FP-negation node may have different forms: FNEG(x) or FXOR (x, 0x80000000).
33484 /// AVX512F does not have FXOR, so FNEG is lowered as
33485 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
33486 /// In this case we go through all bitcasts.
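/// For example, an FXOR of x with a vector splat of -0.0 (the sign-mask
/// constant) is recognized as a negation, and x is returned.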
33487 static SDValue isFNEG(SDNode *N) {
33488 if (N->getOpcode() == ISD::FNEG)
33489 return N->getOperand(0);
33491 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
33492 if (Op.getOpcode() != X86ISD::FXOR && Op.getOpcode() != ISD::XOR)
33495 SDValue Op1 = peekThroughBitcasts(Op.getOperand(1));
33496 if (!Op1.getValueType().isFloatingPoint())
33499 SDValue Op0 = peekThroughBitcasts(Op.getOperand(0));
33501 unsigned EltBits = Op1.getScalarValueSizeInBits();
33502 auto isSignMask = [&](const ConstantFP *C) {
33503 return C->getValueAPF().bitcastToAPInt() == APInt::getSignMask(EltBits);
33506 // There is more than one way to represent the same constant on
33507 // the different X86 targets. The type of the node may also depend on size.
33508 // - load scalar value and broadcast
33509 // - BUILD_VECTOR node
33510 // - load from a constant pool.
33511 // We check all variants here.
33512 if (Op1.getOpcode() == X86ISD::VBROADCAST) {
33513 if (auto *C = getTargetConstantFromNode(Op1.getOperand(0)))
33514 if (isSignMask(cast<ConstantFP>(C)))
33517 } else if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1)) {
33518 if (ConstantFPSDNode *CN = BV->getConstantFPSplatNode())
33519 if (isSignMask(CN->getConstantFPValue()))
33522 } else if (auto *C = getTargetConstantFromNode(Op1)) {
33523 if (C->getType()->isVectorTy()) {
33524 if (auto *SplatV = C->getSplatValue())
33525 if (isSignMask(cast<ConstantFP>(SplatV)))
33527 } else if (auto *FPConst = dyn_cast<ConstantFP>(C))
33528 if (isSignMask(FPConst))
33534 /// Do target-specific dag combines on floating point negations.
33535 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
33536 const X86Subtarget &Subtarget) {
33537 EVT OrigVT = N->getValueType(0);
33538 SDValue Arg = isFNEG(N);
33539 assert(Arg.getNode() && "N is expected to be an FNEG node");
33541 EVT VT = Arg.getValueType();
33542 EVT SVT = VT.getScalarType();
33545 // Let legalize expand this if it isn't a legal type yet.
33546 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
33549 // If we're negating an FMUL node on a target with FMA, then we can avoid the
33550 // use of a constant by performing (-0 - A*B) instead.
33551 // FIXME: Check rounding control flags as well once it becomes available.
33552 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
33553 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
33554 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
33555 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
33556 Arg.getOperand(1), Zero);
33557 return DAG.getBitcast(OrigVT, NewNode);
33560 // If we're negating an FMA node, then we can adjust the
33561 // instruction to include the extra negation.
33562 unsigned NewOpcode = 0;
33563 if (Arg.hasOneUse()) {
33564 switch (Arg.getOpcode()) {
33565 case X86ISD::FMADD: NewOpcode = X86ISD::FNMSUB; break;
33566 case X86ISD::FMSUB: NewOpcode = X86ISD::FNMADD; break;
33567 case X86ISD::FNMADD: NewOpcode = X86ISD::FMSUB; break;
33568 case X86ISD::FNMSUB: NewOpcode = X86ISD::FMADD; break;
33569 case X86ISD::FMADD_RND: NewOpcode = X86ISD::FNMSUB_RND; break;
33570 case X86ISD::FMSUB_RND: NewOpcode = X86ISD::FNMADD_RND; break;
33571 case X86ISD::FNMADD_RND: NewOpcode = X86ISD::FMSUB_RND; break;
33572 case X86ISD::FNMSUB_RND: NewOpcode = X86ISD::FMADD_RND; break;
33573 // We can't handle a scalar intrinsic node here because it would only
33574 // invert one element and not the whole vector. But we could try to handle
33575 // a negation of the lower element only.
33579 return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT,
33580 Arg.getNode()->ops()));
33585 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
33586 const X86Subtarget &Subtarget) {
33587 MVT VT = N->getSimpleValueType(0);
33588 // If we have integer vector types available, use the integer opcodes.
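// For example, (v4f32 FAND x, y) becomes
//   (v4f32 bitcast (v2i64 and (v2i64 bitcast x), (v2i64 bitcast y))).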
33589 if (VT.isVector() && Subtarget.hasSSE2()) {
33592 MVT IntVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
33594 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
33595 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
33596 unsigned IntOpcode;
33597 switch (N->getOpcode()) {
33598 default: llvm_unreachable("Unexpected FP logic op");
33599 case X86ISD::FOR: IntOpcode = ISD::OR; break;
33600 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
33601 case X86ISD::FAND: IntOpcode = ISD::AND; break;
33602 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
33604 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
33605 return DAG.getBitcast(VT, IntOp);
33610 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
33611 TargetLowering::DAGCombinerInfo &DCI,
33612 const X86Subtarget &Subtarget) {
33613 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
33616 if (DCI.isBeforeLegalizeOps())
33619 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
33622 if (Subtarget.hasCMov())
33623 if (SDValue RV = combineIntegerAbs(N, DAG))
33626 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
33630 return combineFneg(N, DAG, Subtarget);
33635 static bool isNullFPScalarOrVectorConst(SDValue V) {
33636 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
33639 /// If a value is a scalar FP zero or a vector FP zero (potentially including
33640 /// undefined elements), return a zero constant that may be used to fold away
33641 /// that value. In the case of a vector, the returned constant will not contain
33642 /// undefined elements even if the input parameter does. This makes it suitable
33643 /// to be used as a replacement operand with operations (eg, bitwise-and) where
33644 /// an undef should not propagate.
33645 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
33646 const X86Subtarget &Subtarget) {
33647 if (!isNullFPScalarOrVectorConst(V))
33650 if (V.getValueType().isVector())
33651 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
33656 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
33657 const X86Subtarget &Subtarget) {
33658 SDValue N0 = N->getOperand(0);
33659 SDValue N1 = N->getOperand(1);
33660 EVT VT = N->getValueType(0);
33663 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
33664 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
33665 (VT == MVT::f64 && Subtarget.hasSSE2())))
33668 auto isAllOnesConstantFP = [](SDValue V) {
33669 auto *C = dyn_cast<ConstantFPSDNode>(V);
33670 return C && C->getConstantFPValue()->isAllOnesValue();
33673 // fand (fxor X, -1), Y --> fandn X, Y
33674 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
33675 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
33677 // fand X, (fxor Y, -1) --> fandn Y, X
33678 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
33679 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
33684 /// Do target-specific dag combines on X86ISD::FAND nodes.
33685 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
33686 const X86Subtarget &Subtarget) {
33687 // FAND(0.0, x) -> 0.0
33688 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
33691 // FAND(x, 0.0) -> 0.0
33692 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
33695 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
33698 return lowerX86FPLogicOp(N, DAG, Subtarget);
33701 /// Do target-specific dag combines on X86ISD::FANDN nodes.
33702 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
33703 const X86Subtarget &Subtarget) {
33704 // FANDN(0.0, x) -> x
33705 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
33706 return N->getOperand(1);
33708 // FANDN(x, 0.0) -> 0.0
33709 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
33712 return lowerX86FPLogicOp(N, DAG, Subtarget);
33715 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
33716 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
33717 const X86Subtarget &Subtarget) {
33718 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
33720 // F[X]OR(0.0, x) -> x
33721 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
33722 return N->getOperand(1);
33724 // F[X]OR(x, 0.0) -> x
33725 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
33726 return N->getOperand(0);
33729 if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
33732 return lowerX86FPLogicOp(N, DAG, Subtarget);
33735 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
33736 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
33737 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
33739 // Only perform optimizations if UnsafeMath is used.
33740 if (!DAG.getTarget().Options.UnsafeFPMath)
33743 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
33744 // into FMAXC and FMINC, which are commutative operations.
33745 unsigned NewOp = 0;
33746 switch (N->getOpcode()) {
33747 default: llvm_unreachable("unknown opcode");
33748 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
33749 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
33752 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
33753 N->getOperand(0), N->getOperand(1));
33756 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
33757 const X86Subtarget &Subtarget) {
33758 if (Subtarget.useSoftFloat())
33761 // TODO: Check for global or instruction-level "nnan". In that case, we
33762 // should be able to lower to FMAX/FMIN alone.
33763 // TODO: If an operand is already known to be a NaN or not a NaN, this
33764 // should be an optional swap and FMAX/FMIN.
33766 EVT VT = N->getValueType(0);
33767 if (!((Subtarget.hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
33768 (Subtarget.hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
33769 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
33772 // This takes at least 3 instructions, so favor a library call when operating
33773 // on a scalar and minimizing code size.
33774 if (!VT.isVector() && DAG.getMachineFunction().getFunction()->optForMinSize())
33777 SDValue Op0 = N->getOperand(0);
33778 SDValue Op1 = N->getOperand(1);
33780 EVT SetCCType = DAG.getTargetLoweringInfo().getSetCCResultType(
33781 DAG.getDataLayout(), *DAG.getContext(), VT);
32783 // There are 4 possibilities involving NaN inputs, and these are the required
32784 // outputs:
32785 //                   Op1
32786 //               Num     NaN
32787 //            ----------------
32788 //     Num    |  Max  |  Op0 |
32789 // Op0        ----------------
32790 //     NaN    |  Op1  |  NaN |
32791 //            ----------------
32794 // The SSE FP max/min instructions were not designed for this case, but rather to implement:
33795 // Min = Op1 < Op0 ? Op1 : Op0
33796 // Max = Op1 > Op0 ? Op1 : Op0
33798 // So they always return Op0 if either input is a NaN. However, we can still
33799 // use those instructions for fmaxnum by selecting away a NaN input.
33801 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
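// A sketch of what gets built below for fmaxnum(Op0, Op1):
//   %max    = FMAX Op1, Op0
//   %isnan  = setcc uno Op0, Op0   ; true iff Op0 is NaN
//   %result = select %isnan, Op1, %max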
33802 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
33803 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
33804 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType , Op0, Op0, ISD::SETUO);
33806 // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
33807 // are NaN, the NaN value of Op1 is the result.
33808 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
/// Do target-specific dag combines on X86ISD::ANDNP nodes.
static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  // ANDNP(0, x) -> x
  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
    return N->getOperand(1);

  // ANDNP(x, 0) -> 0
  if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
    return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));

  EVT VT = N->getValueType(0);

  // Attempt to recursively combine a bitmask ANDNP with shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    SmallVector<int, 1> NonceMask; // Just a placeholder.
    NonceMask.push_back(0);
    if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask, {},
                                      /*Depth*/ 1, /*HasVarMask*/ false, DAG,
                                      DCI, Subtarget))
      return SDValue(); // This routine will use CombineTo to replace N.
  }

  return SDValue();
}
static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
                         TargetLowering::DAGCombinerInfo &DCI) {
  // BT ignores high bits in the bit index operand.
  SDValue Op1 = N->getOperand(1);
  if (Op1.hasOneUse()) {
    unsigned BitWidth = Op1.getValueSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
    KnownBits Known;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLI.ShrinkDemandedConstant(Op1, DemandedMask, TLO) ||
        TLI.SimplifyDemandedBits(Op1, DemandedMask, Known, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }
  return SDValue();
}
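// For example (illustrative): the register form of 'bt' only reads the low
// log2(width) bits of the index, so an index computed as (and %ecx, 31) for a
// 32-bit 'bt' can have the masking constant shrunk or removed entirely.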
static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.isVector())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);

  // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and AVX2
  // since there is no sign-extended shift right operation on a vector with
  // 64-bit elements.
  // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
  // (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
                           N0.getOpcode() == ISD::SIGN_EXTEND)) {
    SDValue N00 = N0.getOperand(0);

    // EXTLOAD has a better solution on AVX2,
    // it may be replaced with X86ISD::VSEXT node.
    if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
      if (!ISD::isNormalLoad(N00.getNode()))
        return SDValue();

    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
                                N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }
  return SDValue();
}
33892 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
33893 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
33894 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
33895 /// opportunities to combine math ops, use an LEA, or use a complex addressing
33896 /// mode. This can eliminate extend, add, and shift instructions.
33897 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
33898 const X86Subtarget &Subtarget) {
33899 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
33900 Ext->getOpcode() != ISD::ZERO_EXTEND)
33903 // TODO: This should be valid for other integer types.
33904 EVT VT = Ext->getValueType(0);
33905 if (VT != MVT::i64)
33908 SDValue Add = Ext->getOperand(0);
33909 if (Add.getOpcode() != ISD::ADD)
33912 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
33913 bool NSW = Add->getFlags().hasNoSignedWrap();
33914 bool NUW = Add->getFlags().hasNoUnsignedWrap();
33916 // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
33918 if ((Sext && !NSW) || (!Sext && !NUW))
33921 // Having a constant operand to the 'add' ensures that we are not increasing
33922 // the instruction count because the constant is extended for free below.
33923 // A constant operand can also become the displacement field of an LEA.
33924 auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
33928 // Don't make the 'add' bigger if there's no hope of combining it with some
33929 // other 'add' or 'shl' instruction.
33930 // TODO: It may be profitable to generate simpler LEA instructions in place
33931 // of single 'add' instructions, but the cost model for selecting an LEA
33932 // currently has a high threshold.
33933 bool HasLEAPotential = false;
33934 for (auto *User : Ext->uses()) {
33935 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
33936 HasLEAPotential = true;
33940 if (!HasLEAPotential)
33943 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
33944 int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
33945 SDValue AddOp0 = Add.getOperand(0);
33946 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
33947 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
33949 // The wider add is guaranteed to not wrap because both operands are
33952 Flags.setNoSignedWrap(NSW);
33953 Flags.setNoUnsignedWrap(NUW);
33954 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
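  // Illustrative effect of the promotion above (hypothetical IR):
  //   %a = add nsw i32 %x, 5
  //   %i = sext i32 %a to i64
  //   %p = getelementptr i32, i32* %base, i64 %i
  // After pulling the sext ahead of the add, the +5 becomes an i64 constant
  // that can typically fold into an LEA or a complex addressing mode, e.g.
  //   leaq 20(%rdi,%rax,4), %rcx
  // instead of a separate add followed by the extend.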
/// (i8,i32 {s/z}ext ({s/u}divrem (i8 x, i8 y))) ->
/// (i8,i32 ({s/u}divrem_sext_hreg (i8 x, i8 y))
/// This exposes the {s/z}ext to the sdivrem lowering, so that it directly
/// extends from AH (which we otherwise need to do contortions to access).
static SDValue getDivRem8(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  auto OpcodeN = N->getOpcode();
  auto OpcodeN0 = N0.getOpcode();
  if (!((OpcodeN == ISD::SIGN_EXTEND && OpcodeN0 == ISD::SDIVREM) ||
        (OpcodeN == ISD::ZERO_EXTEND && OpcodeN0 == ISD::UDIVREM)))
    return SDValue();

  EVT VT = N->getValueType(0);
  EVT InVT = N0.getValueType();
  if (N0.getResNo() != 1 || InVT != MVT::i8 || VT != MVT::i32)
    return SDValue();

  SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
  auto DivRemOpcode = OpcodeN0 == ISD::SDIVREM ? X86ISD::SDIVREM8_SEXT_HREG
                                               : X86ISD::UDIVREM8_ZEXT_HREG;
  SDValue R = DAG.getNode(DivRemOpcode, SDLoc(N), NodeTys, N0.getOperand(0),
                          N0.getOperand(1));
  DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
  return R.getValue(1);
}
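// For example (roughly): for 'zext i8 (urem i8 %x, %y) to i32', the
// UDIVREM8_ZEXT_HREG node lets instruction selection emit
//   divb %cl              ; AL = quotient, AH = remainder
//   movzbl %ah, %eax
// and extend straight out of AH, rather than first copying AH into a plain
// 8-bit register and extending from there.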
33983 /// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
33984 /// ZERO_EXTEND_VECTOR_INREG, this requires the splitting (or concatenating
33985 /// with UNDEFs) of the input to vectors of the same size as the target type
33986 /// which then extends the lowest elements.
33987 static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
33988 TargetLowering::DAGCombinerInfo &DCI,
33989 const X86Subtarget &Subtarget) {
33990 unsigned Opcode = N->getOpcode();
33991 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
33993 if (!DCI.isBeforeLegalizeOps())
33995 if (!Subtarget.hasSSE2())
33998 SDValue N0 = N->getOperand(0);
33999 EVT VT = N->getValueType(0);
34000 EVT SVT = VT.getScalarType();
34001 EVT InVT = N0.getValueType();
34002 EVT InSVT = InVT.getScalarType();
34004 // Input type must be a vector and we must be extending legal integer types.
34005 if (!VT.isVector())
34007 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
34009 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
34012 // On AVX2+ targets, if the input/output types are both legal then we will be
34013 // able to use SIGN_EXTEND/ZERO_EXTEND directly.
34014 if (Subtarget.hasInt256() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
34015 DAG.getTargetLoweringInfo().isTypeLegal(InVT))
34020 auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) {
34021 EVT InVT = N.getValueType();
34022 EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
34023 Size / InVT.getScalarSizeInBits());
34024 SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
34025 DAG.getUNDEF(InVT));
34027 return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
34030 // If target-size is less than 128-bits, extend to a type that would extend
34031 // to 128 bits, extend that and extract the original target vector.
34032 if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) {
34033 unsigned Scale = 128 / VT.getSizeInBits();
34035 EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
34036 SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
34037 SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
34038 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
34039 DAG.getIntPtrConstant(0, DL));
34042 // If target-size is 128-bits (or 256-bits on AVX2 target), then convert to
34043 // ISD::*_EXTEND_VECTOR_INREG which ensures lowering to X86ISD::V*EXT.
34044 // Also use this if we don't have SSE41 to allow the legalizer do its job.
34045 if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
34046 (VT.is256BitVector() && Subtarget.hasInt256()) ||
34047 (VT.is512BitVector() && Subtarget.hasAVX512())) {
34048 SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
34049 return Opcode == ISD::SIGN_EXTEND
34050 ? DAG.getSignExtendVectorInReg(ExOp, DL, VT)
34051 : DAG.getZeroExtendVectorInReg(ExOp, DL, VT);
34054 auto SplitAndExtendInReg = [&](unsigned SplitSize) {
34055 unsigned NumVecs = VT.getSizeInBits() / SplitSize;
34056 unsigned NumSubElts = SplitSize / SVT.getSizeInBits();
34057 EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
34058 EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
34060 SmallVector<SDValue, 8> Opnds;
34061 for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) {
34062 SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
34063 DAG.getIntPtrConstant(Offset, DL));
34064 SrcVec = ExtendVecSize(DL, SrcVec, SplitSize);
34065 SrcVec = Opcode == ISD::SIGN_EXTEND
34066 ? DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT)
34067 : DAG.getZeroExtendVectorInReg(SrcVec, DL, SubVT);
34068 Opnds.push_back(SrcVec);
34070 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
34073 // On pre-AVX2 targets, split into 128-bit nodes of
34074 // ISD::*_EXTEND_VECTOR_INREG.
34075 if (!Subtarget.hasInt256() && !(VT.getSizeInBits() % 128))
34076 return SplitAndExtendInReg(128);
34078 // On pre-AVX512 targets, split into 256-bit nodes of
34079 // ISD::*_EXTEND_VECTOR_INREG.
34080 if (!Subtarget.hasAVX512() && !(VT.getSizeInBits() % 256))
34081 return SplitAndExtendInReg(256);
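  // For example (illustrative), on an SSE4.1 target without AVX2 a
  // 'sext v8i16 -> v8i32' is split here into two 128-bit
  // SIGN_EXTEND_VECTOR_INREG nodes, which then select to a pair of
  // 'pmovsxwd' instructions whose v4i32 results are concatenated back into
  // the v8i32 value.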
34086 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
34087 TargetLowering::DAGCombinerInfo &DCI,
34088 const X86Subtarget &Subtarget) {
34089 SDValue N0 = N->getOperand(0);
34090 EVT VT = N->getValueType(0);
34091 EVT InVT = N0.getValueType();
34094 if (SDValue DivRem8 = getDivRem8(N, DAG))
34097 if (!DCI.isBeforeLegalizeOps()) {
34098 if (InVT == MVT::i1) {
34099 SDValue Zero = DAG.getConstant(0, DL, VT);
34100 SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
34101 return DAG.getSelect(DL, VT, N0, AllOnes, Zero);
34106 if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
34107 isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
34108 // Invert and sign-extend a boolean is the same as zero-extend and subtract
34109 // 1 because 0 becomes -1 and 1 becomes 0. The subtract is efficiently
34110 // lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
34111 // sext (xor Bool, -1) --> sub (zext Bool), 1
34112 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
34113 return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
34116 if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
34119 if (Subtarget.hasAVX() && VT.is256BitVector())
34120 if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
34123 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
34129 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
34130 const X86Subtarget &Subtarget) {
34132 EVT VT = N->getValueType(0);
34134 // Let legalize expand this if it isn't a legal type yet.
34135 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
34138 EVT ScalarVT = VT.getScalarType();
34139 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
34142 SDValue A = N->getOperand(0);
34143 SDValue B = N->getOperand(1);
34144 SDValue C = N->getOperand(2);
34146 auto invertIfNegative = [](SDValue &V) {
34147 if (SDValue NegVal = isFNEG(V.getNode())) {
34154 // Do not convert the passthru input of scalar intrinsics.
34155 // FIXME: We could allow negations of the lower element only.
34156 bool NegA = N->getOpcode() != X86ISD::FMADDS1_RND && invertIfNegative(A);
34157 bool NegB = invertIfNegative(B);
34158 bool NegC = N->getOpcode() != X86ISD::FMADDS3_RND && invertIfNegative(C);
34160 // Negative multiplication when NegA xor NegB
34161 bool NegMul = (NegA != NegB);
34163 unsigned NewOpcode;
34165 NewOpcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
34167 NewOpcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
34170 if (N->getOpcode() == X86ISD::FMADD_RND) {
34171 switch (NewOpcode) {
34172 case X86ISD::FMADD: NewOpcode = X86ISD::FMADD_RND; break;
34173 case X86ISD::FMSUB: NewOpcode = X86ISD::FMSUB_RND; break;
34174 case X86ISD::FNMADD: NewOpcode = X86ISD::FNMADD_RND; break;
34175 case X86ISD::FNMSUB: NewOpcode = X86ISD::FNMSUB_RND; break;
34177 } else if (N->getOpcode() == X86ISD::FMADDS1_RND) {
34178 switch (NewOpcode) {
34179 case X86ISD::FMADD: NewOpcode = X86ISD::FMADDS1_RND; break;
34180 case X86ISD::FMSUB: NewOpcode = X86ISD::FMSUBS1_RND; break;
34181 case X86ISD::FNMADD: NewOpcode = X86ISD::FNMADDS1_RND; break;
34182 case X86ISD::FNMSUB: NewOpcode = X86ISD::FNMSUBS1_RND; break;
34184 } else if (N->getOpcode() == X86ISD::FMADDS3_RND) {
34185 switch (NewOpcode) {
34186 case X86ISD::FMADD: NewOpcode = X86ISD::FMADDS3_RND; break;
34187 case X86ISD::FMSUB: NewOpcode = X86ISD::FMSUBS3_RND; break;
34188 case X86ISD::FNMADD: NewOpcode = X86ISD::FNMADDS3_RND; break;
34189 case X86ISD::FNMSUB: NewOpcode = X86ISD::FNMSUBS3_RND; break;
34192 assert((N->getOpcode() == X86ISD::FMADD || N->getOpcode() == ISD::FMA) &&
34193 "Unexpected opcode!");
34194 return DAG.getNode(NewOpcode, dl, VT, A, B, C);
34197 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
34200 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
34201 TargetLowering::DAGCombinerInfo &DCI,
34202 const X86Subtarget &Subtarget) {
34203 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
34204 // (and (i32 x86isd::setcc_carry), 1)
34205 // This eliminates the zext. This transformation is necessary because
34206 // ISD::SETCC is always legalized to i8.
34208 SDValue N0 = N->getOperand(0);
34209 EVT VT = N->getValueType(0);
34211 if (N0.getOpcode() == ISD::AND &&
34213 N0.getOperand(0).hasOneUse()) {
34214 SDValue N00 = N0.getOperand(0);
34215 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
34216 if (!isOneConstant(N0.getOperand(1)))
34218 return DAG.getNode(ISD::AND, dl, VT,
34219 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
34220 N00.getOperand(0), N00.getOperand(1)),
34221 DAG.getConstant(1, dl, VT));
34225 if (N0.getOpcode() == ISD::TRUNCATE &&
34227 N0.getOperand(0).hasOneUse()) {
34228 SDValue N00 = N0.getOperand(0);
34229 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
34230 return DAG.getNode(ISD::AND, dl, VT,
34231 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
34232 N00.getOperand(0), N00.getOperand(1)),
34233 DAG.getConstant(1, dl, VT));
34237 if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
34240 if (VT.is256BitVector())
34241 if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
34244 if (SDValue DivRem8 = getDivRem8(N, DAG))
34247 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
34250 if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
34256 /// Try to map a 128-bit or larger integer comparison to vector instructions
34257 /// before type legalization splits it up into chunks.
34258 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
34259 const X86Subtarget &Subtarget) {
34260 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
34261 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
34263 // We're looking for an oversized integer equality comparison, but ignore a
34264 // comparison with zero because that gets special treatment in EmitTest().
34265 SDValue X = SetCC->getOperand(0);
34266 SDValue Y = SetCC->getOperand(1);
34267 EVT OpVT = X.getValueType();
34268 unsigned OpSize = OpVT.getSizeInBits();
34269 if (!OpVT.isScalarInteger() || OpSize < 128 || isNullConstant(Y))
34272 // TODO: Use PXOR + PTEST for SSE4.1 or later?
34273 // TODO: Add support for AVX-512.
34274 EVT VT = SetCC->getValueType(0);
34276 if ((OpSize == 128 && Subtarget.hasSSE2()) ||
34277 (OpSize == 256 && Subtarget.hasAVX2())) {
34278 EVT VecVT = OpSize == 128 ? MVT::v16i8 : MVT::v32i8;
34279 SDValue VecX = DAG.getBitcast(VecVT, X);
34280 SDValue VecY = DAG.getBitcast(VecVT, Y);
34282 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
34283 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
34284 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
34285 // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
34286 // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
34287 SDValue Cmp = DAG.getNode(X86ISD::PCMPEQ, DL, VecVT, VecX, VecY);
34288 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
34289 SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
34291 return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
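    // The resulting code for an i128 equality compare on SSE2 is roughly
    // (illustrative):
    //   movdqu (%rdi), %xmm0
    //   movdqu (%rsi), %xmm1
    //   pcmpeqb %xmm1, %xmm0
    //   pmovmskb %xmm0, %eax
    //   cmpl $0xffff, %eax
    //   sete %al
    // which avoids splitting the compare into multiple i64 operations during
    // type legalization.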
34297 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
34298 const X86Subtarget &Subtarget) {
34299 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
34300 SDValue LHS = N->getOperand(0);
34301 SDValue RHS = N->getOperand(1);
34302 EVT VT = N->getValueType(0);
34305 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
34306 EVT OpVT = LHS.getValueType();
34307 // 0-x == y --> x+y == 0
34308 // 0-x != y --> x+y != 0
34309 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
34311 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
34312 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
34314 // x == 0-y --> x+y == 0
34315 // x != 0-y --> x+y != 0
34316 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
34318 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
34319 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
34322 if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
34326 if (VT.getScalarType() == MVT::i1 &&
34327 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
34329 (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
34330 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
34331 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
34333 if (!IsSEXT0 || !IsVZero1) {
34334 // Swap the operands and update the condition code.
34335 std::swap(LHS, RHS);
34336 CC = ISD::getSetCCSwappedOperands(CC);
34338 IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
34339 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
34340 IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
34343 if (IsSEXT0 && IsVZero1) {
      assert(VT == LHS.getOperand(0).getValueType() &&
             "Unexpected operand type");
34346 if (CC == ISD::SETGT)
34347 return DAG.getConstant(0, DL, VT);
34348 if (CC == ISD::SETLE)
34349 return DAG.getConstant(1, DL, VT);
34350 if (CC == ISD::SETEQ || CC == ISD::SETGE)
34351 return DAG.getNOT(DL, LHS.getOperand(0), VT);
34353 assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
34354 "Unexpected condition code!");
34355 return LHS.getOperand(0);
34359 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
34360 // to avoid scalarization via legalization because v4i32 is not a legal type.
34361 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
34362 LHS.getValueType() == MVT::v4f32)
34363 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);

  // Gather and Scatter instructions use k-registers for masks. The type of
  // the masks is v*i1. So the mask will be truncated anyway.
  // The SIGN_EXTEND_INREG may be dropped.
  SDValue Mask = N->getOperand(2);
  if (Mask.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
    NewOps[2] = Mask.getOperand(0);
    DAG.UpdateNodeOperands(N, NewOps);
  }
  return SDValue();
}
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
  SDValue EFLAGS = N->getOperand(1);

  // Try to simplify the EFLAGS and condition code operands.
  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG))
    return getSETCC(CC, Flags, DL, DAG);

  return SDValue();
}
/// Optimize branch condition evaluation.
static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue EFLAGS = N->getOperand(3);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));

  // Try to simplify the EFLAGS and condition code operands.
  // Make sure to not keep references to operands, as combineSetCCEFLAGS can
  // RAUW them under us.
  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG)) {
    SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
                       N->getOperand(1), Cond, Flags);
  }

  return SDValue();
}
34415 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
34416 SelectionDAG &DAG) {
34417 // Take advantage of vector comparisons producing 0 or -1 in each lane to
34418 // optimize away operation when it's from a constant.
34420 // The general transformation is:
34421 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
34422 // AND(VECTOR_CMP(x,y), constant2)
34423 // constant2 = UNARYOP(constant)
34425 // Early exit if this isn't a vector operation, the operand of the
34426 // unary operation isn't a bitwise AND, or if the sizes of the operations
34427 // aren't the same.
34428 EVT VT = N->getValueType(0);
34429 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
34430 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
34431 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
34434 // Now check that the other operand of the AND is a constant. We could
34435 // make the transformation for non-constant splats as well, but it's unclear
34436 // that would be a benefit as it would not eliminate any operations, just
34437 // perform one more step in scalar code before moving to the vector unit.
34438 if (BuildVectorSDNode *BV =
34439 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
34440 // Bail out if the vector isn't a constant.
34441 if (!BV->isConstant())
34444 // Everything checks out. Build up the new and improved node.
34446 EVT IntVT = BV->getValueType(0);
34447 // Create a new constant of the appropriate type for the transformed
34449 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
34450 // The AND node needs bitcasts to/from an integer vector type around it.
34451 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
34452 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
34453 N->getOperand(0)->getOperand(0), MaskConst);
34454 SDValue Res = DAG.getBitcast(VT, NewAnd);
34461 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
34462 const X86Subtarget &Subtarget) {
34463 SDValue Op0 = N->getOperand(0);
34464 EVT VT = N->getValueType(0);
34465 EVT InVT = Op0.getValueType();
34466 EVT InSVT = InVT.getScalarType();
34467 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34469 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
34470 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
34471 if (InVT.isVector() && (InSVT == MVT::i8 || InSVT == MVT::i16)) {
34473 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
34474 InVT.getVectorNumElements());
34475 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
34477 if (TLI.isOperationLegal(ISD::UINT_TO_FP, DstVT))
34478 return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P);
34480 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
34483 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
34484 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
34485 // the optimization here.
34486 if (DAG.SignBitIsZero(Op0))
34487 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
34492 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
34493 const X86Subtarget &Subtarget) {
34494 // First try to optimize away the conversion entirely when it's
34495 // conditionally from a constant. Vectors only.
34496 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
34499 // Now move on to more general possibilities.
34500 SDValue Op0 = N->getOperand(0);
34501 EVT VT = N->getValueType(0);
34502 EVT InVT = Op0.getValueType();
34503 EVT InSVT = InVT.getScalarType();
34505 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
34506 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
34507 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
34508 if (InVT.isVector() &&
34509 (InSVT == MVT::i8 || InSVT == MVT::i16 ||
34510 (InSVT == MVT::i1 && !DAG.getTargetLoweringInfo().isTypeLegal(InVT)))) {
34512 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
34513 InVT.getVectorNumElements());
34514 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
34515 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
34518 // Without AVX512DQ we only support i64 to float scalar conversion. For both
34519 // vectors and scalars, see if we know that the upper bits are all the sign
34520 // bit, in which case we can truncate the input to i32 and convert from that.
34521 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
34522 unsigned BitWidth = InVT.getScalarSizeInBits();
34523 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
34524 if (NumSignBits >= (BitWidth - 31)) {
34525 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), 32);
34526 if (InVT.isVector())
34527 TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
34528 InVT.getVectorNumElements());
34530 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
34531 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
34535 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
34536 // a 32-bit target where SSE doesn't support i64->FP operations.
34537 if (!Subtarget.useSoftFloat() && Op0.getOpcode() == ISD::LOAD) {
34538 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
34539 EVT LdVT = Ld->getValueType(0);
34541 // This transformation is not supported if the result type is f16 or f128.
34542 if (VT == MVT::f16 || VT == MVT::f128)
34545 if (!Ld->isVolatile() && !VT.isVector() &&
34546 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
34547 !Subtarget.is64Bit() && LdVT == MVT::i64) {
34548 SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
34549 SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
34550 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
34557 // Optimize RES, EFLAGS = X86ISD::ADD LHS, RHS
34558 static SDValue combineX86ADD(SDNode *N, SelectionDAG &DAG,
34559 X86TargetLowering::DAGCombinerInfo &DCI) {
34560 // When legalizing carry, we create carries via add X, -1
34561 // If that comes from an actual carry, via setcc, we use the
34563 if (isAllOnesConstant(N->getOperand(1)) && N->hasAnyUseOfValue(1)) {
34564 SDValue Carry = N->getOperand(0);
34565 while (Carry.getOpcode() == ISD::TRUNCATE ||
34566 Carry.getOpcode() == ISD::ZERO_EXTEND ||
34567 Carry.getOpcode() == ISD::SIGN_EXTEND ||
34568 Carry.getOpcode() == ISD::ANY_EXTEND ||
34569 (Carry.getOpcode() == ISD::AND &&
34570 isOneConstant(Carry.getOperand(1))))
34571 Carry = Carry.getOperand(0);
34573 if (Carry.getOpcode() == X86ISD::SETCC ||
34574 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
34575 if (Carry.getConstantOperandVal(0) == X86::COND_B)
34576 return DCI.CombineTo(N, SDValue(N, 0), Carry.getOperand(1));
34583 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
34584 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
34585 X86TargetLowering::DAGCombinerInfo &DCI) {
34586 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
34587 // the result is either zero or one (depending on the input carry bit).
34588 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
34589 if (X86::isZeroNode(N->getOperand(0)) &&
34590 X86::isZeroNode(N->getOperand(1)) &&
34591 // We don't have a good way to replace an EFLAGS use, so only do this when
34593 SDValue(N, 1).use_empty()) {
34595 EVT VT = N->getValueType(0);
34596 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
34597 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
34598 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
34599 DAG.getConstant(X86::COND_B, DL,
34602 DAG.getConstant(1, DL, VT));
34603 return DCI.CombineTo(N, Res1, CarryOut);
/// Materialize "setb reg" as "sbb reg,reg", since it produces an all-ones bit
/// which is more useful than 0/1 in some cases.
static SDValue materializeSBB(SDNode *N, SDValue EFLAGS, SelectionDAG &DAG) {
  SDLoc DL(N);
  // "Condition code B" is also known as "the carry flag" (CF).
  SDValue CF = DAG.getConstant(X86::COND_B, DL, MVT::i8);
  SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, CF, EFLAGS);
  MVT VT = N->getSimpleValueType(0);
  if (VT == MVT::i8)
    return DAG.getNode(ISD::AND, DL, VT, SBB, DAG.getConstant(1, DL, VT));

  assert(VT == MVT::i1 && "Unexpected type for SETCC node");
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SBB);
}
34624 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
34625 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
34626 /// with CMP+{ADC, SBB}.
34627 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
34628 bool IsSub = N->getOpcode() == ISD::SUB;
34629 SDValue X = N->getOperand(0);
34630 SDValue Y = N->getOperand(1);
34632 // If this is an add, canonicalize a zext operand to the RHS.
34633 // TODO: Incomplete? What if both sides are zexts?
34634 if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
34635 Y.getOpcode() != ISD::ZERO_EXTEND)
34638 // Look through a one-use zext.
34639 bool PeekedThroughZext = false;
34640 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
34641 Y = Y.getOperand(0);
34642 PeekedThroughZext = true;
34645 // If this is an add, canonicalize a setcc operand to the RHS.
34646 // TODO: Incomplete? What if both sides are setcc?
34647 // TODO: Should we allow peeking through a zext of the other operand?
34648 if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
34649 Y.getOpcode() != X86ISD::SETCC)
34652 if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
34656 EVT VT = N->getValueType(0);
34657 X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
34659 if (CC == X86::COND_B) {
34660 // X + SETB Z --> X + (mask SBB Z, Z)
34661 // X - SETB Z --> X - (mask SBB Z, Z)
34662 // TODO: Produce ADC/SBB here directly and avoid SETCC_CARRY?
34663 SDValue SBB = materializeSBB(Y.getNode(), Y.getOperand(1), DAG);
34664 if (SBB.getValueSizeInBits() != VT.getSizeInBits())
34665 SBB = DAG.getZExtOrTrunc(SBB, DL, VT);
34666 return DAG.getNode(IsSub ? ISD::SUB : ISD::ADD, DL, VT, X, SBB);
34669 if (CC == X86::COND_A) {
34670 SDValue EFLAGS = Y->getOperand(1);
34671 // Try to convert COND_A into COND_B in an attempt to facilitate
34672 // materializing "setb reg".
34674 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
34675 // cannot take an immediate as its first operand.
34677 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
34678 EFLAGS.getValueType().isInteger() &&
34679 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
34680 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
34681 EFLAGS.getNode()->getVTList(),
34682 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
34683 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
34684 SDValue SBB = materializeSBB(Y.getNode(), NewEFLAGS, DAG);
34685 if (SBB.getValueSizeInBits() != VT.getSizeInBits())
34686 SBB = DAG.getZExtOrTrunc(SBB, DL, VT);
34687 return DAG.getNode(IsSub ? ISD::SUB : ISD::ADD, DL, VT, X, SBB);
34691 if (CC != X86::COND_E && CC != X86::COND_NE)
34694 SDValue Cmp = Y.getOperand(1);
34695 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
34696 !X86::isZeroNode(Cmp.getOperand(1)) ||
34697 !Cmp.getOperand(0).getValueType().isInteger())
34700 // (cmp Z, 1) sets the carry flag if Z is 0.
34701 SDValue Z = Cmp.getOperand(0);
34702 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z,
34703 DAG.getConstant(1, DL, Z.getValueType()));
34705 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
34707 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
34708 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
34709 if (CC == X86::COND_NE)
34710 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
34711 DAG.getConstant(-1ULL, DL, VT), NewCmp);
34713 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
34714 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
34715 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
34716 DAG.getConstant(0, DL, VT), NewCmp);
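  // As a concrete (illustrative) example, 'X + (Z != 0)' from above becomes:
  //   cmpl $1, %Z          ; CF is set iff Z == 0
  //   sbbl $-1, %X         ; X - (-1) - CF == X + 1 - CF == X + (Z != 0)
  // so the setcc/zext pair disappears into the borrow of the sbb.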
34719 static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
34720 const X86Subtarget &Subtarget) {
34721 SDValue MulOp = N->getOperand(0);
34722 SDValue Phi = N->getOperand(1);
34724 if (MulOp.getOpcode() != ISD::MUL)
34725 std::swap(MulOp, Phi);
34726 if (MulOp.getOpcode() != ISD::MUL)
34730 if (!canReduceVMulWidth(MulOp.getNode(), DAG, Mode) || Mode == MULU16)
34733 EVT VT = N->getValueType(0);
34735 unsigned RegSize = 128;
34736 if (Subtarget.hasBWI())
34738 else if (Subtarget.hasAVX2())
34740 unsigned VectorSize = VT.getVectorNumElements() * 16;
34741 // If the vector size is less than 128, or greater than the supported RegSize,
34742 // do not use PMADD.
34743 if (VectorSize < 128 || VectorSize > RegSize)
34747 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
34748 VT.getVectorNumElements());
34749 EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
34750 VT.getVectorNumElements() / 2);
34752 // Shrink the operands of mul.
34753 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(0));
34754 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(1));
34756 // Madd vector size is half of the original vector size
34757 SDValue Madd = DAG.getNode(X86ISD::VPMADDWD, DL, MAddVT, N0, N1);
34758 // Fill the rest of the output with 0
34759 SDValue Zero = getZeroVector(Madd.getSimpleValueType(), Subtarget, DAG, DL);
34760 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, Zero);
34761 return DAG.getNode(ISD::ADD, DL, VT, Concat, Phi);
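  // For instance (illustrative), a reduction loop whose vector multiply has
  // operands known to fit in 16 bits is shrunk to v8i16 here and emitted as a
  // single
  //   pmaddwd %xmm1, %xmm0      ; v4i32 partial sums of 16-bit products
  // padded with zeroes and added into the reduction phi, instead of a full
  // 32-bit vector multiply followed by an add.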
34764 static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
34765 const X86Subtarget &Subtarget) {
34767 EVT VT = N->getValueType(0);
34768 SDValue Op0 = N->getOperand(0);
34769 SDValue Op1 = N->getOperand(1);
34771 // TODO: There's nothing special about i32, any integer type above i16 should
34772 // work just as well.
34773 if (!VT.isVector() || !VT.isSimple() ||
34774 !(VT.getVectorElementType() == MVT::i32))
34777 unsigned RegSize = 128;
34778 if (Subtarget.hasBWI())
34780 else if (Subtarget.hasAVX2())
34783 // We only handle v16i32 for SSE2 / v32i32 for AVX2 / v64i32 for AVX512.
34784 // TODO: We should be able to handle larger vectors by splitting them before
34785 // feeding them into several SADs, and then reducing over those.
34786 if (VT.getSizeInBits() / 4 > RegSize)
34789 // We know N is a reduction add, which means one of its operands is a phi.
34790 // To match SAD, we need the other operand to be a vector select.
34791 SDValue SelectOp, Phi;
34792 if (Op0.getOpcode() == ISD::VSELECT) {
34795 } else if (Op1.getOpcode() == ISD::VSELECT) {
34801 // Check whether we have an abs-diff pattern feeding into the select.
34802 if(!detectZextAbsDiff(SelectOp, Op0, Op1))
34805 // SAD pattern detected. Now build a SAD instruction and an addition for
34806 // reduction. Note that the number of elements of the result of SAD is less
34807 // than the number of elements of its input. Therefore, we could only update
34808 // part of elements in the reduction vector.
34809 SDValue Sad = createPSADBW(DAG, Op0, Op1, DL);
34811 // The output of PSADBW is a vector of i64.
34812 // We need to turn the vector of i64 into a vector of i32.
34813 // If the reduction vector is at least as wide as the psadbw result, just
34814 // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero
34816 MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
34817 if (VT.getSizeInBits() >= ResVT.getSizeInBits())
34818 Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
34820 Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad);
34822 if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
34823 // Update part of elements of the reduction vector. This is done by first
34824 // extracting a sub-vector from it, updating this sub-vector, and inserting
34826 SDValue SubPhi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Phi,
34827 DAG.getIntPtrConstant(0, DL));
34828 SDValue Res = DAG.getNode(ISD::ADD, DL, ResVT, Sad, SubPhi);
34829 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Phi, Res,
34830 DAG.getIntPtrConstant(0, DL));
34832 return DAG.getNode(ISD::ADD, DL, VT, Sad, Phi);
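  // For example (illustrative), the byte-wise sum-of-absolute-differences loop
  //   sum += abs(a[i] - b[i])
  // matches the vselect/abs-diff pattern above and is emitted as a single
  //   psadbw %xmm1, %xmm0       ; two i64 partial sums per 128-bit lane
  // whose result is reinterpreted as i32 lanes and folded into the reduction
  // phi.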
34835 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
34836 const X86Subtarget &Subtarget) {
34837 const SDNodeFlags Flags = N->getFlags();
34838 if (Flags.hasVectorReduction()) {
34839 if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
34841 if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
34844 EVT VT = N->getValueType(0);
34845 SDValue Op0 = N->getOperand(0);
34846 SDValue Op1 = N->getOperand(1);
34848 // Try to synthesize horizontal adds from adds of shuffles.
34849 if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
34850 (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
34851 isHorizontalBinOp(Op0, Op1, true))
34852 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
34854 return combineAddOrSubToADCOrSBB(N, DAG);
34857 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
34858 const X86Subtarget &Subtarget) {
34859 SDValue Op0 = N->getOperand(0);
34860 SDValue Op1 = N->getOperand(1);
34862 // X86 can't encode an immediate LHS of a sub. See if we can push the
34863 // negation into a preceding instruction.
34864 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
34865 // If the RHS of the sub is a XOR with one use and a constant, invert the
34866 // immediate. Then add one to the LHS of the sub so we can turn
34867 // X-Y -> X+~Y+1, saving one register.
34868 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
34869 isa<ConstantSDNode>(Op1.getOperand(1))) {
34870 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
34871 EVT VT = Op0.getValueType();
34872 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
34874 DAG.getConstant(~XorC, SDLoc(Op1), VT));
34875 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
34876 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
34880 // Try to synthesize horizontal subs from subs of shuffles.
34881 EVT VT = N->getValueType(0);
34882 if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
34883 (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
34884 isHorizontalBinOp(Op0, Op1, false))
34885 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
34887 return combineAddOrSubToADCOrSBB(N, DAG);
34890 static SDValue combineVSZext(SDNode *N, SelectionDAG &DAG,
34891 TargetLowering::DAGCombinerInfo &DCI,
34892 const X86Subtarget &Subtarget) {
34893 if (DCI.isBeforeLegalize())
34897 unsigned Opcode = N->getOpcode();
34898 MVT VT = N->getSimpleValueType(0);
34899 MVT SVT = VT.getVectorElementType();
34900 unsigned NumElts = VT.getVectorNumElements();
34901 unsigned EltSizeInBits = SVT.getSizeInBits();
34903 SDValue Op = N->getOperand(0);
34904 MVT OpVT = Op.getSimpleValueType();
34905 MVT OpEltVT = OpVT.getVectorElementType();
34906 unsigned OpEltSizeInBits = OpEltVT.getSizeInBits();
34907 unsigned InputBits = OpEltSizeInBits * NumElts;
34909 // Perform any constant folding.
34910 // FIXME: Reduce constant pool usage and don't fold when OptSize is enabled.
34912 SmallVector<APInt, 64> EltBits;
34913 if (getTargetConstantBitsFromNode(Op, OpEltSizeInBits, UndefElts, EltBits)) {
34914 APInt Undefs(NumElts, 0);
34915 SmallVector<APInt, 4> Vals(NumElts, APInt(EltSizeInBits, 0));
34917 (Opcode == X86ISD::VZEXT) || (Opcode == ISD::ZERO_EXTEND_VECTOR_INREG);
34918 for (unsigned i = 0; i != NumElts; ++i) {
34919 if (UndefElts[i]) {
34923 Vals[i] = IsZEXT ? EltBits[i].zextOrTrunc(EltSizeInBits)
34924 : EltBits[i].sextOrTrunc(EltSizeInBits);
34926 return getConstVector(Vals, Undefs, VT, DAG, DL);
34929 // (vzext (bitcast (vzext (x)) -> (vzext x)
34930 // TODO: (vsext (bitcast (vsext (x)) -> (vsext x)
34931 SDValue V = peekThroughBitcasts(Op);
34932 if (Opcode == X86ISD::VZEXT && V != Op && V.getOpcode() == X86ISD::VZEXT) {
34933 MVT InnerVT = V.getSimpleValueType();
34934 MVT InnerEltVT = InnerVT.getVectorElementType();
34936 // If the element sizes match exactly, we can just do one larger vzext. This
34937 // is always an exact type match as vzext operates on integer types.
34938 if (OpEltVT == InnerEltVT) {
34939 assert(OpVT == InnerVT && "Types must match for vzext!");
34940 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
34943 // The only other way we can combine them is if only a single element of the
34944 // inner vzext is used in the input to the outer vzext.
34945 if (InnerEltVT.getSizeInBits() < InputBits)
34948 // In this case, the inner vzext is completely dead because we're going to
34949 // only look at bits inside of the low element. Just do the outer vzext on
34950 // a bitcast of the input to the inner.
34951 return DAG.getNode(X86ISD::VZEXT, DL, VT, DAG.getBitcast(OpVT, V));
34954 // Check if we can bypass extracting and re-inserting an element of an input
34955 // vector. Essentially:
34956 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
34957 // TODO: Add X86ISD::VSEXT support
34958 if (Opcode == X86ISD::VZEXT &&
34959 V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
34960 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
34961 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
34962 SDValue ExtractedV = V.getOperand(0);
34963 SDValue OrigV = ExtractedV.getOperand(0);
34964 if (isNullConstant(ExtractedV.getOperand(1))) {
34965 MVT OrigVT = OrigV.getSimpleValueType();
34966 // Extract a subvector if necessary...
34967 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
34968 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
34969 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
34970 OrigVT.getVectorNumElements() / Ratio);
34971 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
34972 DAG.getIntPtrConstant(0, DL));
34974 Op = DAG.getBitcast(OpVT, OrigV);
34975 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
/// Canonicalize (LSUB p, 1) -> (LADD p, -1).
static SDValue combineLockSub(SDNode *N, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget) {
  SDValue Chain = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  MVT VT = RHS.getSimpleValueType();
  SDLoc DL(N);

  auto *C = dyn_cast<ConstantSDNode>(RHS);
  if (!C || C->getZExtValue() != 1)
    return SDValue();

  RHS = DAG.getConstant(-1, DL, VT);
  MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
  return DAG.getMemIntrinsicNode(X86ISD::LADD, DL,
                                 DAG.getVTList(MVT::i32, MVT::Other),
                                 {Chain, LHS, RHS}, VT, MMO);
}
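// e.g. (illustrative) an atomic decrement that reaches here as (LSUB ptr, 1)
// is rewritten to (LADD ptr, -1) and is then emitted as a locked add of -1
// (such as 'lock addl $-1, (%rdi)'), so locked subtract-by-one shares the
// LADD lowering and EFLAGS handling.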
// TEST (AND a, b), (AND a, b) -> TEST a, b
static SDValue combineTestM(SDNode *N, SelectionDAG &DAG) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  if (Op0 != Op1 || Op1->getOpcode() != ISD::AND)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  return DAG.getNode(X86ISD::TESTM, DL, VT,
                     Op0->getOperand(0), Op0->getOperand(1));
}
static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

  if (N->getOperand(0) == N->getOperand(1)) {
    if (N->getOpcode() == X86ISD::PCMPEQ)
      return getOnesVector(VT, DAG, DL);
    if (N->getOpcode() == X86ISD::PCMPGT)
      return getZeroVector(VT, Subtarget, DAG, DL);
  }

  return SDValue();
}
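// e.g. 'pcmpeqd %xmm0, %xmm0' (a register compared with itself) is the usual
// idiom for materializing an all-ones vector, while the same trick with
// 'pcmpgtd' can never be true, so it folds to an all-zeroes vector instead.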
35032 static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
35033 TargetLowering::DAGCombinerInfo &DCI,
35034 const X86Subtarget &Subtarget) {
35035 if (DCI.isBeforeLegalizeOps())
35039 SDValue Vec = N->getOperand(0);
35040 SDValue SubVec = N->getOperand(1);
35041 SDValue Idx = N->getOperand(2);
35043 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
35044 MVT OpVT = N->getSimpleValueType(0);
35045 MVT SubVecVT = SubVec.getSimpleValueType();
35047 // If this is an insert of an extract, combine to a shuffle. Don't do this
35048 // if the insert or extract can be represented with a subvector operation.
35049 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
35050 SubVec.getOperand(0).getSimpleValueType() == OpVT &&
35051 (IdxVal != 0 || !Vec.isUndef())) {
35052 int ExtIdxVal = cast<ConstantSDNode>(SubVec.getOperand(1))->getZExtValue();
35053 if (ExtIdxVal != 0) {
35054 int VecNumElts = OpVT.getVectorNumElements();
35055 int SubVecNumElts = SubVecVT.getVectorNumElements();
35056 SmallVector<int, 64> Mask(VecNumElts);
35057 // First create an identity shuffle mask.
35058 for (int i = 0; i != VecNumElts; ++i)
35060 // Now insert the extracted portion.
35061 for (int i = 0; i != SubVecNumElts; ++i)
35062 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
35064 return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
  // Fold two 16-byte or 32-byte subvector loads into one 32-byte or 64-byte
  // load:
  // (insert_subvector (insert_subvector undef, (load16 addr), 0),
  //                   (load16 addr + 16), Elts/2)
  // --> load32 addr
  // or:
  // (insert_subvector (insert_subvector undef, (load32 addr), 0),
  //                   (load32 addr + 32), Elts/2)
  // --> load64 addr
  // or a 16-byte or 32-byte broadcast:
  // (insert_subvector (insert_subvector undef, (load16 addr), 0),
  //                   (load16 addr), Elts/2)
  // --> X86SubVBroadcast(load16 addr)
  // or:
  // (insert_subvector (insert_subvector undef, (load32 addr), 0),
  //                   (load32 addr), Elts/2)
  // --> X86SubVBroadcast(load32 addr)
35085 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
35086 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
35087 OpVT.getSizeInBits() == SubVecVT.getSizeInBits() * 2) {
35088 auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2));
35089 if (Idx2 && Idx2->getZExtValue() == 0) {
35090 SDValue SubVec2 = Vec.getOperand(1);
35091 // If needed, look through bitcasts to get to the load.
35092 if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(SubVec2))) {
35094 unsigned Alignment = FirstLd->getAlignment();
35095 unsigned AS = FirstLd->getAddressSpace();
35096 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
35097 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
35098 OpVT, AS, Alignment, &Fast) && Fast) {
35099 SDValue Ops[] = {SubVec2, SubVec};
35100 if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false))
35104 // If lower/upper loads are the same and the only users of the load, then
35105 // lower to a VBROADCASTF128/VBROADCASTI128/etc.
35106 if (auto *Ld = dyn_cast<LoadSDNode>(peekThroughOneUseBitcasts(SubVec2))) {
35107 if (SubVec2 == SubVec && ISD::isNormalLoad(Ld) &&
35108 SDNode::areOnlyUsersOf({N, Vec.getNode()}, SubVec2.getNode())) {
35109 return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT, SubVec);
35112 // If this is subv_broadcast insert into both halves, use a larger
35114 if (SubVec.getOpcode() == X86ISD::SUBV_BROADCAST && SubVec == SubVec2) {
35115 return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT,
35116 SubVec.getOperand(0));
35125 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
35126 DAGCombinerInfo &DCI) const {
35127 SelectionDAG &DAG = DCI.DAG;
35128 switch (N->getOpcode()) {
35130 case ISD::EXTRACT_VECTOR_ELT:
35131 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
35132 case X86ISD::PEXTRW:
35133 case X86ISD::PEXTRB:
35134 return combineExtractVectorElt_SSE(N, DAG, DCI, Subtarget);
35135 case ISD::INSERT_SUBVECTOR:
35136 return combineInsertSubvector(N, DAG, DCI, Subtarget);
35139 case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
35140 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
35141 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
35142 case ISD::ADD: return combineAdd(N, DAG, Subtarget);
35143 case ISD::SUB: return combineSub(N, DAG, Subtarget);
35144 case X86ISD::ADD: return combineX86ADD(N, DAG, DCI);
35145 case X86ISD::ADC: return combineADC(N, DAG, DCI);
35146 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
35149 case ISD::SRL: return combineShift(N, DAG, DCI, Subtarget);
35150 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
35151 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
35152 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
35153 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
35154 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
35155 case ISD::STORE: return combineStore(N, DAG, Subtarget);
35156 case ISD::MSTORE: return combineMaskedStore(N, DAG, Subtarget);
35157 case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget);
35158 case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
35160 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
35161 case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
35162 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
35163 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
35164 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
35165 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
35167 case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
35169 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
35171 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
35172 case X86ISD::BT: return combineBT(N, DAG, DCI);
35173 case ISD::ANY_EXTEND:
35174 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
35175 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
35176 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
35177 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
35178 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
35179 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
35180 case X86ISD::VSHLI:
35181 case X86ISD::VSRAI:
35182 case X86ISD::VSRLI:
35183 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
35184 case ISD::SIGN_EXTEND_VECTOR_INREG:
35185 case ISD::ZERO_EXTEND_VECTOR_INREG:
35186 case X86ISD::VSEXT:
35187 case X86ISD::VZEXT: return combineVSZext(N, DAG, DCI, Subtarget);
35188 case X86ISD::PINSRB:
35189 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
35190 case X86ISD::SHUFP: // Handle all target specific shuffles
35191 case X86ISD::INSERTPS:
35192 case X86ISD::PALIGNR:
35193 case X86ISD::VSHLDQ:
35194 case X86ISD::VSRLDQ:
35195 case X86ISD::BLENDI:
35196 case X86ISD::UNPCKH:
35197 case X86ISD::UNPCKL:
35198 case X86ISD::MOVHLPS:
35199 case X86ISD::MOVLHPS:
35200 case X86ISD::PSHUFB:
35201 case X86ISD::PSHUFD:
35202 case X86ISD::PSHUFHW:
35203 case X86ISD::PSHUFLW:
35204 case X86ISD::MOVSHDUP:
35205 case X86ISD::MOVSLDUP:
35206 case X86ISD::MOVDDUP:
35207 case X86ISD::MOVSS:
35208 case X86ISD::MOVSD:
35209 case X86ISD::VPPERM:
35210 case X86ISD::VPERMI:
35211 case X86ISD::VPERMV:
35212 case X86ISD::VPERMV3:
35213 case X86ISD::VPERMIV3:
35214 case X86ISD::VPERMIL2:
35215 case X86ISD::VPERMILPI:
35216 case X86ISD::VPERMILPV:
35217 case X86ISD::VPERM2X128:
35218 case X86ISD::VZEXT_MOVL:
35219 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
35220 case X86ISD::FMADD:
35221 case X86ISD::FMADD_RND:
35222 case X86ISD::FMADDS1_RND:
35223 case X86ISD::FMADDS3_RND:
35224 case ISD::FMA: return combineFMA(N, DAG, Subtarget);
35226 case ISD::MSCATTER: return combineGatherScatter(N, DAG);
35227 case X86ISD::LSUB: return combineLockSub(N, DAG, Subtarget);
35228 case X86ISD::TESTM: return combineTestM(N, DAG);
35229 case X86ISD::PCMPEQ:
35230 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
/// and some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}
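// For example (illustrative): a 16-bit 'addw $5, %ax' needs a 0x66 operand-size
// prefix and can cause partial-register stalls, so even though i16 is a legal
// type the combiner prefers to promote such arithmetic to i32 ('addl $5, %eax')
// whenever the narrow result isn't otherwise required.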
/// This function checks if any of the users of EFLAGS copies the EFLAGS. We
/// know that the code that lowers COPY of EFLAGS has to use the stack, and if
/// we don't adjust the stack we clobber the first frame index.
/// See X86InstrInfo::copyPhysReg.
static bool hasCopyImplyingStackAdjustment(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  return any_of(MRI.reg_instructions(X86::EFLAGS),
                [](const MachineInstr &RI) { return RI.isCopy(); });
}

void X86TargetLowering::finalizeLowering(MachineFunction &MF) const {
  if (hasCopyImplyingStackAdjustment(MF)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MFI.setHasCopyImplyingStackAdjustment(true);
  }

  TargetLoweringBase::finalizeLowering(MF);
}
/// This method queries the target whether it is beneficial for DAG combiner to
/// promote the specified node. If true, it should return the desired promotion
/// type by reference.
35287 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
35288 EVT VT = Op.getValueType();
35289 if (VT != MVT::i16)
35292 bool Promote = false;
35293 bool Commute = false;
35294 switch (Op.getOpcode()) {
35296 case ISD::SIGN_EXTEND:
35297 case ISD::ZERO_EXTEND:
35298 case ISD::ANY_EXTEND:
35303 SDValue N0 = Op.getOperand(0);
35304 // Look out for (store (shl (load), x)).
35305 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
35318 SDValue N0 = Op.getOperand(0);
35319 SDValue N1 = Op.getOperand(1);
35320 if (!Commute && MayFoldLoad(N1))
35322 // Avoid disabling potential load folding opportunities.
35323 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
35325 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
35335 //===----------------------------------------------------------------------===//
35336 // X86 Inline Assembly Support
35337 //===----------------------------------------------------------------------===//
// Helper to match a string separated by whitespace.
static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
  S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.

  for (StringRef Piece : Pieces) {
    if (!S.startswith(Piece)) // Check if the piece matches.
      return false;

    S = S.substr(Piece.size());
    StringRef::size_type Pos = S.find_first_not_of(" \t");
    if (Pos == 0) // We matched a prefix.
      return false;

    S = S.substr(Pos);
  }

  return true;
}
static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {

  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
    if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {

      if (AsmPieces.size() == 3)
        return true;
      else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
        return true;
    }
  }
  return false;
}
35374 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
35375 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
35377 const std::string &AsmStr = IA->getAsmString();
35379 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
35380 if (!Ty || Ty->getBitWidth() % 16 != 0)
35383 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
35384 SmallVector<StringRef, 4> AsmPieces;
35385 SplitString(AsmStr, AsmPieces, ";\n");
35387 switch (AsmPieces.size()) {
35388 default: return false;
35390 // FIXME: this should verify that we are targeting a 486 or better. If not,
35391 // we will turn this bswap into something that will be lowered to logical
35392 // ops instead of emitting the bswap asm. For now, we don't support 486 or
35393 // lower so don't worry about this.
35395 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
35396 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
35397 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
35398 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
35399 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
35400 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
35401 // No need to check constraints; nothing other than the equivalent of
35402 // "=r,0" would be valid here.
35403 return IntrinsicLowering::LowerToByteSwap(CI);
35406 // rorw $$8, ${0:w} --> llvm.bswap.i16
35407 if (CI->getType()->isIntegerTy(16) &&
35408 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
35409 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
35410 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
35412 StringRef ConstraintsStr = IA->getConstraintString();
35413 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
35414 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
35415 if (clobbersFlagRegisters(AsmPieces))
35416 return IntrinsicLowering::LowerToByteSwap(CI);
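// The i32 pattern below byte-swaps in three steps: rorw $8 swaps the two bytes
// of the low word, rorl $16 exchanges the words, and the final rorw $8 swaps
// what was originally the high word, so the whole sequence is llvm.bswap.i32.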
35420 if (CI->getType()->isIntegerTy(32) &&
35421 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
35422 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
35423 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
35424 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
35426 StringRef ConstraintsStr = IA->getConstraintString();
35427 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
35428 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
35429 if (clobbersFlagRegisters(AsmPieces))
35430 return IntrinsicLowering::LowerToByteSwap(CI);
35433 if (CI->getType()->isIntegerTy(64)) {
35434 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
35435 if (Constraints.size() >= 2 &&
35436 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
35437 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
35438 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
35439 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
35440 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
35441 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
35442 return IntrinsicLowering::LowerToByteSwap(CI);
35450 /// Given a constraint letter, return the type of constraint for this target.
35451 X86TargetLowering::ConstraintType
35452 X86TargetLowering::getConstraintType(StringRef Constraint) const {
35453 if (Constraint.size() == 1) {
35454 switch (Constraint[0]) {
35466 return C_RegisterClass;
35467 case 'k': // AVX512 masking registers.
35491 else if (Constraint.size() == 2) {
35492 switch (Constraint[0]) {
35496 switch (Constraint[1]) {
35504 return TargetLowering::getConstraintType(Constraint);
35507 /// Examine constraint type and operand type and determine a weight value.
35508 /// This object must already have been set up with the operand type
35509 /// and the current alternative constraint selected.
35510 TargetLowering::ConstraintWeight
35511 X86TargetLowering::getSingleConstraintMatchWeight(
35512 AsmOperandInfo &info, const char *constraint) const {
35513 ConstraintWeight weight = CW_Invalid;
35514 Value *CallOperandVal = info.CallOperandVal;
35515 // If we don't have a value, we can't do a match,
35516 // but allow it at the lowest weight.
35517 if (!CallOperandVal)
35519 Type *type = CallOperandVal->getType();
35520 // Look at the constraint type.
35521 switch (*constraint) {
35523 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
35535 if (CallOperandVal->getType()->isIntegerTy())
35536 weight = CW_SpecificReg;
35541 if (type->isFloatingPointTy())
35542 weight = CW_SpecificReg;
35545 if (type->isX86_MMXTy() && Subtarget.hasMMX())
35546 weight = CW_SpecificReg;
35549 // Other "Y<x>" (e.g. "Yk") constraints should be implemented below.
35550 if (constraint[1] == 'k') {
35551 // Support for 'Yk' (similarly to the 'k' variant below).
35552 weight = CW_SpecificReg;
35555 // Else fall through (handle "Y" constraint).
35558 if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
35559 weight = CW_Register;
35562 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
35563 ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasFp256()))
35564 weight = CW_Register;
35567 // Enable conditional vector operations using %k<#> registers.
35568 weight = CW_SpecificReg;
35571 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
35572 if (C->getZExtValue() <= 31)
35573 weight = CW_Constant;
35577 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
35578 if (C->getZExtValue() <= 63)
35579 weight = CW_Constant;
35583 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
35584 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
35585 weight = CW_Constant;
35589 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
35590 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
35591 weight = CW_Constant;
35595 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
35596 if (C->getZExtValue() <= 3)
35597 weight = CW_Constant;
35601 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
35602 if (C->getZExtValue() <= 0xff)
35603 weight = CW_Constant;
35608 if (isa<ConstantFP>(CallOperandVal)) {
35609 weight = CW_Constant;
35613 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
35614 if ((C->getSExtValue() >= -0x80000000LL) &&
35615 (C->getSExtValue() <= 0x7fffffffLL))
35616 weight = CW_Constant;
35620 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
35621 if (C->getZExtValue() <= 0xffffffff)
35622 weight = CW_Constant;
35629 /// Try to replace an X constraint, which matches anything, with another that
35630 /// has more specific requirements based on the type of the corresponding operand.
35632 const char *X86TargetLowering::
35633 LowerXConstraint(EVT ConstraintVT) const {
35634 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
35635 // 'f' like normal targets.
35636 if (ConstraintVT.isFloatingPoint()) {
35637 if (Subtarget.hasSSE2())
35639 if (Subtarget.hasSSE1())
35643 return TargetLowering::LowerXConstraint(ConstraintVT);
35646 /// Lower the specified operand into the Ops vector.
35647 /// If it is invalid, don't add anything to Ops.
35648 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
35649 std::string &Constraint,
35650 std::vector<SDValue> &Ops,
35651 SelectionDAG &DAG) const {
35654 // Only support length 1 constraints for now.
35655 if (Constraint.length() > 1) return;
35657 char ConstraintLetter = Constraint[0];
35658 switch (ConstraintLetter) {
35661 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35662 if (C->getZExtValue() <= 31) {
35663 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
35664 Op.getValueType());
35670 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35671 if (C->getZExtValue() <= 63) {
35672 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
35673 Op.getValueType());
35679 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35680 if (isInt<8>(C->getSExtValue())) {
35681 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
35682 Op.getValueType());
35688 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35689 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
35690 (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
35691 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
35692 Op.getValueType());
35698 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35699 if (C->getZExtValue() <= 3) {
35700 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
35701 Op.getValueType());
35707 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35708 if (C->getZExtValue() <= 255) {
35709 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
35710 Op.getValueType());
35716 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35717 if (C->getZExtValue() <= 127) {
35718 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
35719 Op.getValueType());
35725 // 32-bit signed value
35726 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35727 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
35728 C->getSExtValue())) {
35729 // Widen to 64 bits here to get it sign extended.
35730 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
35733 // FIXME gcc accepts some relocatable values here too, but only in certain
35734 // memory models; it's complicated.
35739 // 32-bit unsigned value
35740 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
35741 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
35742 C->getZExtValue())) {
35743 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
35744 Op.getValueType());
35748 // FIXME gcc accepts some relocatable values here too, but only in certain
35749 // memory models; it's complicated.
35753 // Literal immediates are always ok.
35754 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
35755 // Widen to 64 bits here to get it sign extended.
35756 Result = DAG.getTargetConstant(CST->getSExtValue(), SDLoc(Op), MVT::i64);
35760 // In any sort of PIC mode addresses need to be computed at runtime by
35761 // adding in a register or some sort of table lookup. These can't
35762 // be used as immediates.
35763 if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
35766 // If we are in non-pic codegen mode, we allow the address of a global (with
35767 // an optional displacement) to be used with 'i'.
35768 GlobalAddressSDNode *GA = nullptr;
35769 int64_t Offset = 0;
35771 // Match either (GA), (GA+C), (GA+C1+C2), etc.
35773 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
35774 Offset += GA->getOffset();
35776 } else if (Op.getOpcode() == ISD::ADD) {
35777 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
35778 Offset += C->getZExtValue();
35779 Op = Op.getOperand(0);
35782 } else if (Op.getOpcode() == ISD::SUB) {
35783 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
35784 Offset += -C->getZExtValue();
35785 Op = Op.getOperand(0);
35790 // Otherwise, this isn't something we can handle, reject it.
35794 const GlobalValue *GV = GA->getGlobal();
35795 // If we require an extra load to get this address, as in PIC mode, we
35796 // can't accept it.
35797 if (isGlobalStubReference(Subtarget.classifyGlobalReference(GV)))
35800 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
35801 GA->getValueType(0), Offset);
35806 if (Result.getNode()) {
35807 Ops.push_back(Result);
35810 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
35813 /// Check if \p RC is a general purpose register class.
35814 /// I.e., GR* or one of their variants.
35815 static bool isGRClass(const TargetRegisterClass &RC) {
35816 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
35817 RC.hasSuperClassEq(&X86::GR16RegClass) ||
35818 RC.hasSuperClassEq(&X86::GR32RegClass) ||
35819 RC.hasSuperClassEq(&X86::GR64RegClass) ||
35820 RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
35823 /// Check if \p RC is a vector register class.
35824 /// I.e., FR* / VR* or one of their variants.
35825 static bool isFRClass(const TargetRegisterClass &RC) {
35826 return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
35827 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
35828 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
35829 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
35830 RC.hasSuperClassEq(&X86::VR512RegClass);
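// Note: the FR32X/FR64X/VR*X classes are the AVX-512 supersets of the plain
// SSE/AVX classes, so matching on them here should cover both the EVEX and
// the legacy variants of a given register class.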
35833 std::pair<unsigned, const TargetRegisterClass *>
35834 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
35835 StringRef Constraint,
35837 // First, see if this is a constraint that directly corresponds to an LLVM
35839 if (Constraint.size() == 1) {
35840 // GCC Constraint Letters
35841 switch (Constraint[0]) {
35843 // TODO: Slight differences here in allocation order and leaving
35844 // RIP in the class. Do they matter any more here than they do
35845 // in the normal allocation?
35847 if (Subtarget.hasAVX512()) {
35848 // Only supported in AVX512 or later.
35849 switch (VT.SimpleTy) {
35852 return std::make_pair(0U, &X86::VK32RegClass);
35854 return std::make_pair(0U, &X86::VK16RegClass);
35856 return std::make_pair(0U, &X86::VK8RegClass);
35858 return std::make_pair(0U, &X86::VK1RegClass);
35860 return std::make_pair(0U, &X86::VK64RegClass);
35864 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
35865 if (Subtarget.is64Bit()) {
35866 if (VT == MVT::i32 || VT == MVT::f32)
35867 return std::make_pair(0U, &X86::GR32RegClass);
35868 if (VT == MVT::i16)
35869 return std::make_pair(0U, &X86::GR16RegClass);
35870 if (VT == MVT::i8 || VT == MVT::i1)
35871 return std::make_pair(0U, &X86::GR8RegClass);
35872 if (VT == MVT::i64 || VT == MVT::f64)
35873 return std::make_pair(0U, &X86::GR64RegClass);
35877 // 32-bit fallthrough
35878 case 'Q': // Q_REGS
35879 if (VT == MVT::i32 || VT == MVT::f32)
35880 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
35881 if (VT == MVT::i16)
35882 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
35883 if (VT == MVT::i8 || VT == MVT::i1)
35884 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
35885 if (VT == MVT::i64)
35886 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
35888 case 'r': // GENERAL_REGS
35889 case 'l': // INDEX_REGS
35890 if (VT == MVT::i8 || VT == MVT::i1)
35891 return std::make_pair(0U, &X86::GR8RegClass);
35892 if (VT == MVT::i16)
35893 return std::make_pair(0U, &X86::GR16RegClass);
35894 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
35895 return std::make_pair(0U, &X86::GR32RegClass);
35896 return std::make_pair(0U, &X86::GR64RegClass);
35897 case 'R': // LEGACY_REGS
35898 if (VT == MVT::i8 || VT == MVT::i1)
35899 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
35900 if (VT == MVT::i16)
35901 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
35902 if (VT == MVT::i32 || !Subtarget.is64Bit())
35903 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
35904 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
35905 case 'f': // FP Stack registers.
35906 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
35907 // value to the correct fpstack register class.
35908 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
35909 return std::make_pair(0U, &X86::RFP32RegClass);
35910 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
35911 return std::make_pair(0U, &X86::RFP64RegClass);
35912 return std::make_pair(0U, &X86::RFP80RegClass);
35913 case 'y': // MMX_REGS if MMX allowed.
35914 if (!Subtarget.hasMMX()) break;
35915 return std::make_pair(0U, &X86::VR64RegClass);
35916 case 'Y': // SSE_REGS if SSE2 allowed
35917 if (!Subtarget.hasSSE2()) break;
35920 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
35921 if (!Subtarget.hasSSE1()) break;
35922 bool VConstraint = (Constraint[0] == 'v');
35924 switch (VT.SimpleTy) {
35926 // Scalar SSE types.
35929 if (VConstraint && Subtarget.hasAVX512() && Subtarget.hasVLX())
35930 return std::make_pair(0U, &X86::FR32XRegClass);
35931 return std::make_pair(0U, &X86::FR32RegClass);
35934 if (VConstraint && Subtarget.hasVLX())
35935 return std::make_pair(0U, &X86::FR64XRegClass);
35936 return std::make_pair(0U, &X86::FR64RegClass);
35937 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
35945 if (VConstraint && Subtarget.hasVLX())
35946 return std::make_pair(0U, &X86::VR128XRegClass);
35947 return std::make_pair(0U, &X86::VR128RegClass);
35955 if (VConstraint && Subtarget.hasVLX())
35956 return std::make_pair(0U, &X86::VR256XRegClass);
35957 return std::make_pair(0U, &X86::VR256RegClass);
35962 return std::make_pair(0U, &X86::VR512RegClass);
35966 } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
35967 switch (Constraint[1]) {
35971 // This register class doesn't allocate k0 for masked vector operations.
35972 if (Subtarget.hasAVX512()) { // Only supported in AVX512.
35973 switch (VT.SimpleTy) {
35976 return std::make_pair(0U, &X86::VK32WMRegClass);
35978 return std::make_pair(0U, &X86::VK16WMRegClass);
35980 return std::make_pair(0U, &X86::VK8WMRegClass);
35982 return std::make_pair(0U, &X86::VK1WMRegClass);
35984 return std::make_pair(0U, &X86::VK64WMRegClass);
35991 // Use the default implementation in TargetLowering to convert the register
35992 // constraint into a member of a register class.
35993 std::pair<unsigned, const TargetRegisterClass*> Res;
35994 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
35996 // Not found as a standard register?
35998 // Map "st(0)" .. "st(7)" to the corresponding FP-stack register.
35999 if (Constraint.size() == 7 && Constraint[0] == '{' &&
36000 tolower(Constraint[1]) == 's' &&
36001 tolower(Constraint[2]) == 't' &&
36002 Constraint[3] == '(' &&
36003 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
36004 Constraint[5] == ')' &&
36005 Constraint[6] == '}') {
36007 Res.first = X86::FP0+Constraint[4]-'0';
36008 Res.second = &X86::RFP80RegClass;
36012 // GCC allows "st(0)" to be called just plain "st".
36013 if (StringRef("{st}").equals_lower(Constraint)) {
36014 Res.first = X86::FP0;
36015 Res.second = &X86::RFP80RegClass;
36020 if (StringRef("{flags}").equals_lower(Constraint)) {
36021 Res.first = X86::EFLAGS;
36022 Res.second = &X86::CCRRegClass;
36026 // 'A' means [ER]AX + [ER]DX.
36027 if (Constraint == "A") {
36028 if (Subtarget.is64Bit()) {
36029 Res.first = X86::RAX;
36030 Res.second = &X86::GR64_ADRegClass;
36032 assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
36033 "Expecting 64, 32 or 16 bit subtarget");
36034 Res.first = X86::EAX;
36035 Res.second = &X86::GR32_ADRegClass;
36042 // Otherwise, check to see if this is a register class of the wrong value
36043 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
36044 // turn into {ax},{dx}.
36045 // MVT::Other is used to specify clobber names.
36046 if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
36047 return Res; // Correct type already, nothing to do.
36049 // Get a matching integer of the correct size. E.g. "ax" with MVT::i32 should
36050 // return "eax". This should even work for things like getting 64-bit integer
36051 // registers when given an f64 type.
36052 const TargetRegisterClass *Class = Res.second;
36053 // The generic code will match the first register class that contains the
36054 // given register. Thus, based on the ordering of the tablegened file,
36055 // the "plain" GR classes might not come first.
36056 // Therefore, use a helper method.
36057 if (isGRClass(*Class)) {
36058 unsigned Size = VT.getSizeInBits();
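// An i1 value still occupies a full 8-bit GPR.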
36059 if (Size == 1) Size = 8;
36060 unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
36062 Res.first = DestReg;
36063 Res.second = Size == 8 ? &X86::GR8RegClass
36064 : Size == 16 ? &X86::GR16RegClass
36065 : Size == 32 ? &X86::GR32RegClass
36066 : &X86::GR64RegClass;
36067 assert(Res.second->contains(Res.first) && "Register in register class");
36069 // No register found/type mismatch.
36071 Res.second = nullptr;
36073 } else if (isFRClass(*Class)) {
36074 // Handle references to XMM physical registers that got mapped into the
36075 // wrong class. This can happen with constraints like {xmm0} where the
36076 // target independent register mapper will just pick the first match it can
36077 // find, ignoring the required type.
36079 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
36080 if (VT == MVT::f32 || VT == MVT::i32)
36081 Res.second = &X86::FR32RegClass;
36082 else if (VT == MVT::f64 || VT == MVT::i64)
36083 Res.second = &X86::FR64RegClass;
36084 else if (TRI->isTypeLegalForClass(X86::VR128RegClass, VT))
36085 Res.second = &X86::VR128RegClass;
36086 else if (TRI->isTypeLegalForClass(X86::VR256RegClass, VT))
36087 Res.second = &X86::VR256RegClass;
36088 else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
36089 Res.second = &X86::VR512RegClass;
36091 // Type mismatch and not a clobber: return an error.
36093 Res.second = nullptr;
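// Return the relative cost of using AM.Scale in an addressing mode for type
// Ty: 0 when the mode is essentially free, positive when it consumes extra
// resources (the TargetLowering interface treats a negative result as an
// unsupported addressing mode).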
36100 int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
36101 const AddrMode &AM, Type *Ty,
36102 unsigned AS) const {
36103 // Scaling factors are not free at all.
36104 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
36105 // will take 2 allocations in the out of order engine instead of 1
36106 // for plain addressing mode, i.e. inst (reg1).
36108 // vaddps (%rsi,%rdx), %ymm0, %ymm1
36109 // Requires two allocations (one for the load, one for the computation)
36111 // vaddps (%rsi), %ymm0, %ymm1
36112 // Requires just 1 allocation, i.e., freeing allocations for other operations
36113 // and having less micro operations to execute.
36115 // For some X86 architectures, this is even worse because for instance for
36116 // stores, the complex addressing mode forces the instruction to use the
36117 // "load" ports instead of the dedicated "store" port.
36118 // E.g., on Haswell:
36119 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
36120 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
36121 if (isLegalAddressingMode(DL, AM, Ty, AS))
36122 // Scale represents reg2 * scale, thus account for 1
36123 // as soon as we use a second register.
36124 return AM.Scale != 0;
36128 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
36129 // Integer division on x86 is expensive. However, when aggressively optimizing
36130 // for code size, we prefer to use a div instruction, as it is usually smaller
36131 // than the alternative sequence.
36132 // The exception to this is vector division. Since x86 doesn't have vector
36133 // integer division, leaving the division as-is is a loss even in terms of
36134 // size, because it will have to be scalarized, while the alternative code
36135 // sequence can be performed in vector form.
36137 Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
36138 return OptSize && !VT.isVector();
36141 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
36142 if (!Subtarget.is64Bit())
36145 // Update IsSplitCSR in X86MachineFunctionInfo.
36146 X86MachineFunctionInfo *AFI =
36147 Entry->getParent()->getInfo<X86MachineFunctionInfo>();
36148 AFI->setIsSplitCSR(true);
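// Split CSR (used for the CXX_FAST_TLS calling convention) saves callee-saved
// registers by copying them into virtual registers in the entry block and
// copying them back just before each return, instead of spilling them in the
// prologue/epilogue; the copies are emitted by insertCopiesSplitCSR below.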
36151 void X86TargetLowering::insertCopiesSplitCSR(
36152 MachineBasicBlock *Entry,
36153 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
36154 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36155 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
36159 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36160 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
36161 MachineBasicBlock::iterator MBBI = Entry->begin();
36162 for (const MCPhysReg *I = IStart; *I; ++I) {
36163 const TargetRegisterClass *RC = nullptr;
36164 if (X86::GR64RegClass.contains(*I))
36165 RC = &X86::GR64RegClass;
36167 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
36169 unsigned NewVR = MRI->createVirtualRegister(RC);
36170 // Create copy from CSR to a virtual register.
36171 // FIXME: this currently does not emit CFI pseudo-instructions, it works
36172 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
36173 // nounwind. If we want to generalize this later, we may need to emit
36174 // CFI pseudo-instructions.
36175 assert(Entry->getParent()->getFunction()->hasFnAttribute(
36176 Attribute::NoUnwind) &&
36177 "Function should be nounwind in insertCopiesSplitCSR!");
36178 Entry->addLiveIn(*I);
36179 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
36182 // Insert the copy-back instructions right before the terminator.
36183 for (auto *Exit : Exits)
36184 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
36185 TII->get(TargetOpcode::COPY), *I)
36190 bool X86TargetLowering::supportSwiftError() const {
36191 return Subtarget.is64Bit();