1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interfaces that X86 uses to lower LLVM code into a
11 // selection DAG.
13 //===----------------------------------------------------------------------===//
15 #include "X86ISelLowering.h"
16 #include "Utils/X86ShuffleDecode.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86MachineFunctionInfo.h"
21 #include "X86ShuffleDecodeConstantPool.h"
22 #include "X86TargetMachine.h"
23 #include "X86TargetObjectFile.h"
24 #include "llvm/ADT/SmallBitVector.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/Analysis/EHPersonalities.h"
30 #include "llvm/CodeGen/IntrinsicLowering.h"
31 #include "llvm/CodeGen/MachineFrameInfo.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineInstrBuilder.h"
34 #include "llvm/CodeGen/MachineJumpTableInfo.h"
35 #include "llvm/CodeGen/MachineModuleInfo.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/WinEHFuncInfo.h"
38 #include "llvm/IR/CallSite.h"
39 #include "llvm/IR/CallingConv.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/GlobalAlias.h"
44 #include "llvm/IR/GlobalVariable.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/MC/MCAsmInfo.h"
48 #include "llvm/MC/MCContext.h"
49 #include "llvm/MC/MCExpr.h"
50 #include "llvm/MC/MCSymbol.h"
51 #include "llvm/Support/CommandLine.h"
52 #include "llvm/Support/Debug.h"
53 #include "llvm/Support/ErrorHandling.h"
54 #include "llvm/Support/MathExtras.h"
55 #include "llvm/Target/TargetOptions.h"
56 #include "X86IntrinsicsInfo.h"
60 using namespace llvm;
62 #define DEBUG_TYPE "x86-isel"
64 STATISTIC(NumTailCalls, "Number of tail calls");
66 static cl::opt<bool> ExperimentalVectorWideningLegalization(
67 "x86-experimental-vector-widening-legalization", cl::init(false),
68 cl::desc("Enable an experimental vector type legalization through widening "
69 "rather than promotion."),
70 cl::Hidden);
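// For reference, this hidden flag can be toggled from an llc or
// "clang -mllvm" command line, e.g.
//   llc -x86-experimental-vector-widening-legalization=true test.ll
// Being cl::Hidden it is left out of -help (see -help-hidden) but is still
// parsed like any other cl::opt.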
72 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
73 const X86Subtarget &STI)
74 : TargetLowering(TM), Subtarget(STI) {
75 bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
76 X86ScalarSSEf64 = Subtarget.hasSSE2();
77 X86ScalarSSEf32 = Subtarget.hasSSE1();
78 MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
80 // Set up the TargetLowering object.
82 // X86 is weird. It always uses i8 for shift amounts and setcc results.
83 setBooleanContents(ZeroOrOneBooleanContent);
84 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
85 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
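// For example, a scalar compare is materialized with SETcc, which writes 0 or
// 1 into an 8-bit register (ZeroOrOneBooleanContent), while vector compares
// such as PCMPEQD/CMPPS write an all-ones or all-zeros mask per lane
// (ZeroOrNegativeOneBooleanContent).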
87 // For 64-bit, since we have so many registers, use the ILP scheduler.
88 // For 32-bit, use the register pressure specific scheduling.
89 // For Atom, always use ILP scheduling.
90 if (Subtarget.isAtom())
91 setSchedulingPreference(Sched::ILP);
92 else if (Subtarget.is64Bit())
93 setSchedulingPreference(Sched::ILP);
94 else
95 setSchedulingPreference(Sched::RegPressure);
96 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
97 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
99 // Bypass expensive divides on Atom when compiling with O2.
100 if (TM.getOptLevel() >= CodeGenOpt::Default) {
101 if (Subtarget.hasSlowDivide32())
102 addBypassSlowDiv(32, 8);
103 if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
104 addBypassSlowDiv(64, 16);
105 }
107 if (Subtarget.isTargetKnownWindowsMSVC()) {
108 // Setup Windows compiler runtime calls.
109 setLibcallName(RTLIB::SDIV_I64, "_alldiv");
110 setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
111 setLibcallName(RTLIB::SREM_I64, "_allrem");
112 setLibcallName(RTLIB::UREM_I64, "_aullrem");
113 setLibcallName(RTLIB::MUL_I64, "_allmul");
114 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
115 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
116 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
117 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
118 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
119 }
121 if (Subtarget.isTargetDarwin()) {
122 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
123 setUseUnderscoreSetJmp(false);
124 setUseUnderscoreLongJmp(false);
125 } else if (Subtarget.isTargetWindowsGNU()) {
126 // MS runtime is weird: it exports _setjmp, but longjmp!
127 setUseUnderscoreSetJmp(true);
128 setUseUnderscoreLongJmp(false);
129 } else {
130 setUseUnderscoreSetJmp(true);
131 setUseUnderscoreLongJmp(true);
132 }
134 // Set up the register classes.
135 addRegisterClass(MVT::i8, &X86::GR8RegClass);
136 addRegisterClass(MVT::i16, &X86::GR16RegClass);
137 addRegisterClass(MVT::i32, &X86::GR32RegClass);
138 if (Subtarget.is64Bit())
139 addRegisterClass(MVT::i64, &X86::GR64RegClass);
141 for (MVT VT : MVT::integer_valuetypes())
142 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
144 // We don't accept any truncstore of integer registers.
145 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
146 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
147 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
148 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
149 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
150 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
152 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
154 // SETOEQ and SETUNE require checking two conditions.
155 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
156 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
157 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
158 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
159 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
160 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
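// Background: after UCOMISS/UCOMISD, "equal" sets ZF=1 PF=0 CF=0 while
// "unordered" sets ZF=1 PF=1 CF=1, so an ordered-equal test needs ZF set
// *and* PF clear, and unordered-or-unequal needs PF set *or* ZF clear;
// expanding SETOEQ/SETUNE lets the legalizer emit those two-flag sequences.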
162 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
163 // operation.
164 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
165 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
166 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
168 if (Subtarget.is64Bit()) {
169 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
170 // f32/f64 are legal, f80 is custom.
171 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
172 else
173 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
174 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
175 } else if (!Subtarget.useSoftFloat()) {
176 // We have an algorithm for SSE2->double, and we turn this into a
177 // 64-bit FILD followed by conditional FADD for other targets.
178 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
179 // We have an algorithm for SSE2, and we turn this into a 64-bit
180 // FILD or VCVTUSI2SS/SD for other targets.
181 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
182 }
184 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
185 // this operation.
186 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
187 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
189 if (!Subtarget.useSoftFloat()) {
190 // SSE has no i16 to fp conversion, only i32.
191 if (X86ScalarSSEf32) {
192 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
193 // f32 and f64 cases are Legal, f80 case is not
194 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
195 } else {
196 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
197 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
198 }
199 } else {
200 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
201 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote);
202 }
204 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
205 // this operation.
206 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
207 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
209 if (!Subtarget.useSoftFloat()) {
210 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
211 // are Legal, f80 is custom lowered.
212 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
213 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
215 if (X86ScalarSSEf32) {
216 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
217 // f32 and f64 cases are Legal, f80 case is not
218 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
219 } else {
220 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
221 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
222 }
223 } else {
224 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
225 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Expand);
226 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Expand);
227 }
229 // Handle FP_TO_UINT by promoting the destination to a larger signed
230 // conversion.
231 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
232 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
233 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
235 if (Subtarget.is64Bit()) {
236 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
237 // FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
238 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
239 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
240 } else {
241 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
242 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
243 }
244 } else if (!Subtarget.useSoftFloat()) {
245 // Since AVX is a superset of SSE3, only check for SSE here.
246 if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
247 // Expand FP_TO_UINT into a select.
248 // FIXME: We would like to use a Custom expander here eventually to do
249 // the optimal thing for SSE vs. the default expansion in the legalizer.
250 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
251 else
252 // With AVX512 we can use vcvts[ds]2usi for f32/f64->i32, f80 is custom.
253 // With SSE3 we can use fisttpll to convert to a signed i64; without
254 // SSE, we're stuck with a fistpll.
255 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
257 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
258 }
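// Roughly, the select-based FP_TO_UINT expansion mentioned above is
//   small = fptosi(x)                     ; valid while x < 2^31
//   big   = fptosi(x - 2^31) ^ 0x80000000 ; covers the upper half of the range
//   res   = x < 2^31 ? small : big
// i.e. two signed conversions combined with a compare and a select.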
260 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
261 if (!X86ScalarSSEf64) {
262 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
263 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
264 if (Subtarget.is64Bit()) {
265 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
266 // Without SSE, i64->f64 goes through memory.
267 setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
268 }
269 } else if (!Subtarget.is64Bit())
270 setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
272 // Scalar integer divide and remainder are lowered to use operations that
273 // produce two results, to match the available instructions. This exposes
274 // the two-result form to trivial CSE, which is able to combine x/y and x%y
275 // into a single instruction.
277 // Scalar integer multiply-high is also lowered to use two-result
278 // operations, to match the available instructions. However, plain multiply
279 // (low) operations are left as Legal, as there are single-result
280 // instructions for this in x86. Using the two-result multiply instructions
281 // when both high and low results are needed must be arranged by dagcombine.
282 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
283 setOperationAction(ISD::MULHS, VT, Expand);
284 setOperationAction(ISD::MULHU, VT, Expand);
285 setOperationAction(ISD::SDIV, VT, Expand);
286 setOperationAction(ISD::UDIV, VT, Expand);
287 setOperationAction(ISD::SREM, VT, Expand);
288 setOperationAction(ISD::UREM, VT, Expand);
290 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
291 setOperationAction(ISD::ADDC, VT, Custom);
292 setOperationAction(ISD::ADDE, VT, Custom);
293 setOperationAction(ISD::SUBC, VT, Custom);
294 setOperationAction(ISD::SUBE, VT, Custom);
295 }
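// The two-result DIVREM/MULHI forms exposed above map directly onto x86:
//   div ecx        ; EAX = EDX:EAX / ECX, EDX = remainder
// so "x / y" and "x % y" CSE into a single DIV/IDIV, and the one-operand
// MUL/IMUL likewise leaves the low half in EAX and the high half in EDX.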
297 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
298 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
299 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
300 MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
301 setOperationAction(ISD::BR_CC, VT, Expand);
302 setOperationAction(ISD::SELECT_CC, VT, Expand);
303 }
304 if (Subtarget.is64Bit())
305 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
306 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
307 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
308 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
309 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
311 setOperationAction(ISD::FREM , MVT::f32 , Expand);
312 setOperationAction(ISD::FREM , MVT::f64 , Expand);
313 setOperationAction(ISD::FREM , MVT::f80 , Expand);
314 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
316 // Promote the i8 variants and force them on up to i32 which has a shorter
317 // encoding.
318 setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
319 setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
320 if (!Subtarget.hasBMI()) {
321 setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
322 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
323 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
324 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
325 if (Subtarget.is64Bit()) {
326 setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
327 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
328 }
329 }
331 if (Subtarget.hasLZCNT()) {
332 // When promoting the i8 variants, force them to i32 for a shorter
333 // encoding.
334 setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
335 setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
336 } else {
337 setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
338 setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
339 setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
340 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
341 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
342 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
343 if (Subtarget.is64Bit()) {
344 setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
345 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
346 }
347 }
349 // Special handling for half-precision floating point conversions.
350 // If we don't have F16C support, then lower half float conversions
351 // into library calls.
352 if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
353 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
354 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
355 }
357 // There's never any support for operations beyond MVT::f32.
358 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
359 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
360 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
361 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
363 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
364 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
365 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
366 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
367 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
368 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
370 if (Subtarget.hasPOPCNT()) {
371 setOperationAction(ISD::CTPOP , MVT::i8 , Promote);
372 } else {
373 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
374 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
375 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
376 if (Subtarget.is64Bit())
377 setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
378 }
380 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
382 if (!Subtarget.hasMOVBE())
383 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
385 // These should be promoted to a larger select which is supported.
386 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
387 // X86 wants to expand cmov itself.
388 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
389 setOperationAction(ISD::SELECT, VT, Custom);
390 setOperationAction(ISD::SETCC, VT, Custom);
391 }
392 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
393 if (VT == MVT::i64 && !Subtarget.is64Bit())
394 continue;
395 setOperationAction(ISD::SELECT, VT, Custom);
396 setOperationAction(ISD::SETCC, VT, Custom);
397 setOperationAction(ISD::SETCCE, VT, Custom);
398 }
399 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
400 // NOTE: the EH_SJLJ_SETJMP/_LONGJMP support here is NOT intended to support
401 // SjLj exception handling but a light-weight setjmp/longjmp replacement to
402 // support continuation, user-level threading, and so on. As a result, no
403 // other SjLj exception interfaces are implemented; please don't build
404 // your own exception handling based on them.
405 // LLVM/Clang supports zero-cost DWARF exception handling.
406 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
407 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
408 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
409 if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
410 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
413 for (auto VT : { MVT::i32, MVT::i64 }) {
414 if (VT == MVT::i64 && !Subtarget.is64Bit())
415 continue;
416 setOperationAction(ISD::ConstantPool , VT, Custom);
417 setOperationAction(ISD::JumpTable , VT, Custom);
418 setOperationAction(ISD::GlobalAddress , VT, Custom);
419 setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
420 setOperationAction(ISD::ExternalSymbol , VT, Custom);
421 setOperationAction(ISD::BlockAddress , VT, Custom);
422 }
423 // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
424 for (auto VT : { MVT::i32, MVT::i64 }) {
425 if (VT == MVT::i64 && !Subtarget.is64Bit())
426 continue;
427 setOperationAction(ISD::SHL_PARTS, VT, Custom);
428 setOperationAction(ISD::SRA_PARTS, VT, Custom);
429 setOperationAction(ISD::SRL_PARTS, VT, Custom);
430 }
432 if (Subtarget.hasSSE1())
433 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
435 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
437 // Expand certain atomics
438 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
439 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
440 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
441 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
442 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
443 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
444 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
445 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
446 }
448 if (Subtarget.hasCmpxchg16b()) {
449 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
450 }
452 // FIXME - use subtarget debug flags
453 if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
454 !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
455 TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
456 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
457 }
459 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
460 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
462 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
463 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
465 setOperationAction(ISD::TRAP, MVT::Other, Legal);
466 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
468 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
469 setOperationAction(ISD::VASTART , MVT::Other, Custom);
470 setOperationAction(ISD::VAEND , MVT::Other, Expand);
471 bool Is64Bit = Subtarget.is64Bit();
472 setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
473 setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
475 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
476 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
478 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
480 // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
481 setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
482 setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
484 if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
485 // f32 and f64 use SSE.
486 // Set up the FP register classes.
487 addRegisterClass(MVT::f32, &X86::FR32RegClass);
488 addRegisterClass(MVT::f64, &X86::FR64RegClass);
490 for (auto VT : { MVT::f32, MVT::f64 }) {
491 // Use ANDPD to simulate FABS.
492 setOperationAction(ISD::FABS, VT, Custom);
494 // Use XORP to simulate FNEG.
495 setOperationAction(ISD::FNEG, VT, Custom);
497 // Use ANDPD and ORPD to simulate FCOPYSIGN.
498 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
500 // We don't support sin/cos/fmod
501 setOperationAction(ISD::FSIN , VT, Expand);
502 setOperationAction(ISD::FCOS , VT, Expand);
503 setOperationAction(ISD::FSINCOS, VT, Expand);
504 }
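// The custom FABS/FNEG/FCOPYSIGN lowering above reduces to sign-bit masks,
// e.g. for f32:
//   FABS(x): andps x, <0x7FFFFFFF splat>   ; clear the sign bit
//   FNEG(x): xorps x, <0x80000000 splat>   ; flip the sign bit
// and FCOPYSIGN combines the magnitude bits of one operand with the sign bit
// of the other using the same masks.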
506 // Lower this to MOVMSK plus an AND.
507 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
508 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
510 // Expand FP immediates into loads from the stack, except for the special
511 // cases we handle.
512 addLegalFPImmediate(APFloat(+0.0)); // xorpd
513 addLegalFPImmediate(APFloat(+0.0f)); // xorps
514 } else if (UseX87 && X86ScalarSSEf32) {
515 // Use SSE for f32, x87 for f64.
516 // Set up the FP register classes.
517 addRegisterClass(MVT::f32, &X86::FR32RegClass);
518 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
520 // Use ANDPS to simulate FABS.
521 setOperationAction(ISD::FABS , MVT::f32, Custom);
523 // Use XORP to simulate FNEG.
524 setOperationAction(ISD::FNEG , MVT::f32, Custom);
526 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
528 // Use ANDPS and ORPS to simulate FCOPYSIGN.
529 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
530 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
532 // We don't support sin/cos/fmod
533 setOperationAction(ISD::FSIN , MVT::f32, Expand);
534 setOperationAction(ISD::FCOS , MVT::f32, Expand);
535 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
537 // Special cases we handle for FP constants.
538 addLegalFPImmediate(APFloat(+0.0f)); // xorps
539 addLegalFPImmediate(APFloat(+0.0)); // FLD0
540 addLegalFPImmediate(APFloat(+1.0)); // FLD1
541 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
542 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
544 if (!TM.Options.UnsafeFPMath) {
545 setOperationAction(ISD::FSIN , MVT::f64, Expand);
546 setOperationAction(ISD::FCOS , MVT::f64, Expand);
547 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
548 }
549 } else if (UseX87) {
550 // f32 and f64 in x87.
551 // Set up the FP register classes.
552 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
553 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
555 for (auto VT : { MVT::f32, MVT::f64 }) {
556 setOperationAction(ISD::UNDEF, VT, Expand);
557 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
559 if (!TM.Options.UnsafeFPMath) {
560 setOperationAction(ISD::FSIN , VT, Expand);
561 setOperationAction(ISD::FCOS , VT, Expand);
562 setOperationAction(ISD::FSINCOS, VT, Expand);
563 }
564 }
565 addLegalFPImmediate(APFloat(+0.0)); // FLD0
566 addLegalFPImmediate(APFloat(+1.0)); // FLD1
567 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
568 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
569 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
570 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
571 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
572 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
573 }
575 // We don't support FMA.
576 setOperationAction(ISD::FMA, MVT::f64, Expand);
577 setOperationAction(ISD::FMA, MVT::f32, Expand);
579 // Long double always uses X87, except f128 in MMX.
580 if (UseX87) {
581 if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
582 addRegisterClass(MVT::f128, &X86::FR128RegClass);
583 ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
584 setOperationAction(ISD::FABS , MVT::f128, Custom);
585 setOperationAction(ISD::FNEG , MVT::f128, Custom);
586 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
587 }
589 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
590 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
591 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
592 {
593 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
594 addLegalFPImmediate(TmpFlt); // FLD0
595 TmpFlt.changeSign();
596 addLegalFPImmediate(TmpFlt); // FLD0/FCHS
598 bool ignored;
599 APFloat TmpFlt2(+1.0);
600 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
601 &ignored);
602 addLegalFPImmediate(TmpFlt2); // FLD1
603 TmpFlt2.changeSign();
604 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
605 }
607 if (!TM.Options.UnsafeFPMath) {
608 setOperationAction(ISD::FSIN , MVT::f80, Expand);
609 setOperationAction(ISD::FCOS , MVT::f80, Expand);
610 setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
611 }
613 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
614 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
615 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
616 setOperationAction(ISD::FRINT, MVT::f80, Expand);
617 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
618 setOperationAction(ISD::FMA, MVT::f80, Expand);
619 }
621 // Always use a library call for pow.
622 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
623 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
624 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
626 setOperationAction(ISD::FLOG, MVT::f80, Expand);
627 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
628 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
629 setOperationAction(ISD::FEXP, MVT::f80, Expand);
630 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
631 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
632 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
634 // Some FP actions are always expanded for vector types.
635 for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
636 MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
637 setOperationAction(ISD::FSIN, VT, Expand);
638 setOperationAction(ISD::FSINCOS, VT, Expand);
639 setOperationAction(ISD::FCOS, VT, Expand);
640 setOperationAction(ISD::FREM, VT, Expand);
641 setOperationAction(ISD::FPOWI, VT, Expand);
642 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
643 setOperationAction(ISD::FPOW, VT, Expand);
644 setOperationAction(ISD::FLOG, VT, Expand);
645 setOperationAction(ISD::FLOG2, VT, Expand);
646 setOperationAction(ISD::FLOG10, VT, Expand);
647 setOperationAction(ISD::FEXP, VT, Expand);
648 setOperationAction(ISD::FEXP2, VT, Expand);
649 }
651 // First set operation action for all vector types to either promote
652 // (for widening) or expand (for scalarization). Then we will selectively
653 // turn on ones that can be effectively codegen'd.
654 for (MVT VT : MVT::vector_valuetypes()) {
655 setOperationAction(ISD::SDIV, VT, Expand);
656 setOperationAction(ISD::UDIV, VT, Expand);
657 setOperationAction(ISD::SREM, VT, Expand);
658 setOperationAction(ISD::UREM, VT, Expand);
659 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
660 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
661 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
662 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
663 setOperationAction(ISD::FMA, VT, Expand);
664 setOperationAction(ISD::FFLOOR, VT, Expand);
665 setOperationAction(ISD::FCEIL, VT, Expand);
666 setOperationAction(ISD::FTRUNC, VT, Expand);
667 setOperationAction(ISD::FRINT, VT, Expand);
668 setOperationAction(ISD::FNEARBYINT, VT, Expand);
669 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
670 setOperationAction(ISD::MULHS, VT, Expand);
671 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
672 setOperationAction(ISD::MULHU, VT, Expand);
673 setOperationAction(ISD::SDIVREM, VT, Expand);
674 setOperationAction(ISD::UDIVREM, VT, Expand);
675 setOperationAction(ISD::CTPOP, VT, Expand);
676 setOperationAction(ISD::CTTZ, VT, Expand);
677 setOperationAction(ISD::CTLZ, VT, Expand);
678 setOperationAction(ISD::ROTL, VT, Expand);
679 setOperationAction(ISD::ROTR, VT, Expand);
680 setOperationAction(ISD::BSWAP, VT, Expand);
681 setOperationAction(ISD::SETCC, VT, Expand);
682 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
683 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
684 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
685 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
686 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
687 setOperationAction(ISD::TRUNCATE, VT, Expand);
688 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
689 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
690 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
691 setOperationAction(ISD::SELECT_CC, VT, Expand);
692 for (MVT InnerVT : MVT::vector_valuetypes()) {
693 setTruncStoreAction(InnerVT, VT, Expand);
695 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
696 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
698 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
699 // types, we have to deal with them whether we ask for Expansion or not.
700 // Setting Expand causes its own optimisation problems though, so leave
701 // it alone for now.
702 if (VT.getVectorElementType() == MVT::i1)
703 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
705 // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
706 // split/scalarized right now.
707 if (VT.getVectorElementType() == MVT::f16)
708 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
709 }
710 }
712 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
713 // with -msoft-float, disable use of MMX as well.
714 if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
715 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
716 // No operations on x86mmx are supported; everything uses intrinsics.
717 }
719 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
720 addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
722 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
723 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
724 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
725 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
726 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
727 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
728 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
729 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
730 }
732 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
733 addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
735 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
736 // registers cannot be used even for integer operations.
737 addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
738 addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
739 addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
740 addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
742 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
743 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
744 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
745 setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
746 setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
747 setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
748 setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
749 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
750 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
751 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
752 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
753 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
755 setOperationAction(ISD::SMAX, MVT::v8i16, Legal);
756 setOperationAction(ISD::UMAX, MVT::v16i8, Legal);
757 setOperationAction(ISD::SMIN, MVT::v8i16, Legal);
758 setOperationAction(ISD::UMIN, MVT::v16i8, Legal);
760 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
761 setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
762 setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
763 setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
765 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
766 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
767 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
768 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
769 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
771 setOperationAction(ISD::CTPOP, MVT::v16i8, Custom);
772 setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
773 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
774 setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
776 setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
777 setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
778 setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
779 // ISD::CTTZ v2i64 - scalarization is faster.
781 // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
782 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
783 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
784 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
785 setOperationAction(ISD::VSELECT, VT, Custom);
786 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
787 }
789 // We support custom legalizing of sext and anyext loads for specific
790 // memory vector types which we can load as a scalar (or sequence of
791 // scalars) and extend in-register to a legal 128-bit vector type. For sext
792 // loads these must work with a single scalar load.
793 for (MVT VT : MVT::integer_vector_valuetypes()) {
794 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
795 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
796 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
797 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
798 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
799 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
800 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
801 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
802 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
803 }
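// As an example of the scheme described above, a sextload of <4 x i8> into
// <4 x i32> can be emitted as one 32-bit scalar load followed by an
// in-register extend (unpack + arithmetic-shift sequences on plain SSE2, or a
// single PMOVSXBD once SSE4.1 is available); the sext case has to be
// coverable by that single scalar load.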
805 for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
806 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
807 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
808 setOperationAction(ISD::VSELECT, VT, Custom);
810 if (VT == MVT::v2i64 && !Subtarget.is64Bit())
811 continue;
813 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
814 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
815 }
817 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
818 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
819 setOperationPromotedToType(ISD::AND, VT, MVT::v2i64);
820 setOperationPromotedToType(ISD::OR, VT, MVT::v2i64);
821 setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64);
822 setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
823 setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64);
824 }
826 // Custom lower v2i64 and v2f64 selects.
827 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
828 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
830 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
831 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
833 setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
835 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
836 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
837 // As there is no 64-bit GPR available, we need to build a special custom
838 // sequence to convert from v2i32 to v2f32.
839 if (!Subtarget.is64Bit())
840 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
842 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
843 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
845 for (MVT VT : MVT::fp_vector_valuetypes())
846 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
848 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
849 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
850 setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
852 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
853 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
854 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
856 for (auto VT : { MVT::v8i16, MVT::v16i8 }) {
857 setOperationAction(ISD::SRL, VT, Custom);
858 setOperationAction(ISD::SHL, VT, Custom);
859 setOperationAction(ISD::SRA, VT, Custom);
860 }
862 // In the customized shift lowering, the legal cases in AVX2 will be
863 // recognized (see the note after this block).
864 for (auto VT : { MVT::v4i32, MVT::v2i64 }) {
865 setOperationAction(ISD::SRL, VT, Custom);
866 setOperationAction(ISD::SHL, VT, Custom);
867 setOperationAction(ISD::SRA, VT, Custom);
868 }
869 }
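// Background: per-lane variable vector shifts (VPSLLVD/VPSRLVD/VPSRAVD) only
// arrive with AVX2, so these SRL/SHL/SRA nodes stay Custom here and the
// customized lowering recognizes the cases that become legal once AVX2 is
// available.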
871 if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
872 setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
873 setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
874 setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
875 // ISD::CTLZ v4i32 - scalarization is faster.
876 // ISD::CTLZ v2i64 - scalarization is faster.
877 }
879 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
880 for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
881 setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
882 setOperationAction(ISD::FCEIL, RoundedTy, Legal);
883 setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
884 setOperationAction(ISD::FRINT, RoundedTy, Legal);
885 setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
886 }
888 setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
889 setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
890 setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
891 setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
892 setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
893 setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
894 setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
895 setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
897 // FIXME: Do we need to handle scalar-to-vector here?
898 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
900 // We directly match byte blends in the backend as they match the VSELECT
901 // condition form.
902 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
904 // SSE41 brings specific instructions for doing vector sign extend even in
905 // cases where we don't have SRA.
906 for (MVT VT : MVT::integer_vector_valuetypes()) {
907 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
908 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
909 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
910 }
912 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
913 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
914 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
915 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
916 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
917 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
918 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
920 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
921 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
922 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
923 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
924 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
925 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
927 // i8 vectors are custom because the source register and source
928 // memory operand types are not the same width.
929 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
930 }
932 if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
933 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
934 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
935 setOperationAction(ISD::ROTL, VT, Custom);
937 // XOP can efficiently perform BITREVERSE with VPPERM.
938 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
939 setOperationAction(ISD::BITREVERSE, VT, Custom);
941 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
942 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
943 setOperationAction(ISD::BITREVERSE, VT, Custom);
944 }
946 if (!Subtarget.useSoftFloat() && Subtarget.hasFp256()) {
947 bool HasInt256 = Subtarget.hasInt256();
949 addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
950 addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
951 addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
952 addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
953 addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
954 addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
956 for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
957 setOperationAction(ISD::FFLOOR, VT, Legal);
958 setOperationAction(ISD::FCEIL, VT, Legal);
959 setOperationAction(ISD::FTRUNC, VT, Legal);
960 setOperationAction(ISD::FRINT, VT, Legal);
961 setOperationAction(ISD::FNEARBYINT, VT, Legal);
962 setOperationAction(ISD::FNEG, VT, Custom);
963 setOperationAction(ISD::FABS, VT, Custom);
964 }
966 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
967 // even though v8i16 is a legal type.
968 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
969 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
970 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
972 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
973 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
974 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
976 setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
977 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
979 for (MVT VT : MVT::fp_vector_valuetypes())
980 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
982 for (auto VT : { MVT::v32i8, MVT::v16i16 }) {
983 setOperationAction(ISD::SRL, VT, Custom);
984 setOperationAction(ISD::SHL, VT, Custom);
985 setOperationAction(ISD::SRA, VT, Custom);
986 }
988 setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
989 setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
990 setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
991 setOperationAction(ISD::SETCC, MVT::v4i64, Custom);
993 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
994 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
995 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
997 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
998 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
999 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1000 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
1001 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
1002 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
1003 setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
1004 setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
1005 setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
1006 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1007 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1008 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1009 setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
1011 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1012 setOperationAction(ISD::CTPOP, VT, Custom);
1013 setOperationAction(ISD::CTTZ, VT, Custom);
1014 }
1016 // ISD::CTLZ v8i32/v4i64 - scalarization is faster without AVX2
1017 // as we end up splitting the 256-bit vectors.
1018 for (auto VT : { MVT::v32i8, MVT::v16i16 })
1019 setOperationAction(ISD::CTLZ, VT, Custom);
1021 if (HasInt256)
1022 for (auto VT : { MVT::v8i32, MVT::v4i64 })
1023 setOperationAction(ISD::CTLZ, VT, Custom);
1025 if (Subtarget.hasAnyFMA()) {
1026 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1027 MVT::v2f64, MVT::v4f64 })
1028 setOperationAction(ISD::FMA, VT, Legal);
1029 }
1031 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1032 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1033 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1034 }
1036 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1037 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1038 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1039 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1041 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1042 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1044 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1045 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1046 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1047 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1049 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1050 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1051 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1052 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1053 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1054 }
1056 if (HasInt256) {
1057 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
1058 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i32, Custom);
1059 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v16i16, Custom);
1061 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1062 // when we have a 256bit-wide blend with immediate.
1063 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1065 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1066 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1067 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1068 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1069 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1070 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1071 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1073 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1074 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1075 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1076 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1077 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1078 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1079 }
1081 // In the customized shift lowering, the legal cases in AVX2 will be
1082 // recognized.
1083 for (auto VT : { MVT::v8i32, MVT::v4i64 }) {
1084 setOperationAction(ISD::SRL, VT, Custom);
1085 setOperationAction(ISD::SHL, VT, Custom);
1086 setOperationAction(ISD::SRA, VT, Custom);
1087 }
1089 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1090 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1091 setOperationAction(ISD::MLOAD, VT, Legal);
1092 setOperationAction(ISD::MSTORE, VT, Legal);
1093 }
1095 // Extract subvector is special because the value type
1096 // (result) is 128-bit but the source is 256-bit wide.
1097 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1098 MVT::v4f32, MVT::v2f64 }) {
1099 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1100 }
1102 // Custom lower several nodes for 256-bit types.
1103 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1104 MVT::v8f32, MVT::v4f64 }) {
1105 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1106 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1107 setOperationAction(ISD::VSELECT, VT, Custom);
1108 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1109 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1110 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1111 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1112 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1113 }
1115 if (HasInt256)
1116 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1118 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1119 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1120 setOperationPromotedToType(ISD::AND, VT, MVT::v4i64);
1121 setOperationPromotedToType(ISD::OR, VT, MVT::v4i64);
1122 setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64);
1123 setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
1124 setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64);
1125 }
1126 }
1128 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1129 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1130 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1131 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1132 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1134 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1135 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1136 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1138 for (MVT VT : MVT::fp_vector_valuetypes())
1139 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1141 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
1142 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1143 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1144 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1145 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1146 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1147 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1148 }
1149 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1150 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1151 setOperationAction(ISD::SETCCE, MVT::i1, Custom);
1152 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
1153 setOperationAction(ISD::XOR, MVT::i1, Legal);
1154 setOperationAction(ISD::OR, MVT::i1, Legal);
1155 setOperationAction(ISD::AND, MVT::i1, Legal);
1156 setOperationAction(ISD::SUB, MVT::i1, Custom);
1157 setOperationAction(ISD::ADD, MVT::i1, Custom);
1158 setOperationAction(ISD::MUL, MVT::i1, Custom);
1160 for (MVT VT : {MVT::v2i64, MVT::v4i32, MVT::v8i32, MVT::v4i64, MVT::v8i16,
1161 MVT::v16i8, MVT::v16i16, MVT::v32i8, MVT::v16i32,
1162 MVT::v8i64, MVT::v32i16, MVT::v64i8}) {
1163 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
1164 setLoadExtAction(ISD::SEXTLOAD, VT, MaskVT, Custom);
1165 setLoadExtAction(ISD::ZEXTLOAD, VT, MaskVT, Custom);
1166 setLoadExtAction(ISD::EXTLOAD, VT, MaskVT, Custom);
1167 setTruncStoreAction(VT, MaskVT, Custom);
1168 }
1170 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1171 setOperationAction(ISD::FNEG, VT, Custom);
1172 setOperationAction(ISD::FABS, VT, Custom);
1173 setOperationAction(ISD::FMA, VT, Legal);
1174 }
1176 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1177 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1178 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1179 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1180 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1181 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1182 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1183 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1184 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1185 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1186 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1187 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1188 setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom);
1189 setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Custom);
1190 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1191 setOperationAction(ISD::UINT_TO_FP, MVT::v16i1, Custom);
1192 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1193 setOperationAction(ISD::UINT_TO_FP, MVT::v8i1, Custom);
1194 setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
1195 setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);
1196 setOperationAction(ISD::SINT_TO_FP, MVT::v2i1, Custom);
1197 setOperationAction(ISD::UINT_TO_FP, MVT::v2i1, Custom);
1198 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1199 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1201 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1202 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1203 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1204 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1205 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1206 if (Subtarget.hasVLX()){
1207 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1208 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1209 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1210 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1211 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1213 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1214 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1215 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1216 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1217 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1218 } else {
1219 setOperationAction(ISD::MLOAD, MVT::v8i32, Custom);
1220 setOperationAction(ISD::MLOAD, MVT::v8f32, Custom);
1221 setOperationAction(ISD::MSTORE, MVT::v8i32, Custom);
1222 setOperationAction(ISD::MSTORE, MVT::v8f32, Custom);
1223 }
1224 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1225 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1226 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1227 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i1, Custom);
1228 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i1, Custom);
1229 setOperationAction(ISD::VSELECT, MVT::v8i1, Expand);
1230 setOperationAction(ISD::VSELECT, MVT::v16i1, Expand);
1231 if (Subtarget.hasDQI()) {
1232 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1233 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1234 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1235 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1236 if (Subtarget.hasVLX()) {
1237 setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Legal);
1238 setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
1239 setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Legal);
1240 setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
1241 setOperationAction(ISD::FP_TO_SINT, MVT::v4i64, Legal);
1242 setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
1243 setOperationAction(ISD::FP_TO_UINT, MVT::v4i64, Legal);
1244 setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
1245 }
1246 }
1247 if (Subtarget.hasVLX()) {
1248 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1249 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1250 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1251 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1252 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
1253 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1254 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
1255 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1256 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Custom);
1257 setOperationAction(ISD::ZERO_EXTEND, MVT::v2i64, Custom);
1259 // FIXME: These operations are also available on SSE/AVX2; add the relevant patterns.
1260 setLoadExtAction(ISD::EXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1261 setLoadExtAction(ISD::EXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1262 setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1263 setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1264 setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1265 setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1266 setLoadExtAction(ISD::EXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1267 setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1268 setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1269 setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
1270 }
1272 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1273 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1274 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1275 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1276 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1277 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1278 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1279 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1280 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1281 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1282 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1283 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1284 if (Subtarget.hasDQI()) {
1285 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Custom);
1286 setOperationAction(ISD::SIGN_EXTEND, MVT::v2i64, Custom);
1287 }
1288 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1289 setOperationAction(ISD::FFLOOR, VT, Legal);
1290 setOperationAction(ISD::FCEIL, VT, Legal);
1291 setOperationAction(ISD::FTRUNC, VT, Legal);
1292 setOperationAction(ISD::FRINT, VT, Legal);
1293 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1294 }
1296 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1297 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1298 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1299 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1300 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom);
1302 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1303 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1305 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1307 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1308 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1309 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
1310 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1311 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1312 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1313 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1314 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1315 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1316 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1317 setOperationAction(ISD::SELECT, MVT::v16i1, Custom);
1318 setOperationAction(ISD::SELECT, MVT::v8i1, Custom);
1320 setOperationAction(ISD::SMAX, MVT::v16i32, Legal);
1321 setOperationAction(ISD::SMAX, MVT::v8i64, Legal);
1322 setOperationAction(ISD::UMAX, MVT::v16i32, Legal);
1323 setOperationAction(ISD::UMAX, MVT::v8i64, Legal);
1324 setOperationAction(ISD::SMIN, MVT::v16i32, Legal);
1325 setOperationAction(ISD::SMIN, MVT::v8i64, Legal);
1326 setOperationAction(ISD::UMIN, MVT::v16i32, Legal);
1327 setOperationAction(ISD::UMIN, MVT::v8i64, Legal);
1329 setOperationAction(ISD::ADD, MVT::v8i1, Expand);
1330 setOperationAction(ISD::ADD, MVT::v16i1, Expand);
1331 setOperationAction(ISD::SUB, MVT::v8i1, Expand);
1332 setOperationAction(ISD::SUB, MVT::v16i1, Expand);
1333 setOperationAction(ISD::MUL, MVT::v8i1, Expand);
1334 setOperationAction(ISD::MUL, MVT::v16i1, Expand);
1336 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1338 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1339 setOperationAction(ISD::SRL, VT, Custom);
1340 setOperationAction(ISD::SHL, VT, Custom);
1341 setOperationAction(ISD::SRA, VT, Custom);
1342 setOperationAction(ISD::AND, VT, Legal);
1343 setOperationAction(ISD::OR, VT, Legal);
1344 setOperationAction(ISD::XOR, VT, Legal);
1345 setOperationAction(ISD::CTPOP, VT, Custom);
1346 setOperationAction(ISD::CTTZ, VT, Custom);
1349 if (Subtarget.hasCDI()) {
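// AVX-512CD provides a native vector leading-zero count (VPLZCNTD/VPLZCNTQ),
// so CTLZ is Legal for 512-bit i32/i64 elements; the narrower element types
// below get custom lowering.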
1350 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1351 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1353 setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
1354 setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
1355 setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
1356 setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
1358 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
1359 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
1361 if (Subtarget.hasVLX()) {
1362 setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
1363 setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
1364 setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
1365 setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
1367 setOperationAction(ISD::CTLZ, MVT::v4i64, Custom);
1368 setOperationAction(ISD::CTLZ, MVT::v8i32, Custom);
1369 setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1370 setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
1373 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
1374 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
1375 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
1376 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
1377 } // Subtarget.hasCDI()
1379 if (Subtarget.hasDQI()) {
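// AVX-512DQ adds a true 64-bit element multiply (VPMULLQ), so these i64
// vector MULs become Legal (the 128/256-bit forms additionally require VLX).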
1380 if (Subtarget.hasVLX()) {
1381 setOperationAction(ISD::MUL, MVT::v2i64, Legal);
1382 setOperationAction(ISD::MUL, MVT::v4i64, Legal);
1384 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1386 // Custom lower several nodes.
1387 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1388 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1389 setOperationAction(ISD::MGATHER, VT, Custom);
1390 setOperationAction(ISD::MSCATTER, VT, Custom);
1392 // Extract subvector is special because the value type
1393 // (result) is 256-bit but the source is 512-bit wide.
1394 // 128-bit was made Custom under AVX1.
1395 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1396 MVT::v8f32, MVT::v4f64 })
1397 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1398 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1,
1399 MVT::v16i1, MVT::v32i1, MVT::v64i1 })
1400 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1402 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1403 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1404 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1405 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1406 setOperationAction(ISD::VSELECT, VT, Legal);
1407 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1408 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1409 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1410 setOperationAction(ISD::MLOAD, VT, Legal);
1411 setOperationAction(ISD::MSTORE, VT, Legal);
1412 setOperationAction(ISD::MGATHER, VT, Legal);
1413 setOperationAction(ISD::MSCATTER, VT, Custom);
1415 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
1416 setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64);
1420 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
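// AVX-512BW adds the 512-bit byte/word vector types and the 32- and 64-bit
// mask register classes used below.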
1421 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1422 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1424 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1425 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1427 setOperationAction(ISD::ADD, MVT::v32i1, Expand);
1428 setOperationAction(ISD::ADD, MVT::v64i1, Expand);
1429 setOperationAction(ISD::SUB, MVT::v32i1, Expand);
1430 setOperationAction(ISD::SUB, MVT::v64i1, Expand);
1431 setOperationAction(ISD::MUL, MVT::v32i1, Expand);
1432 setOperationAction(ISD::MUL, MVT::v64i1, Expand);
1434 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1435 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1436 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1437 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1438 setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
1439 setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
1440 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
1441 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
1442 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
1443 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
1444 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
1445 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
1446 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Custom);
1447 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Custom);
1448 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1449 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1450 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
1451 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
1452 setOperationAction(ISD::SELECT, MVT::v32i1, Custom);
1453 setOperationAction(ISD::SELECT, MVT::v64i1, Custom);
1454 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1455 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1456 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1457 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1458 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1459 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
1460 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
1461 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1462 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1463 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i1, Custom);
1464 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i1, Custom);
1465 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
1466 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
1467 setOperationAction(ISD::VSELECT, MVT::v32i16, Legal);
1468 setOperationAction(ISD::VSELECT, MVT::v64i8, Legal);
1469 setOperationAction(ISD::TRUNCATE, MVT::v32i1, Custom);
1470 setOperationAction(ISD::TRUNCATE, MVT::v64i1, Custom);
1471 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1472 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i1, Custom);
1473 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i1, Custom);
1474 setOperationAction(ISD::BUILD_VECTOR, MVT::v32i1, Custom);
1475 setOperationAction(ISD::BUILD_VECTOR, MVT::v64i1, Custom);
1476 setOperationAction(ISD::VSELECT, MVT::v32i1, Expand);
1477 setOperationAction(ISD::VSELECT, MVT::v64i1, Expand);
1478 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1480 setOperationAction(ISD::SMAX, MVT::v64i8, Legal);
1481 setOperationAction(ISD::SMAX, MVT::v32i16, Legal);
1482 setOperationAction(ISD::UMAX, MVT::v64i8, Legal);
1483 setOperationAction(ISD::UMAX, MVT::v32i16, Legal);
1484 setOperationAction(ISD::SMIN, MVT::v64i8, Legal);
1485 setOperationAction(ISD::SMIN, MVT::v32i16, Legal);
1486 setOperationAction(ISD::UMIN, MVT::v64i8, Legal);
1487 setOperationAction(ISD::UMIN, MVT::v32i16, Legal);
1489 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1490 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1491 if (Subtarget.hasVLX())
1492 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1494 LegalizeAction Action = Subtarget.hasVLX() ? Legal : Custom;
1495 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1496 setOperationAction(ISD::MLOAD, VT, Action);
1497 setOperationAction(ISD::MSTORE, VT, Action);
1500 if (Subtarget.hasCDI()) {
1501 setOperationAction(ISD::CTLZ, MVT::v32i16, Custom);
1502 setOperationAction(ISD::CTLZ, MVT::v64i8, Custom);
1505 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1506 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1507 setOperationAction(ISD::VSELECT, VT, Legal);
1508 setOperationAction(ISD::SRL, VT, Custom);
1509 setOperationAction(ISD::SHL, VT, Custom);
1510 setOperationAction(ISD::SRA, VT, Custom);
1511 setOperationAction(ISD::MLOAD, VT, Legal);
1512 setOperationAction(ISD::MSTORE, VT, Legal);
1513 setOperationAction(ISD::CTPOP, VT, Custom);
1514 setOperationAction(ISD::CTTZ, VT, Custom);
1516 setOperationPromotedToType(ISD::AND, VT, MVT::v8i64);
1517 setOperationPromotedToType(ISD::OR, VT, MVT::v8i64);
1518 setOperationPromotedToType(ISD::XOR, VT, MVT::v8i64);
1521 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) {
1522 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1523 if (Subtarget.hasVLX()) {
1524 // FIXME: These instructions are available on SSE/AVX2; add the relevant patterns.
1525 setLoadExtAction(ExtType, MVT::v16i16, MVT::v16i8, Legal);
1526 setLoadExtAction(ExtType, MVT::v8i16, MVT::v8i8, Legal);
1531 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
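// AVX-512VL makes the short mask vector types (v2i1/v4i1) usable for masking
// 128-bit and 256-bit operations.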
1532 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1533 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1535 setOperationAction(ISD::ADD, MVT::v2i1, Expand);
1536 setOperationAction(ISD::ADD, MVT::v4i1, Expand);
1537 setOperationAction(ISD::SUB, MVT::v2i1, Expand);
1538 setOperationAction(ISD::SUB, MVT::v4i1, Expand);
1539 setOperationAction(ISD::MUL, MVT::v2i1, Expand);
1540 setOperationAction(ISD::MUL, MVT::v4i1, Expand);
1542 setOperationAction(ISD::TRUNCATE, MVT::v2i1, Custom);
1543 setOperationAction(ISD::TRUNCATE, MVT::v4i1, Custom);
1544 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1545 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1546 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Custom);
1547 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1548 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Custom);
1549 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i1, Custom);
1550 setOperationAction(ISD::SELECT, MVT::v4i1, Custom);
1551 setOperationAction(ISD::SELECT, MVT::v2i1, Custom);
1552 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);
1553 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i1, Custom);
1554 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i1, Custom);
1555 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
1556 setOperationAction(ISD::VSELECT, MVT::v2i1, Expand);
1557 setOperationAction(ISD::VSELECT, MVT::v4i1, Expand);
1559 for (auto VT : { MVT::v4i32, MVT::v8i32 }) {
1560 setOperationAction(ISD::AND, VT, Legal);
1561 setOperationAction(ISD::OR, VT, Legal);
1562 setOperationAction(ISD::XOR, VT, Legal);
1565 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1566 setOperationAction(ISD::SMAX, VT, Legal);
1567 setOperationAction(ISD::UMAX, VT, Legal);
1568 setOperationAction(ISD::SMIN, VT, Legal);
1569 setOperationAction(ISD::UMIN, VT, Legal);
1573 // We want to custom lower some of our intrinsics.
1574 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1575 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1576 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1577 if (!Subtarget.is64Bit()) {
1578 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1579 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
1582 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1583 // handle type legalization for these operations here.
1585 // FIXME: We really should do custom legalization for addition and
1586 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1587 // than generic legalization for 64-bit multiplication-with-overflow, though.
1588 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1589 if (VT == MVT::i64 && !Subtarget.is64Bit())
1591 // Add/Sub/Mul with overflow operations are custom lowered.
1592 setOperationAction(ISD::SADDO, VT, Custom);
1593 setOperationAction(ISD::UADDO, VT, Custom);
1594 setOperationAction(ISD::SSUBO, VT, Custom);
1595 setOperationAction(ISD::USUBO, VT, Custom);
1596 setOperationAction(ISD::SMULO, VT, Custom);
1597 setOperationAction(ISD::UMULO, VT, Custom);
1600 if (!Subtarget.is64Bit()) {
1601 // These libcalls are not available on 32-bit targets.
1602 setLibcallName(RTLIB::SHL_I128, nullptr);
1603 setLibcallName(RTLIB::SRL_I128, nullptr);
1604 setLibcallName(RTLIB::SRA_I128, nullptr);
1607 // Combine sin / cos into one node or libcall if possible.
1608 if (Subtarget.hasSinCos()) {
1609 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1610 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1611 if (Subtarget.isTargetDarwin()) {
1612 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1613 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1614 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1615 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1619 if (Subtarget.isTargetWin64()) {
1620 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1621 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1622 setOperationAction(ISD::SREM, MVT::i128, Custom);
1623 setOperationAction(ISD::UREM, MVT::i128, Custom);
1624 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1625 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1628 // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1629 // is. We should promote the value to 64 bits to solve this.
1630 // This is what the CRT headers do - `fmodf` is an inline header
1631 // function that casts to f64 and calls `fmod`.
1632 if (Subtarget.is32Bit() && Subtarget.isTargetKnownWindowsMSVC())
1633 for (ISD::NodeType Op :
1634 {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
1635 ISD::FLOG10, ISD::FPOW, ISD::FSIN})
1636 if (isOperationExpand(Op, MVT::f32))
1637 setOperationAction(Op, MVT::f32, Promote);
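// Illustratively, a call such as fmodf(x, y) then ends up roughly as
// (float)fmod((double)x, (double)y), matching the inline definition in the
// CRT headers.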
1639 // We have target-specific dag combine patterns for the following nodes:
1640 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1641 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1642 setTargetDAGCombine(ISD::BITCAST);
1643 setTargetDAGCombine(ISD::VSELECT);
1644 setTargetDAGCombine(ISD::SELECT);
1645 setTargetDAGCombine(ISD::SHL);
1646 setTargetDAGCombine(ISD::SRA);
1647 setTargetDAGCombine(ISD::SRL);
1648 setTargetDAGCombine(ISD::OR);
1649 setTargetDAGCombine(ISD::AND);
1650 setTargetDAGCombine(ISD::ADD);
1651 setTargetDAGCombine(ISD::FADD);
1652 setTargetDAGCombine(ISD::FSUB);
1653 setTargetDAGCombine(ISD::FNEG);
1654 setTargetDAGCombine(ISD::FMA);
1655 setTargetDAGCombine(ISD::FMINNUM);
1656 setTargetDAGCombine(ISD::FMAXNUM);
1657 setTargetDAGCombine(ISD::SUB);
1658 setTargetDAGCombine(ISD::LOAD);
1659 setTargetDAGCombine(ISD::MLOAD);
1660 setTargetDAGCombine(ISD::STORE);
1661 setTargetDAGCombine(ISD::MSTORE);
1662 setTargetDAGCombine(ISD::TRUNCATE);
1663 setTargetDAGCombine(ISD::ZERO_EXTEND);
1664 setTargetDAGCombine(ISD::ANY_EXTEND);
1665 setTargetDAGCombine(ISD::SIGN_EXTEND);
1666 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1667 setTargetDAGCombine(ISD::SINT_TO_FP);
1668 setTargetDAGCombine(ISD::UINT_TO_FP);
1669 setTargetDAGCombine(ISD::SETCC);
1670 setTargetDAGCombine(ISD::MUL);
1671 setTargetDAGCombine(ISD::XOR);
1672 setTargetDAGCombine(ISD::MSCATTER);
1673 setTargetDAGCombine(ISD::MGATHER);
1675 computeRegisterProperties(Subtarget.getRegisterInfo());
1677 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1678 MaxStoresPerMemsetOptSize = 8;
1679 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1680 MaxStoresPerMemcpyOptSize = 4;
1681 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1682 MaxStoresPerMemmoveOptSize = 4;
1683 setPrefLoopAlignment(4); // 2^4 bytes.
1685 // An out-of-order CPU can speculatively execute past a predictable branch,
1686 // but a conditional move could be stalled by an expensive earlier operation.
1687 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
1688 EnableExtLdPromotion = true;
1689 setPrefFunctionAlignment(4); // 2^4 bytes.
1691 verifyIntrinsicTables();
1694 // This has so far only been implemented for 64-bit MachO.
1695 bool X86TargetLowering::useLoadStackGuardNode() const {
1696 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
1699 TargetLoweringBase::LegalizeTypeAction
1700 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1701 if (ExperimentalVectorWideningLegalization &&
1702 VT.getVectorNumElements() != 1 &&
1703 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1704 return TypeWidenVector;
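// For example, with widening a v2i32 value is typically legalized to v4i32
// (padding with undef elements) instead of being promoted to a vector with
// wider integer elements.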
1706 return TargetLoweringBase::getPreferredVectorAction(VT);
1709 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
1710 LLVMContext& Context,
1713 return Subtarget.hasAVX512() ? MVT::i1 : MVT::i8;
1715 if (VT.isSimple()) {
1716 MVT VVT = VT.getSimpleVT();
1717 const unsigned NumElts = VVT.getVectorNumElements();
1718 MVT EltVT = VVT.getVectorElementType();
1719 if (VVT.is512BitVector()) {
1720 if (Subtarget.hasAVX512())
1721 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1722 EltVT == MVT::f32 || EltVT == MVT::f64)
1724 case 8: return MVT::v8i1;
1725 case 16: return MVT::v16i1;
1727 if (Subtarget.hasBWI())
1728 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1730 case 32: return MVT::v32i1;
1731 case 64: return MVT::v64i1;
1735 if (Subtarget.hasBWI() && Subtarget.hasVLX())
1736 return MVT::getVectorVT(MVT::i1, NumElts);
1738 if (!isTypeLegal(VT) && getTypeAction(Context, VT) == TypePromoteInteger) {
1739 EVT LegalVT = getTypeToTransformTo(Context, VT);
1740 EltVT = LegalVT.getVectorElementType().getSimpleVT();
1743 if (Subtarget.hasVLX() && EltVT.getSizeInBits() >= 32)
1745 case 2: return MVT::v2i1;
1746 case 4: return MVT::v4i1;
1747 case 8: return MVT::v8i1;
1751 return VT.changeVectorElementTypeToInteger();
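// For example, a v4f32 compare without AVX-512 produces a v4i32 all-ones /
// all-zeros mask, while with AVX-512 (VLX plus BWI here) the same compare
// yields a v4i1 mask-register value.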
1754 /// Helper for getByValTypeAlignment to determine
1755 /// the desired ByVal argument alignment.
1756 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1759 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1760 if (VTy->getBitWidth() == 128)
1762 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1763 unsigned EltAlign = 0;
1764 getMaxByValAlign(ATy->getElementType(), EltAlign);
1765 if (EltAlign > MaxAlign)
1766 MaxAlign = EltAlign;
1767 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1768 for (auto *EltTy : STy->elements()) {
1769 unsigned EltAlign = 0;
1770 getMaxByValAlign(EltTy, EltAlign);
1771 if (EltAlign > MaxAlign)
1772 MaxAlign = EltAlign;
1779 /// Return the desired alignment for ByVal aggregate
1780 /// function arguments in the caller parameter area. For X86, aggregates
1781 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1782 /// are at 4-byte boundaries.
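/// For example, on a 32-bit target with SSE, a byval struct containing a
/// <4 x float> field is placed at a 16-byte boundary, while one holding only
/// scalar i32 fields stays at 4 bytes.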
1783 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
1784 const DataLayout &DL) const {
1785 if (Subtarget.is64Bit()) {
1786 // Max of 8 and alignment of type.
1787 unsigned TyAlign = DL.getABITypeAlignment(Ty);
1794 if (Subtarget.hasSSE1())
1795 getMaxByValAlign(Ty, Align);
1799 /// Returns the target specific optimal type for load
1800 /// and store operations as a result of memset, memcpy, and memmove
1801 /// lowering. If DstAlign is zero, it means that it is safe to assume the
1802 /// destination alignment can satisfy any constraint. Similarly, if SrcAlign
1803 /// is zero, there is no need to check it against an alignment requirement,
1804 /// probably because the source does not need to be loaded. If 'IsMemset' is
1805 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1806 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1807 /// source is constant so it does not need to be loaded.
1808 /// It returns EVT::Other if the type should be determined using generic
1809 /// target-independent logic.
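/// For example, a large memset on an AVX-capable target with fast unaligned
/// accesses will typically be given a 256-bit vector type, while a small copy
/// on a plain 64-bit target falls back to i64.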
1811 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1812 unsigned DstAlign, unsigned SrcAlign,
1813 bool IsMemset, bool ZeroMemset,
1815 MachineFunction &MF) const {
1816 const Function *F = MF.getFunction();
1817 if (!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1819 (!Subtarget.isUnalignedMem16Slow() ||
1820 ((DstAlign == 0 || DstAlign >= 16) &&
1821 (SrcAlign == 0 || SrcAlign >= 16)))) {
1822 // FIXME: Check if unaligned 32-byte accesses are slow.
1823 if (Size >= 32 && Subtarget.hasAVX()) {
1824 // Although this isn't a well-supported type for AVX1, we'll let
1825 // legalization and shuffle lowering produce the optimal codegen. If we
1826 // choose an optimal type with a vector element larger than a byte,
1827 // getMemsetStores() may create an intermediate splat (using an integer
1828 // multiply) before we splat as a vector.
1831 if (Subtarget.hasSSE2())
1833 // TODO: Can SSE1 handle a byte vector?
1834 if (Subtarget.hasSSE1())
1836 } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
1837 !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
1838 // Do not use f64 to lower memcpy if source is string constant. It's
1839 // better to use i32 to avoid the loads.
1840 // Also, do not use f64 to lower memset unless this is a memset of zeros.
1841 // The gymnastics of splatting a byte value into an XMM register and then
1842 // only using 8-byte stores (because this is a CPU with slow unaligned
1843 // 16-byte accesses) makes that a loser.
1847 // This is a compromise. If we reach here, unaligned accesses may be slow on
1848 // this target. However, creating smaller, aligned accesses could be even
1849 // slower and would certainly be a lot more code.
1850 if (Subtarget.is64Bit() && Size >= 8)
1855 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1857 return X86ScalarSSEf32;
1858 else if (VT == MVT::f64)
1859 return X86ScalarSSEf64;
1864 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1869 switch (VT.getSizeInBits()) {
1871 // 8-byte and under are always assumed to be fast.
1875 *Fast = !Subtarget.isUnalignedMem16Slow();
1878 *Fast = !Subtarget.isUnalignedMem32Slow();
1880 // TODO: What about AVX-512 (512-bit) accesses?
1883 // Misaligned accesses of any size are always allowed.
1887 /// Return the entry encoding for a jump table in the
1888 /// current function. The returned value is a member of the
1889 /// MachineJumpTableInfo::JTEntryKind enum.
1890 unsigned X86TargetLowering::getJumpTableEncoding() const {
1891 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1893 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
1894 return MachineJumpTableInfo::EK_Custom32;
1896 // Otherwise, use the normal jump table encoding heuristics.
1897 return TargetLowering::getJumpTableEncoding();
1900 bool X86TargetLowering::useSoftFloat() const {
1901 return Subtarget.useSoftFloat();
1905 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1906 const MachineBasicBlock *MBB,
1907 unsigned uid,MCContext &Ctx) const{
1908 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
1909 // On 32-bit ELF systems, our jump table entries are formed with @GOTOFF references.
1911 return MCSymbolRefExpr::create(MBB->getSymbol(),
1912 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1915 /// Returns relocation base for the given PIC jumptable.
1916 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1917 SelectionDAG &DAG) const {
1918 if (!Subtarget.is64Bit())
1919 // This doesn't have SDLoc associated with it, but is not really the
1920 // same as a Register.
1921 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
1922 getPointerTy(DAG.getDataLayout()));
1926 /// This returns the relocation base for the given PIC jumptable,
1927 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1928 const MCExpr *X86TargetLowering::
1929 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1930 MCContext &Ctx) const {
1931 // X86-64 uses RIP relative addressing based on the jump table label.
1932 if (Subtarget.isPICStyleRIPRel())
1933 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1935 // Otherwise, the reference is relative to the PIC base.
1936 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
1939 std::pair<const TargetRegisterClass *, uint8_t>
1940 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1942 const TargetRegisterClass *RRC = nullptr;
1944 switch (VT.SimpleTy) {
1946 return TargetLowering::findRepresentativeClass(TRI, VT);
1947 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1948 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1951 RRC = &X86::VR64RegClass;
1953 case MVT::f32: case MVT::f64:
1954 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1955 case MVT::v4f32: case MVT::v2f64:
1956 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1958 RRC = &X86::VR128RegClass;
1961 return std::make_pair(RRC, Cost);
1964 unsigned X86TargetLowering::getAddressSpace() const {
1965 if (Subtarget.is64Bit())
1966 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
1970 Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
1971 // glibc has a special slot for the stack guard in tcbhead_t, use it instead
1972 // of the usual global variable (see sysdeps/{i386,x86_64}/nptl/tls.h)
1973 if (!Subtarget.isTargetGlibc())
1974 return TargetLowering::getIRStackGuard(IRB);
1976 // %fs:0x28 on x86-64 (%gs: under the Kernel code model), or %gs:0x14 on i386.
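// In generated code the guard load then looks roughly like
//   mov %fs:0x28, %rax   (x86-64 glibc)
//   mov %gs:0x14, %eax   (i386 glibc)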
1978 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
1979 unsigned AddressSpace = getAddressSpace();
1980 return ConstantExpr::getIntToPtr(
1981 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
1982 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
1985 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
1986 // The MSVC CRT provides functionality for stack protection.
1987 if (Subtarget.getTargetTriple().isOSMSVCRT()) {
1988 // The MSVC CRT has a global variable holding the security cookie.
1989 M.getOrInsertGlobal("__security_cookie",
1990 Type::getInt8PtrTy(M.getContext()));
1992 // The MSVC CRT has a function to validate the security cookie.
1993 auto *SecurityCheckCookie = cast<Function>(
1994 M.getOrInsertFunction("__security_check_cookie",
1995 Type::getVoidTy(M.getContext()),
1996 Type::getInt8PtrTy(M.getContext()), nullptr));
1997 SecurityCheckCookie->setCallingConv(CallingConv::X86_FastCall);
1998 SecurityCheckCookie->addAttribute(1, Attribute::AttrKind::InReg);
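// The declaration created above is roughly:
//   declare x86_fastcallcc void @__security_check_cookie(i8* inreg)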
2001 // glibc has a special slot for the stack guard.
2002 if (Subtarget.isTargetGlibc())
2004 TargetLowering::insertSSPDeclarations(M);
2007 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2008 // The MSVC CRT has a global variable holding the security cookie.
2009 if (Subtarget.getTargetTriple().isOSMSVCRT())
2010 return M.getGlobalVariable("__security_cookie");
2011 return TargetLowering::getSDagStackGuard(M);
2014 Value *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2015 // The MSVC CRT has a function to validate the security cookie.
2016 if (Subtarget.getTargetTriple().isOSMSVCRT())
2017 return M.getFunction("__security_check_cookie");
2018 return TargetLowering::getSSPStackGuardCheck(M);
2021 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2022 if (!Subtarget.isTargetAndroid())
2023 return TargetLowering::getSafeStackPointerLocation(IRB);
2025 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2026 // definition of TLS_SLOT_SAFESTACK in
2027 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2028 unsigned AddressSpace, Offset;
2030 // %fs:0x48 on x86-64 (%gs: under the Kernel code model), or %gs:0x24 on i386.
2032 Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2033 AddressSpace = getAddressSpace();
2034 return ConstantExpr::getIntToPtr(
2035 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2036 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2039 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2040 unsigned DestAS) const {
2041 assert(SrcAS != DestAS && "Expected different address spaces!");
2043 return SrcAS < 256 && DestAS < 256;
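// For example, a cast between address spaces 0 and 1 is a no-op, while a cast
// involving 256 (%gs) or 257 (%fs) is not, since those imply a segment
// override.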
2046 //===----------------------------------------------------------------------===//
2047 // Return Value Calling Convention Implementation
2048 //===----------------------------------------------------------------------===//
2050 #include "X86GenCallingConv.inc"
2052 bool X86TargetLowering::CanLowerReturn(
2053 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2054 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2055 SmallVector<CCValAssign, 16> RVLocs;
2056 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2057 return CCInfo.CheckReturn(Outs, RetCC_X86);
2060 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2061 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2066 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2068 const SmallVectorImpl<ISD::OutputArg> &Outs,
2069 const SmallVectorImpl<SDValue> &OutVals,
2070 const SDLoc &dl, SelectionDAG &DAG) const {
2071 MachineFunction &MF = DAG.getMachineFunction();
2072 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2074 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2075 report_fatal_error("X86 interrupts may not return any value");
2077 SmallVector<CCValAssign, 16> RVLocs;
2078 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2079 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2082 SmallVector<SDValue, 6> RetOps;
2083 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2084 // Operand #1 = Bytes To Pop
2085 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2088 // Copy the result values into the output registers.
2089 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2090 CCValAssign &VA = RVLocs[i];
2091 assert(VA.isRegLoc() && "Can only return in registers!");
2092 SDValue ValToCopy = OutVals[i];
2093 EVT ValVT = ValToCopy.getValueType();
2095 // Promote values to the appropriate types.
2096 if (VA.getLocInfo() == CCValAssign::SExt)
2097 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2098 else if (VA.getLocInfo() == CCValAssign::ZExt)
2099 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2100 else if (VA.getLocInfo() == CCValAssign::AExt) {
2101 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2102 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2104 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2106 else if (VA.getLocInfo() == CCValAssign::BCvt)
2107 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2109 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2110 "Unexpected FP-extend for return value.");
2112 // If this is x86-64, and we disabled SSE, we can't return FP values,
2113 // or SSE or MMX vectors.
2114 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2115 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2116 (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
2117 report_fatal_error("SSE register return with SSE disabled");
2119 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2120 // llvm-gcc has never done it right and no one has noticed, so this
2121 // should be OK for now.
2122 if (ValVT == MVT::f64 &&
2123 (Subtarget.is64Bit() && !Subtarget.hasSSE2()))
2124 report_fatal_error("SSE2 register return with SSE2 disabled");
2126 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2127 // the RET instruction and handled by the FP Stackifier.
2128 if (VA.getLocReg() == X86::FP0 ||
2129 VA.getLocReg() == X86::FP1) {
2130 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2131 // change the value to the FP stack register class.
2132 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2133 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2134 RetOps.push_back(ValToCopy);
2135 // Don't emit a copytoreg.
2139 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2140 // which is returned in RAX / RDX.
2141 if (Subtarget.is64Bit()) {
2142 if (ValVT == MVT::x86mmx) {
2143 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2144 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2145 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2147 // If we don't have SSE2 available, convert to v4f32 so the generated
2148 // register is legal.
2149 if (!Subtarget.hasSSE2())
2150 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2155 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2156 Flag = Chain.getValue(1);
2157 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2160 // Swift calling convention does not require we copy the sret argument
2161 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2163 // All x86 ABIs require that for returning structs by value we copy
2164 // the sret argument into %rax/%eax (depending on ABI) for the return.
2165 // We saved the argument into a virtual register in the entry block,
2166 // so now we copy the value out and into %rax/%eax.
2168 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2169 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2170 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2171 // either case FuncInfo->setSRetReturnReg() will have been called.
2172 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2173 // When we have both sret and another return value, we should use the
2174 // original Chain stored in RetOps[0], instead of the current Chain updated
2175 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
2177 // For the case of sret and another return value, we have
2178 // Chain_0 at the function entry
2179 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2180 // If we use Chain_1 in getCopyFromReg, we will have
2181 // Val = getCopyFromReg(Chain_1)
2182 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2184 // getCopyToReg(Chain_0) will be glued together with
2185 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2186 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2187 // Data dependency from Unit B to Unit A due to usage of Val in
2188 // getCopyToReg(Chain_1, Val)
2189 // Chain dependency from Unit A to Unit B
2191 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
2192 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2193 getPointerTy(MF.getDataLayout()));
2196 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2197 X86::RAX : X86::EAX;
2198 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2199 Flag = Chain.getValue(1);
2201 // RAX/EAX now acts like a return value.
2203 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2206 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2207 const MCPhysReg *I =
2208 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2211 if (X86::GR64RegClass.contains(*I))
2212 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2214 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2218 RetOps[0] = Chain; // Update chain.
2220 // Add the flag if we have it.
2222 RetOps.push_back(Flag);
2224 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2225 if (CallConv == CallingConv::X86_INTR)
2226 opcode = X86ISD::IRET;
2227 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2230 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2231 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2234 SDValue TCChain = Chain;
2235 SDNode *Copy = *N->use_begin();
2236 if (Copy->getOpcode() == ISD::CopyToReg) {
2237 // If the copy has a glue operand, we conservatively assume it isn't safe to
2238 // perform a tail call.
2239 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2241 TCChain = Copy->getOperand(0);
2242 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2245 bool HasRet = false;
2246 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2248 if (UI->getOpcode() != X86ISD::RET_FLAG)
2250 // If we are returning more than one value, we can definitely
2251 // not make a tail call; see PR19530.
2252 if (UI->getNumOperands() > 4)
2254 if (UI->getNumOperands() == 4 &&
2255 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2267 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2268 ISD::NodeType ExtendKind) const {
2269 MVT ReturnMVT = MVT::i32;
2271 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2272 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2273 // The ABI does not require i1, i8 or i16 to be extended.
2275 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2276 // always extending i8/i16 return values, so keep doing that for now.
2278 ReturnMVT = MVT::i8;
2281 EVT MinVT = getRegisterType(Context, ReturnMVT);
2282 return VT.bitsLT(MinVT) ? MinVT : VT;
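// For example, on Darwin an i8 or i16 return value is widened to i32, while
// on other targets it keeps its natural width; i1 is widened to i8 everywhere.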
2285 /// Lower the result values of a call into the
2286 /// appropriate copies out of appropriate physical registers.
2288 SDValue X86TargetLowering::LowerCallResult(
2289 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2290 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2291 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2293 // Assign locations to each value returned by this call.
2294 SmallVector<CCValAssign, 16> RVLocs;
2295 bool Is64Bit = Subtarget.is64Bit();
2296 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2298 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2300 // Copy all of the result registers out of their specified physreg.
2301 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2302 CCValAssign &VA = RVLocs[i];
2303 EVT CopyVT = VA.getLocVT();
2305 // If this is x86-64, and we disabled SSE, we can't return FP values
2306 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
2307 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget.hasSSE1())) {
2308 report_fatal_error("SSE register return with SSE disabled");
2311 // If we prefer to use the value in xmm registers, copy it out as f80 and
2312 // use a truncate to move it from fp stack reg to xmm reg.
2313 bool RoundAfterCopy = false;
2314 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2315 isScalarFPTypeInSSEReg(VA.getValVT())) {
2316 if (!Subtarget.hasX87())
2317 report_fatal_error("X87 register return with X87 disabled");
2319 RoundAfterCopy = (CopyVT != VA.getLocVT());
2322 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2323 CopyVT, InFlag).getValue(1);
2324 SDValue Val = Chain.getValue(0);
2327 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2328 // This truncation won't change the value.
2329 DAG.getIntPtrConstant(1, dl));
2331 if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
2332 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
2334 InFlag = Chain.getValue(2);
2335 InVals.push_back(Val);
2341 //===----------------------------------------------------------------------===//
2342 // C & StdCall & Fast Calling Convention implementation
2343 //===----------------------------------------------------------------------===//
2344 // The StdCall calling convention is standard for many Windows API routines.
2345 // It differs from the C calling convention only slightly: the callee cleans
2346 // up the stack rather than the caller, and symbols are decorated differently.
2347 // It does not support any vector arguments.
2348 // For info on fast calling convention see Fast Calling Convention (tail call)
2349 // implementation LowerX86_32FastCCCallTo.
2351 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2353 enum StructReturnType { NotStructReturn, RegStructReturn, StackStructReturn };
2358 static StructReturnType
2359 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsMCU) {
2361 return NotStructReturn;
2363 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2364 if (!Flags.isSRet())
2365 return NotStructReturn;
2366 if (Flags.isInReg() || IsMCU)
2367 return RegStructReturn;
2368 return StackStructReturn;
2371 /// Determines whether a function uses struct return semantics.
2372 static StructReturnType
2373 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins, bool IsMCU) {
2375 return NotStructReturn;
2377 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2378 if (!Flags.isSRet())
2379 return NotStructReturn;
2380 if (Flags.isInReg() || IsMCU)
2381 return RegStructReturn;
2382 return StackStructReturn;
2385 /// Make a copy of an aggregate at address specified by "Src" to address
2386 /// "Dst" with size and alignment information specified by the specific
2387 /// parameter attribute. The copy will be passed as a byval function parameter.
2388 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
2389 SDValue Chain, ISD::ArgFlagsTy Flags,
2390 SelectionDAG &DAG, const SDLoc &dl) {
2391 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
2393 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2394 /*isVolatile*/false, /*AlwaysInline=*/true,
2395 /*isTailCall*/false,
2396 MachinePointerInfo(), MachinePointerInfo());
2399 /// Return true if the calling convention is one that we can guarantee TCO for.
2400 static bool canGuaranteeTCO(CallingConv::ID CC) {
2401 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2402 CC == CallingConv::HiPE || CC == CallingConv::HHVM);
2405 /// Return true if we might ever do TCO for calls with this calling convention.
2406 static bool mayTailCallThisCC(CallingConv::ID CC) {
2408 // C calling conventions:
2409 case CallingConv::C:
2410 case CallingConv::X86_64_Win64:
2411 case CallingConv::X86_64_SysV:
2412 // Callee pop conventions:
2413 case CallingConv::X86_ThisCall:
2414 case CallingConv::X86_StdCall:
2415 case CallingConv::X86_VectorCall:
2416 case CallingConv::X86_FastCall:
2419 return canGuaranteeTCO(CC);
2423 /// Return true if the function is being made into a tailcall target by
2424 /// changing its ABI.
2425 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
2426 return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
2429 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2431 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2432 if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2436 CallingConv::ID CalleeCC = CS.getCallingConv();
2437 if (!mayTailCallThisCC(CalleeCC))
2444 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
2445 const SmallVectorImpl<ISD::InputArg> &Ins,
2446 const SDLoc &dl, SelectionDAG &DAG,
2447 const CCValAssign &VA,
2448 MachineFrameInfo *MFI, unsigned i) const {
2449 // Create the nodes corresponding to a load from this parameter slot.
2450 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2451 bool AlwaysUseMutable = shouldGuaranteeTCO(
2452 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2453 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2456 // If the value is passed by pointer, we have the address passed instead of the value itself.
2458 bool ExtendedInMem = VA.isExtInLoc() &&
2459 VA.getValVT().getScalarType() == MVT::i1;
2461 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
2462 ValVT = VA.getLocVT();
2464 ValVT = VA.getValVT();
2466 // Calculate SP offset of interrupt parameter, re-arrange the slot normally
2467 // taken by a return address.
2469 if (CallConv == CallingConv::X86_INTR) {
2470 const X86Subtarget& Subtarget =
2471 static_cast<const X86Subtarget&>(DAG.getSubtarget());
2472 // X86 interrupts may take one or two arguments.
2473 // Unlike a regular call, there is no return address on the stack.
2474 // The offset of the last argument needs to be set to -4/-8 bytes.
2475 // The offset of the first of the two arguments should be set to 0 bytes.
2476 Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1);
2479 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2480 // changed with more analysis.
2481 // In the case of tail call optimization, mark all arguments mutable, since
2482 // they could be overwritten by the lowering of arguments for a tail call.
2483 if (Flags.isByVal()) {
2484 unsigned Bytes = Flags.getByValSize();
2485 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2486 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2487 // Adjust SP offset of interrupt parameter.
2488 if (CallConv == CallingConv::X86_INTR) {
2489 MFI->setObjectOffset(FI, Offset);
2491 return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2493 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2494 VA.getLocMemOffset(), isImmutable);
2496 // Set SExt or ZExt flag.
2497 if (VA.getLocInfo() == CCValAssign::ZExt) {
2498 MFI->setObjectZExt(FI, true);
2499 } else if (VA.getLocInfo() == CCValAssign::SExt) {
2500 MFI->setObjectSExt(FI, true);
2503 // Adjust SP offset of interrupt parameter.
2504 if (CallConv == CallingConv::X86_INTR) {
2505 MFI->setObjectOffset(FI, Offset);
2508 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2509 SDValue Val = DAG.getLoad(
2510 ValVT, dl, Chain, FIN,
2511 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
2512 return ExtendedInMem ?
2513 DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val) : Val;
2517 // FIXME: Get this from tablegen.
2518 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2519 const X86Subtarget &Subtarget) {
2520 assert(Subtarget.is64Bit());
2522 if (Subtarget.isCallingConvWin64(CallConv)) {
2523 static const MCPhysReg GPR64ArgRegsWin64[] = {
2524 X86::RCX, X86::RDX, X86::R8, X86::R9
2526 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2529 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2530 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2532 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2535 // FIXME: Get this from tablegen.
2536 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2537 CallingConv::ID CallConv,
2538 const X86Subtarget &Subtarget) {
2539 assert(Subtarget.is64Bit());
2540 if (Subtarget.isCallingConvWin64(CallConv)) {
2541 // The XMM registers which might contain var arg parameters are shadowed
2542 // in their paired GPR, so we only need to save the GPRs to their home slots.
2544 // TODO: __vectorcall will change this.
2548 const Function *Fn = MF.getFunction();
2549 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2550 bool isSoftFloat = Subtarget.useSoftFloat();
2551 assert(!(isSoftFloat && NoImplicitFloatOps) &&
2552 "SSE register cannot be used when SSE is disabled!");
2553 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
2554 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2558 static const MCPhysReg XMMArgRegs64Bit[] = {
2559 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2560 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2562 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2565 SDValue X86TargetLowering::LowerFormalArguments(
2566 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
2567 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2568 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2569 MachineFunction &MF = DAG.getMachineFunction();
2570 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2571 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
2573 const Function *Fn = MF.getFunction();
2574 if (Fn->hasExternalLinkage() &&
2575 Subtarget.isTargetCygMing() &&
2576 Fn->getName() == "main")
2577 FuncInfo->setForceFramePointer(true);
2579 MachineFrameInfo *MFI = MF.getFrameInfo();
2580 bool Is64Bit = Subtarget.is64Bit();
2581 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
2583 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
2584 "Var args not supported with calling convention fastcc, ghc or hipe");
2586 if (CallConv == CallingConv::X86_INTR) {
2587 bool isLegal = Ins.size() == 1 ||
2588 (Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) ||
2589 (!Is64Bit && Ins[1].VT == MVT::i32)));
2591 report_fatal_error("X86 interrupts may take one or two arguments");
2594 // Assign locations to all of the incoming arguments.
2595 SmallVector<CCValAssign, 16> ArgLocs;
2596 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2598 // Allocate shadow area for Win64
2600 CCInfo.AllocateStack(32, 8);
2602 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2604 unsigned LastVal = ~0U;
2606 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2607 CCValAssign &VA = ArgLocs[i];
2608 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later places.
2610 assert(VA.getValNo() != LastVal &&
2611 "Don't support value assigned to multiple locs yet");
2613 LastVal = VA.getValNo();
2615 if (VA.isRegLoc()) {
2616 EVT RegVT = VA.getLocVT();
2617 const TargetRegisterClass *RC;
2618 if (RegVT == MVT::i32)
2619 RC = &X86::GR32RegClass;
2620 else if (Is64Bit && RegVT == MVT::i64)
2621 RC = &X86::GR64RegClass;
2622 else if (RegVT == MVT::f32)
2623 RC = &X86::FR32RegClass;
2624 else if (RegVT == MVT::f64)
2625 RC = &X86::FR64RegClass;
2626 else if (RegVT == MVT::f128)
2627 RC = &X86::FR128RegClass;
2628 else if (RegVT.is512BitVector())
2629 RC = &X86::VR512RegClass;
2630 else if (RegVT.is256BitVector())
2631 RC = &X86::VR256RegClass;
2632 else if (RegVT.is128BitVector())
2633 RC = &X86::VR128RegClass;
2634 else if (RegVT == MVT::x86mmx)
2635 RC = &X86::VR64RegClass;
2636 else if (RegVT == MVT::i1)
2637 RC = &X86::VK1RegClass;
2638 else if (RegVT == MVT::v8i1)
2639 RC = &X86::VK8RegClass;
2640 else if (RegVT == MVT::v16i1)
2641 RC = &X86::VK16RegClass;
2642 else if (RegVT == MVT::v32i1)
2643 RC = &X86::VK32RegClass;
2644 else if (RegVT == MVT::v64i1)
2645 RC = &X86::VK64RegClass;
2647 llvm_unreachable("Unknown argument type!");
2649 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2650 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2652 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2653 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2655 if (VA.getLocInfo() == CCValAssign::SExt)
2656 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2657 DAG.getValueType(VA.getValVT()));
2658 else if (VA.getLocInfo() == CCValAssign::ZExt)
2659 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2660 DAG.getValueType(VA.getValVT()));
2661 else if (VA.getLocInfo() == CCValAssign::BCvt)
2662 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
2664 if (VA.isExtInLoc()) {
2665 // Handle MMX values passed in XMM regs.
2666 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
2667 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2669 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2672 assert(VA.isMemLoc());
2673 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2676 // If the value is passed via a pointer, do a load.
2677 if (VA.getLocInfo() == CCValAssign::Indirect)
2679 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
2681 InVals.push_back(ArgValue);
2684 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2685 // Swift calling convention does not require we copy the sret argument
2686 // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
2687 if (CallConv == CallingConv::Swift)
2690 // All x86 ABIs require that for returning structs by value we copy the
2691 // sret argument into %rax/%eax (depending on ABI) for the return. Save
2692 // the argument into a virtual register so that we can access it from the return points.
2694 if (Ins[i].Flags.isSRet()) {
2695 unsigned Reg = FuncInfo->getSRetReturnReg();
2697 MVT PtrTy = getPointerTy(DAG.getDataLayout());
2698 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2699 FuncInfo->setSRetReturnReg(Reg);
2701 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2702 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2707 unsigned StackSize = CCInfo.getNextStackOffset();
2708 // Align stack specially for tail calls.
2709 if (shouldGuaranteeTCO(CallConv,
2710 MF.getTarget().Options.GuaranteedTailCallOpt))
2711 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2713 // If the function takes a variable number of arguments, make a frame index for
2714 // the start of the first vararg value... for expansion of llvm.va_start. We
2715 // can skip this if there are no va_start calls.
2716 if (MFI->hasVAStart() &&
2717 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2718 CallConv != CallingConv::X86_ThisCall))) {
2719 FuncInfo->setVarArgsFrameIndex(
2720 MFI->CreateFixedObject(1, StackSize, true));
2723 // Figure out if XMM registers are in use.
2724 assert(!(Subtarget.useSoftFloat() &&
2725 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2726 "SSE register cannot be used when SSE is disabled!");
2728 // 64-bit calling conventions support varargs and register parameters, so we
2729 // have to do extra work to spill them in the prologue.
2730 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2731 // Find the first unallocated argument registers.
2732 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2733 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2734 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
2735 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
2736 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
2737 "SSE register cannot be used when SSE is disabled!");
2739 // Gather all the live in physical registers.
2740 SmallVector<SDValue, 6> LiveGPRs;
2741 SmallVector<SDValue, 8> LiveXMMRegs;
2743 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2744 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2746 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2748 if (!ArgXMMs.empty()) {
2749 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2750 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2751 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2752 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2753 LiveXMMRegs.push_back(
2754 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2759 // Get to the caller-allocated home save location. Add 8 to account
2760 // for the return address.
2761 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2762 FuncInfo->setRegSaveFrameIndex(
2763 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2764 // Fixup to set vararg frame on shadow area (4 x i64).
2766 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2768 // For X86-64, if there are vararg parameters that are passed via
2769 // registers, then we must store them to their spots on the stack so
2770 // they may be loaded by dereferencing the result of va_next.
2771 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2772 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2773 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2774 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
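// Illustrative sketch (SysV AMD64 case, not Win64): with no fixed integer or
// vector arguments the register save area is 6*8 + 8*16 = 176 bytes;
// gp_offset starts at NumIntRegs*8 and fp_offset at 48 + NumXMMRegs*16,
// matching the layout that va_arg expects to walk.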
2777 // Store the integer parameter registers.
2778 SmallVector<SDValue, 8> MemOps;
2779 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2780 getPointerTy(DAG.getDataLayout()));
2781 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2782 for (SDValue Val : LiveGPRs) {
2783 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
2784 RSFIN, DAG.getIntPtrConstant(Offset, dl));
2786 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2787 MachinePointerInfo::getFixedStack(
2788 DAG.getMachineFunction(),
2789 FuncInfo->getRegSaveFrameIndex(), Offset));
2790 MemOps.push_back(Store);
2794 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2795 // Now store the XMM (fp + vector) parameter registers.
2796 SmallVector<SDValue, 12> SaveXMMOps;
2797 SaveXMMOps.push_back(Chain);
2798 SaveXMMOps.push_back(ALVal);
2799 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2800 FuncInfo->getRegSaveFrameIndex(), dl));
2801 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2802 FuncInfo->getVarArgsFPOffset(), dl));
2803 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2805 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2806 MVT::Other, SaveXMMOps));
2809 if (!MemOps.empty())
2810 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2813 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2814 // Find the largest legal vector type.
2815 MVT VecVT = MVT::Other;
2816 // FIXME: Only some x86_32 calling conventions support AVX512.
2817 if (Subtarget.hasAVX512() &&
2818 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2819 CallConv == CallingConv::Intel_OCL_BI)))
2820 VecVT = MVT::v16f32;
2821 else if (Subtarget.hasAVX())
2822 VecVT = MVT::v8f32;
2823 else if (Subtarget.hasSSE2())
2824 VecVT = MVT::v4f32;
2826 // We forward some GPRs and some vector types.
2827 SmallVector<MVT, 2> RegParmTypes;
2828 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2829 RegParmTypes.push_back(IntVT);
2830 if (VecVT != MVT::Other)
2831 RegParmTypes.push_back(VecVT);
2833 // Compute the set of forwarded registers. The rest are scratch.
2834 SmallVectorImpl<ForwardedRegister> &Forwards =
2835 FuncInfo->getForwardedMustTailRegParms();
2836 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2838 // Conservatively forward AL on x86_64, since it might be used for varargs.
2839 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2840 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2841 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2844 // Copy all forwards from physical to virtual registers.
2845 for (ForwardedRegister &F : Forwards) {
2846 // FIXME: Can we use a less constrained schedule?
2847 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2848 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2849 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2853 // Some CCs need callee pop.
2854 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2855 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2856 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2857 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
2858 // X86 interrupts must pop the error code if present
2859 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 8 : 4);
2861 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2862 // If this is an sret function, the return should pop the hidden pointer.
2863 if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
2864 !Subtarget.getTargetTriple().isOSMSVCRT() &&
2865 argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
2866 FuncInfo->setBytesToPopOnReturn(4);
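// For example, on 32-bit Linux and Darwin a callee taking a hidden sret
// pointer returns with `ret $4`, popping the pointer its caller pushed;
// MSVC-style targets instead leave that slot for the caller to clean up.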
2870 // RegSaveFrameIndex is X86-64 only.
2871 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2872 if (CallConv == CallingConv::X86_FastCall ||
2873 CallConv == CallingConv::X86_ThisCall)
2874 // fastcall and thiscall functions can't have varargs.
2875 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2878 FuncInfo->setArgumentStackSize(StackSize);
2880 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
2881 EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
2882 if (Personality == EHPersonality::CoreCLR) {
2884 // TODO: Add a mechanism to frame lowering that will allow us to indicate
2885 // that we'd prefer this slot be allocated towards the bottom of the frame
2886 // (i.e. near the stack pointer after allocating the frame). Every
2887 // funclet needs a copy of this slot in its (mostly empty) frame, and the
2888 // offset from the bottom of this and each funclet's frame must be the
2889 // same, so the size of funclets' (mostly empty) frames is dictated by
2890 // how far this slot is from the bottom (since they allocate just enough
2891 // space to accommodate holding this slot at the correct offset).
2892 int PSPSymFI = MFI->CreateStackObject(8, 8, /*isSS=*/false);
2893 EHInfo->PSPSymFrameIdx = PSPSymFI;
2900 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
2901 SDValue Arg, const SDLoc &dl,
2903 const CCValAssign &VA,
2904 ISD::ArgFlagsTy Flags) const {
2905 unsigned LocMemOffset = VA.getLocMemOffset();
2906 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
2907 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
2909 if (Flags.isByVal())
2910 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2912 return DAG.getStore(
2913 Chain, dl, Arg, PtrOff,
2914 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
2917 /// Emit a load of return address if tail call
2918 /// optimization is performed and it is required.
2919 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
2920 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
2921 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
2922 // Adjust the Return address stack slot.
2923 EVT VT = getPointerTy(DAG.getDataLayout());
2924 OutRetAddr = getReturnAddressFrameIndex(DAG);
2926 // Load the "old" Return address.
2927 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
2928 return SDValue(OutRetAddr.getNode(), 1);
2931 /// Emit a store of the return address if tail call
2932 /// optimization is performed and it is required (FPDiff!=0).
2933 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2934 SDValue Chain, SDValue RetAddrFrIdx,
2935 EVT PtrVT, unsigned SlotSize,
2936 int FPDiff, const SDLoc &dl) {
2937 // Store the return address to the appropriate stack slot.
2938 if (!FPDiff) return Chain;
2939 // Calculate the new stack slot for the return address.
2940 int NewReturnAddrFI =
2941 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2943 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2944 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2945 MachinePointerInfo::getFixedStack(
2946 DAG.getMachineFunction(), NewReturnAddrFI));
2950 /// Returns a vector_shuffle node for a movs{s|d} or movd
2951 /// operation of the specified width.
2952 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
2954 unsigned NumElems = VT.getVectorNumElements();
2955 SmallVector<int, 8> Mask;
2956 Mask.push_back(NumElems);
2957 for (unsigned i = 1; i != NumElems; ++i)
2958 Mask.push_back(i);
2959 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
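// For example, for v4f32 this builds the shuffle mask <4,1,2,3>: element 0 is
// taken from V2 and elements 1-3 from V1, which is exactly the MOVSS pattern.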
2963 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2964 SmallVectorImpl<SDValue> &InVals) const {
2965 SelectionDAG &DAG = CLI.DAG;
2967 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2968 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2969 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2970 SDValue Chain = CLI.Chain;
2971 SDValue Callee = CLI.Callee;
2972 CallingConv::ID CallConv = CLI.CallConv;
2973 bool &isTailCall = CLI.IsTailCall;
2974 bool isVarArg = CLI.IsVarArg;
2976 MachineFunction &MF = DAG.getMachineFunction();
2977 bool Is64Bit = Subtarget.is64Bit();
2978 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
2979 StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
2980 bool IsSibcall = false;
2981 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2982 auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
2984 if (CallConv == CallingConv::X86_INTR)
2985 report_fatal_error("X86 interrupts may not be called directly");
2987 if (Attr.getValueAsString() == "true")
2990 if (Subtarget.isPICStyleGOT() &&
2991 !MF.getTarget().Options.GuaranteedTailCallOpt) {
2992 // If we are using a GOT, disable tail calls to external symbols with
2993 // default visibility. Tail calling such a symbol requires using a GOT
2994 // relocation, which forces early binding of the symbol. This breaks code
2995 // that requires lazy function symbol resolution. Using musttail or
2996 // GuaranteedTailCallOpt will override this.
2997 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2998 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
2999 G->getGlobal()->hasDefaultVisibility()))
3003 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
3005 // Force this to be a tail call. The verifier rules are enough to ensure
3006 // that we can lower this successfully without moving the return address
3007 // around.
3009 } else if (isTailCall) {
3010 // Check if it's really possible to do a tail call.
3011 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3012 isVarArg, SR != NotStructReturn,
3013 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
3014 Outs, OutVals, Ins, DAG);
3016 // Sibcalls are automatically detected tailcalls which do not require
3017 // ABI changes.
3018 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
3025 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3026 "Var args not supported with calling convention fastcc, ghc or hipe");
3028 // Analyze operands of the call, assigning locations to each operand.
3029 SmallVector<CCValAssign, 16> ArgLocs;
3030 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3032 // Allocate shadow area for Win64
3034 CCInfo.AllocateStack(32, 8);
3036 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3038 // Get a count of how many bytes are to be pushed on the stack.
3039 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3041 // This is a sibcall. The memory operands are already available in the
3042 // caller's incoming argument area on the stack.
3044 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
3045 canGuaranteeTCO(CallConv))
3046 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3049 if (isTailCall && !IsSibcall && !IsMustTail) {
3050 // Lower arguments at fp - stackoffset + fpdiff.
3051 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3053 FPDiff = NumBytesCallerPushed - NumBytes;
3055 // Set the delta of movement of the returnaddr stackslot.
3056 // But only set if delta is greater than previous delta.
3057 if (FPDiff < X86Info->getTCReturnAddrDelta())
3058 X86Info->setTCReturnAddrDelta(FPDiff);
3061 unsigned NumBytesToPush = NumBytes;
3062 unsigned NumBytesToPop = NumBytes;
3064 // If we have an inalloca argument, all stack space has already been allocated
3065 // for us and will be right at the top of the stack. We don't support multiple
3066 // arguments passed in memory when using inalloca.
3067 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3069 if (!ArgLocs.back().isMemLoc())
3070 report_fatal_error("cannot use inalloca attribute on a register "
3072 if (ArgLocs.back().getLocMemOffset() != 0)
3073 report_fatal_error("any parameter with the inalloca attribute must be "
3074 "the only memory argument");
3078 Chain = DAG.getCALLSEQ_START(
3079 Chain, DAG.getIntPtrConstant(NumBytesToPush, dl, true), dl);
3081 SDValue RetAddrFrIdx;
3082 // Load return address for tail calls.
3083 if (isTailCall && FPDiff)
3084 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3085 Is64Bit, FPDiff, dl);
3087 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3088 SmallVector<SDValue, 8> MemOpChains;
3091 // Walk the register/memloc assignments, inserting copies/loads. In the case
3092 // of tail call optimization, arguments are handled later.
3093 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3094 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3095 // Skip inalloca arguments, they have already been written.
3096 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3097 if (Flags.isInAlloca())
3100 CCValAssign &VA = ArgLocs[i];
3101 EVT RegVT = VA.getLocVT();
3102 SDValue Arg = OutVals[i];
3103 bool isByVal = Flags.isByVal();
3105 // Promote the value if needed.
3106 switch (VA.getLocInfo()) {
3107 default: llvm_unreachable("Unknown loc info!");
3108 case CCValAssign::Full: break;
3109 case CCValAssign::SExt:
3110 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3112 case CCValAssign::ZExt:
3113 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
3115 case CCValAssign::AExt:
3116 if (Arg.getValueType().isVector() &&
3117 Arg.getValueType().getVectorElementType() == MVT::i1)
3118 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3119 else if (RegVT.is128BitVector()) {
3120 // Special case: passing MMX values in XMM registers.
3121 Arg = DAG.getBitcast(MVT::i64, Arg);
3122 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
3123 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
3125 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
3127 case CCValAssign::BCvt:
3128 Arg = DAG.getBitcast(RegVT, Arg);
3130 case CCValAssign::Indirect: {
3131 // Store the argument.
3132 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
3133 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3134 Chain = DAG.getStore(
3135 Chain, dl, Arg, SpillSlot,
3136 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3142 if (VA.isRegLoc()) {
3143 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3144 if (isVarArg && IsWin64) {
3145 // The Win64 ABI requires an argument passed in an XMM register to also be
3146 // copied to the corresponding shadow GPR if the callee is a varargs function.
3147 unsigned ShadowReg = 0;
3148 switch (VA.getLocReg()) {
3149 case X86::XMM0: ShadowReg = X86::RCX; break;
3150 case X86::XMM1: ShadowReg = X86::RDX; break;
3151 case X86::XMM2: ShadowReg = X86::R8; break;
3152 case X86::XMM3: ShadowReg = X86::R9; break;
3155 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
3157 } else if (!IsSibcall && (!isTailCall || isByVal)) {
3158 assert(VA.isMemLoc());
3159 if (!StackPtr.getNode())
3160 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3161 getPointerTy(DAG.getDataLayout()));
3162 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
3163 dl, DAG, VA, Flags));
3167 if (!MemOpChains.empty())
3168 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
3170 if (Subtarget.isPICStyleGOT()) {
3171 // ELF / PIC requires the GOT pointer in the EBX register before function
3172 // calls made via the PLT.
3174 RegsToPass.push_back(std::make_pair(
3175 unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
3176 getPointerTy(DAG.getDataLayout()))));
3178 // If we are tail calling and generating PIC/GOT style code, load the
3179 // address of the callee into ECX. The value in ECX is used as the target of
3180 // the tail jump. This is done to circumvent the ebx/callee-saved problem
3181 // for tail calls on PIC/GOT architectures. Normally we would just put the
3182 // address of the GOT into ebx and then call target@PLT. But for tail calls
3183 // ebx would be restored (since ebx is callee saved) before jumping to the
3184 // callee.
3186 // Note: The actual moving to ECX is done further down.
3187 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3188 if (G && !G->getGlobal()->hasLocalLinkage() &&
3189 G->getGlobal()->hasDefaultVisibility())
3190 Callee = LowerGlobalAddress(Callee, DAG);
3191 else if (isa<ExternalSymbolSDNode>(Callee))
3192 Callee = LowerExternalSymbol(Callee, DAG);
3196 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3197 // From AMD64 ABI document:
3198 // For calls that may call functions that use varargs or stdargs
3199 // (prototype-less calls or calls to functions containing ellipsis (...) in
3200 // the declaration) %al is used as hidden argument to specify the number
3201 // of SSE registers used. The contents of %al do not need to match exactly
3202 // the number of registers, but must be an upper bound on the number of SSE
3203 // registers used and is in the range 0 - 8 inclusive.
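// For example, a call such as printf("%f", X) may set %al to 1 because one
// XMM register carries an argument; passing 8 is always a safe upper bound.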
3205 // Count the number of XMM registers allocated.
3206 static const MCPhysReg XMMArgRegs[] = {
3207 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3208 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3210 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3211 assert((Subtarget.hasSSE1() || !NumXMMRegs)
3212 && "SSE registers cannot be used when SSE is disabled");
3214 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3215 DAG.getConstant(NumXMMRegs, dl,
3219 if (isVarArg && IsMustTail) {
3220 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3221 for (const auto &F : Forwards) {
3222 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3223 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3227 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3228 // don't need this because the eligibility check rejects calls that require
3229 // shuffling arguments passed in memory.
3230 if (!IsSibcall && isTailCall) {
3231 // Force all the incoming stack arguments to be loaded from the stack
3232 // before any new outgoing arguments are stored to the stack, because the
3233 // outgoing stack slots may alias the incoming argument stack slots, and
3234 // the alias isn't otherwise explicit. This is slightly more conservative
3235 // than necessary, because it means that each store effectively depends
3236 // on every argument instead of just those arguments it would clobber.
3237 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3239 SmallVector<SDValue, 8> MemOpChains2;
3242 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3243 CCValAssign &VA = ArgLocs[i];
3246 assert(VA.isMemLoc());
3247 SDValue Arg = OutVals[i];
3248 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3249 // Skip inalloca arguments. They don't require any work.
3250 if (Flags.isInAlloca())
3252 // Create frame index.
3253 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3254 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3255 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3256 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3258 if (Flags.isByVal()) {
3259 // Copy relative to framepointer.
3260 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
3261 if (!StackPtr.getNode())
3262 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3263 getPointerTy(DAG.getDataLayout()));
3264 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3267 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3271 // Store relative to framepointer.
3272 MemOpChains2.push_back(DAG.getStore(
3273 ArgChain, dl, Arg, FIN,
3274 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
3278 if (!MemOpChains2.empty())
3279 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3281 // Store the return address to the appropriate stack slot.
3282 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3283 getPointerTy(DAG.getDataLayout()),
3284 RegInfo->getSlotSize(), FPDiff, dl);
3287 // Build a sequence of copy-to-reg nodes chained together with token chain
3288 // and flag operands which copy the outgoing args into registers.
3290 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3291 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3292 RegsToPass[i].second, InFlag);
3293 InFlag = Chain.getValue(1);
3296 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3297 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3298 // In the 64-bit large code model, we have to make all calls
3299 // through a register, since the call instruction's 32-bit
3300 // pc-relative offset may not be large enough to hold the whole
3301 // address.
3302 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3303 // If the callee is a GlobalAddress node (quite common, every direct call
3304 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3305 // it.
3306 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3308 // We should use extra load for direct calls to dllimported functions in
3310 const GlobalValue *GV = G->getGlobal();
3311 if (!GV->hasDLLImportStorageClass()) {
3312 unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
3314 Callee = DAG.getTargetGlobalAddress(
3315 GV, dl, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
3317 if (OpFlags == X86II::MO_GOTPCREL) {
3319 Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
3320 getPointerTy(DAG.getDataLayout()), Callee);
3321 // Add extra indirection
3322 Callee = DAG.getLoad(
3323 getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
3324 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3327 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3328 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
3329 unsigned char OpFlags =
3330 Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
3332 Callee = DAG.getTargetExternalSymbol(
3333 S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
3334 } else if (Subtarget.isTarget64BitILP32() &&
3335 Callee->getValueType(0) == MVT::i32) {
3336 // Zero-extend the 32-bit Callee address into a 64-bit one according to the x32 ABI.
3337 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3340 // Returns a chain & a flag for retval copy to use.
3341 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3342 SmallVector<SDValue, 8> Ops;
3344 if (!IsSibcall && isTailCall) {
3345 Chain = DAG.getCALLSEQ_END(Chain,
3346 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
3347 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
3348 InFlag = Chain.getValue(1);
3351 Ops.push_back(Chain);
3352 Ops.push_back(Callee);
3355 Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
3357 // Add argument registers to the end of the list so that they are known live
3359 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3360 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3361 RegsToPass[i].second.getValueType()));
3363 // Add a register mask operand representing the call-preserved registers.
3364 const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
3365 assert(Mask && "Missing call preserved mask for calling convention");
3367 // If this is an invoke in a 32-bit function using a funclet-based
3368 // personality, assume the function clobbers all registers. If an exception
3369 // is thrown, the runtime will not restore CSRs.
3370 // FIXME: Model this more precisely so that we can register allocate across
3371 // the normal edge and spill and fill across the exceptional edge.
3372 if (!Is64Bit && CLI.CS && CLI.CS->isInvoke()) {
3373 const Function *CallerFn = MF.getFunction();
3374 EHPersonality Pers =
3375 CallerFn->hasPersonalityFn()
3376 ? classifyEHPersonality(CallerFn->getPersonalityFn())
3377 : EHPersonality::Unknown;
3378 if (isFuncletEHPersonality(Pers))
3379 Mask = RegInfo->getNoPreservedMask();
3382 Ops.push_back(DAG.getRegisterMask(Mask));
3384 if (InFlag.getNode())
3385 Ops.push_back(InFlag);
3389 //// If this is the first return lowered for this function, add the regs
3390 //// to the liveout set for the function.
3391 // This isn't right, although it's probably harmless on x86; liveouts
3392 // should be computed from returns not tail calls. Consider a void
3393 // function making a tail call to a function returning int.
3394 MF.getFrameInfo()->setHasTailCall();
3395 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3398 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3399 InFlag = Chain.getValue(1);
3401 // Create the CALLSEQ_END node.
3402 unsigned NumBytesForCalleeToPop;
3403 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3404 DAG.getTarget().Options.GuaranteedTailCallOpt))
3405 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3406 else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3407 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3408 SR == StackStructReturn)
3409 // If this is a call to a struct-return function, the callee
3410 // pops the hidden struct pointer, so we have to push it back.
3411 // This is common for Darwin/X86, Linux & Mingw32 targets.
3412 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3413 NumBytesForCalleeToPop = 4;
3415 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3417 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
3418 // No need to reset the stack after the call if the call doesn't return. To
3419 // keep the MachineInstr verifier happy, we'll pretend the callee does it for us.
3420 NumBytesForCalleeToPop = NumBytes;
3423 // Returns a flag for retval copy to use.
3425 Chain = DAG.getCALLSEQ_END(Chain,
3426 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
3427 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
3430 InFlag = Chain.getValue(1);
3433 // Handle result values, copying them out of physregs into vregs that we
3435 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3436 Ins, dl, DAG, InVals);
3439 //===----------------------------------------------------------------------===//
3440 // Fast Calling Convention (tail call) implementation
3441 //===----------------------------------------------------------------------===//
3443 // Like stdcall, the callee cleans up the arguments, except that ECX is
3444 // reserved for storing the address of the tail-called function. Only 2
3445 // registers are free for argument passing (inreg). Tail call optimization is
3446 // performed provided:
3447 // * tailcallopt is enabled
3448 // * caller/callee are fastcc
3449 // On X86_64 architecture with GOT-style position independent code only local
3450 // (within module) calls are supported at the moment.
3451 // To keep the stack aligned according to the platform ABI, the function
3452 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3453 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3454 // If a tail-called callee has more arguments than the caller, the caller
3455 // needs to make sure that there is room to move the RETADDR to. This is
3456 // achieved by reserving an area the size of the argument delta right after the
3457 // original RETADDR, but before the saved framepointer or the spilled registers
3458 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
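// A rough sketch of the resulting 32-bit stack layout (frame pointer and
// callee-saved registers shown only for illustration):
//   arg1
//   arg2
//   RETADDR
//   [ new RETADDR
//     move area ]
//   (possible EBP)
//   ESI
//   EDI
//   local1 ..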
3470 /// Make the stack size aligned, e.g. 16n + 12 bytes for a 16-byte alignment
3471 /// requirement.
3473 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3474 SelectionDAG& DAG) const {
3475 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3476 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3477 unsigned StackAlignment = TFI.getStackAlignment();
3478 uint64_t AlignMask = StackAlignment - 1;
3479 int64_t Offset = StackSize;
3480 unsigned SlotSize = RegInfo->getSlotSize();
3481 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3482 // Number smaller than 12 so just add the difference.
3483 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3485 // Mask out lower bits, add stackalignment once plus the 12 bytes.
3486 Offset = ((~AlignMask) & Offset) + StackAlignment +
3487 (StackAlignment-SlotSize);
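// Worked example (32-bit: StackAlignment = 16, SlotSize = 4, target residue
// 12): StackSize = 20 takes the first branch and becomes 20 + (12 - 4) = 28;
// StackSize = 30 takes the second branch and becomes (30 & ~15) + 16 + 12 = 44.
// Both results are of the form 16n + 12, so the stack is 16-byte aligned again
// once the return address slot is accounted for.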
3492 /// Return true if the given stack call argument is already available in the
3493 /// same position (relatively) of the caller's incoming argument stack.
3495 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3496 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3497 const X86InstrInfo *TII, const CCValAssign &VA) {
3498 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3501 // Look through nodes that don't alter the bits of the incoming value.
3502 unsigned Op = Arg.getOpcode();
3503 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
3504 Arg = Arg.getOperand(0);
3507 if (Op == ISD::TRUNCATE) {
3508 const SDValue &TruncInput = Arg.getOperand(0);
3509 if (TruncInput.getOpcode() == ISD::AssertZext &&
3510 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
3511 Arg.getValueType()) {
3512 Arg = TruncInput.getOperand(0);
3520 if (Arg.getOpcode() == ISD::CopyFromReg) {
3521 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3522 if (!TargetRegisterInfo::isVirtualRegister(VR))
3524 MachineInstr *Def = MRI->getVRegDef(VR);
3527 if (!Flags.isByVal()) {
3528 if (!TII->isLoadFromStackSlot(*Def, FI))
3531 unsigned Opcode = Def->getOpcode();
3532 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3533 Opcode == X86::LEA64_32r) &&
3534 Def->getOperand(1).isFI()) {
3535 FI = Def->getOperand(1).getIndex();
3536 Bytes = Flags.getByValSize();
3540 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3541 if (Flags.isByVal())
3542 // ByVal argument is passed in as a pointer but it's now being
3543 // dereferenced. e.g.
3544 // define @foo(%struct.X* %A) {
3545 // tail call @bar(%struct.X* byval %A)
3548 SDValue Ptr = Ld->getBasePtr();
3549 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3552 FI = FINode->getIndex();
3553 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3554 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3555 FI = FINode->getIndex();
3556 Bytes = Flags.getByValSize();
3560 assert(FI != INT_MAX);
3561 if (!MFI->isFixedObjectIndex(FI))
3564 if (Offset != MFI->getObjectOffset(FI))
3567 if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
3568 // If the argument location is wider than the argument type, check that any
3569 // extension flags match.
3570 if (Flags.isZExt() != MFI->isObjectZExt(FI) ||
3571 Flags.isSExt() != MFI->isObjectSExt(FI)) {
3576 return Bytes == MFI->getObjectSize(FI);
3579 /// Check whether the call is eligible for tail call optimization. Targets
3580 /// that want to do tail call optimization should implement this function.
3581 bool X86TargetLowering::IsEligibleForTailCallOptimization(
3582 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
3583 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
3584 const SmallVectorImpl<ISD::OutputArg> &Outs,
3585 const SmallVectorImpl<SDValue> &OutVals,
3586 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
3587 if (!mayTailCallThisCC(CalleeCC))
3590 // If -tailcallopt is specified, make fastcc functions tail-callable.
3591 MachineFunction &MF = DAG.getMachineFunction();
3592 const Function *CallerF = MF.getFunction();
3594 // If the function return type is x86_fp80 and the callee return type is not,
3595 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3596 // perform a tailcall optimization here.
3597 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3600 CallingConv::ID CallerCC = CallerF->getCallingConv();
3601 bool CCMatch = CallerCC == CalleeCC;
3602 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
3603 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
3605 // Win64 functions have extra shadow space for argument homing. Don't do the
3606 // sibcall if the caller and callee have mismatched expectations for this
3607 // space.
3608 if (IsCalleeWin64 != IsCallerWin64)
3611 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3612 if (canGuaranteeTCO(CalleeCC) && CCMatch)
3617 // Look for obvious safe cases to perform tail call optimization that do not
3618 // require ABI changes. This is what gcc calls sibcall.
3620 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3621 // emit a special epilogue.
3622 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3623 if (RegInfo->needsStackRealignment(MF))
3626 // Also avoid sibcall optimization if either caller or callee uses struct
3627 // return semantics.
3628 if (isCalleeStructRet || isCallerStructRet)
3631 // Do not sibcall optimize vararg calls unless all arguments are passed via
3632 // registers.
3633 LLVMContext &C = *DAG.getContext();
3634 if (isVarArg && !Outs.empty()) {
3635 // Optimizing for varargs on Win64 is unlikely to be safe without
3636 // additional testing.
3637 if (IsCalleeWin64 || IsCallerWin64)
3640 SmallVector<CCValAssign, 16> ArgLocs;
3641 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
3643 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3644 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3645 if (!ArgLocs[i].isRegLoc())
3649 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3650 // stack. Therefore, if it's not used by the call it is not safe to optimize
3651 // this into a sibcall.
3652 bool Unused = false;
3653 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3660 SmallVector<CCValAssign, 16> RVLocs;
3661 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
3662 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3663 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3664 CCValAssign &VA = RVLocs[i];
3665 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3670 // Check that the call results are passed in the same way.
3671 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
3672 RetCC_X86, RetCC_X86))
3674 // The callee has to preserve all registers the caller needs to preserve.
3675 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
3676 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
3678 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
3679 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
3683 unsigned StackArgsSize = 0;
3685 // If the callee takes no arguments then go on to check the results of the
3686 // call.
3687 if (!Outs.empty()) {
3688 // Check if stack adjustment is needed. For now, do not do this if any
3689 // argument is passed on the stack.
3690 SmallVector<CCValAssign, 16> ArgLocs;
3691 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
3693 // Allocate shadow area for Win64
3695 CCInfo.AllocateStack(32, 8);
3697 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3698 StackArgsSize = CCInfo.getNextStackOffset();
3700 if (CCInfo.getNextStackOffset()) {
3701 // Check if the arguments are already laid out the same way as the
3702 // caller's fixed stack objects.
3703 MachineFrameInfo *MFI = MF.getFrameInfo();
3704 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3705 const X86InstrInfo *TII = Subtarget.getInstrInfo();
3706 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3707 CCValAssign &VA = ArgLocs[i];
3708 SDValue Arg = OutVals[i];
3709 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3710 if (VA.getLocInfo() == CCValAssign::Indirect)
3712 if (!VA.isRegLoc()) {
3713 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3720 bool PositionIndependent = isPositionIndependent();
3721 // If the tailcall address may be in a register, then make sure it's
3722 // possible to register allocate for it. In 32-bit, the call address can
3723 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3724 // callee-saved registers are restored. These happen to be the same
3725 // registers used to pass 'inreg' arguments so watch out for those.
3726 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
3727 !isa<ExternalSymbolSDNode>(Callee)) ||
3728 PositionIndependent)) {
3729 unsigned NumInRegs = 0;
3730 // In PIC we need an extra register to formulate the address computation
3731 // for it.
3732 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
3734 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3735 CCValAssign &VA = ArgLocs[i];
3738 unsigned Reg = VA.getLocReg();
3741 case X86::EAX: case X86::EDX: case X86::ECX:
3742 if (++NumInRegs == MaxInRegs)
3749 const MachineRegisterInfo &MRI = MF.getRegInfo();
3750 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
3754 bool CalleeWillPop =
3755 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
3756 MF.getTarget().Options.GuaranteedTailCallOpt);
3758 if (unsigned BytesToPop =
3759 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
3760 // If we have bytes to pop, the callee must pop them.
3761 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
3762 if (!CalleePopMatches)
3764 } else if (CalleeWillPop && StackArgsSize > 0) {
3765 // If we don't have bytes to pop, make sure the callee doesn't pop any.
3773 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3774 const TargetLibraryInfo *libInfo) const {
3775 return X86::createFastISel(funcInfo, libInfo);
3778 //===----------------------------------------------------------------------===//
3779 // Other Lowering Hooks
3780 //===----------------------------------------------------------------------===//
3782 static bool MayFoldLoad(SDValue Op) {
3783 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3786 static bool MayFoldIntoStore(SDValue Op) {
3787 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3790 static bool isTargetShuffle(unsigned Opcode) {
3792 default: return false;
3793 case X86ISD::BLENDI:
3794 case X86ISD::PSHUFB:
3795 case X86ISD::PSHUFD:
3796 case X86ISD::PSHUFHW:
3797 case X86ISD::PSHUFLW:
3799 case X86ISD::INSERTPS:
3800 case X86ISD::PALIGNR:
3801 case X86ISD::VSHLDQ:
3802 case X86ISD::VSRLDQ:
3803 case X86ISD::MOVLHPS:
3804 case X86ISD::MOVLHPD:
3805 case X86ISD::MOVHLPS:
3806 case X86ISD::MOVLPS:
3807 case X86ISD::MOVLPD:
3808 case X86ISD::MOVSHDUP:
3809 case X86ISD::MOVSLDUP:
3810 case X86ISD::MOVDDUP:
3813 case X86ISD::UNPCKL:
3814 case X86ISD::UNPCKH:
3815 case X86ISD::VBROADCAST:
3816 case X86ISD::VPERMILPI:
3817 case X86ISD::VPERMILPV:
3818 case X86ISD::VPERM2X128:
3819 case X86ISD::VPERMIL2:
3820 case X86ISD::VPERMI:
3821 case X86ISD::VPPERM:
3822 case X86ISD::VPERMV:
3823 case X86ISD::VPERMV3:
3824 case X86ISD::VZEXT_MOVL:
3829 static bool isTargetShuffleVariableMask(unsigned Opcode) {
3831 default: return false;
3832 case X86ISD::PSHUFB:
3833 case X86ISD::VPERMILPV:
3838 static SDValue getTargetShuffleNode(unsigned Opc, const SDLoc &dl, MVT VT,
3839 SDValue V1, unsigned TargetMask,
3840 SelectionDAG &DAG) {
3842 default: llvm_unreachable("Unknown x86 shuffle node");
3843 case X86ISD::PSHUFD:
3844 case X86ISD::PSHUFHW:
3845 case X86ISD::PSHUFLW:
3846 case X86ISD::VPERMILPI:
3847 case X86ISD::VPERMI:
3848 return DAG.getNode(Opc, dl, VT, V1,
3849 DAG.getConstant(TargetMask, dl, MVT::i8));
3853 static SDValue getTargetShuffleNode(unsigned Opc, const SDLoc &dl, MVT VT,
3854 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3856 default: llvm_unreachable("Unknown x86 shuffle node");
3857 case X86ISD::MOVLHPS:
3858 case X86ISD::MOVLHPD:
3859 case X86ISD::MOVHLPS:
3860 case X86ISD::MOVLPS:
3861 case X86ISD::MOVLPD:
3864 case X86ISD::UNPCKL:
3865 case X86ISD::UNPCKH:
3866 return DAG.getNode(Opc, dl, VT, V1, V2);
3870 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3871 MachineFunction &MF = DAG.getMachineFunction();
3872 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3873 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3874 int ReturnAddrIndex = FuncInfo->getRAIndex();
3876 if (ReturnAddrIndex == 0) {
3877 // Set up a frame object for the return address.
3878 unsigned SlotSize = RegInfo->getSlotSize();
3879 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3882 FuncInfo->setRAIndex(ReturnAddrIndex);
3885 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
3888 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3889 bool hasSymbolicDisplacement) {
3890 // Offset should fit into 32 bit immediate field.
3891 if (!isInt<32>(Offset))
3894 // If we don't have a symbolic displacement - we don't have any extra
3895 // restrictions.
3896 if (!hasSymbolicDisplacement)
3899 // FIXME: Some tweaks might be needed for medium code model.
3900 if (M != CodeModel::Small && M != CodeModel::Kernel)
3903 // For the small code model we assume that the latest object is 16MB below the
3904 // end of the 31-bit address boundary. We may also accept pretty large negative
3905 // constants, knowing that all objects are in the positive half of the address space.
3906 if (M == CodeModel::Small && Offset < 16*1024*1024)
3909 // For the kernel code model we know that all objects reside in the negative
3910 // half of the 32-bit address space. We may not accept negative offsets, since
3911 // they may be just off, and we may accept pretty large positive ones.
3912 if (M == CodeModel::Kernel && Offset >= 0)
3918 /// Determines whether the callee is required to pop its own arguments.
3919 /// Callee pop is necessary to support tail calls.
3920 bool X86::isCalleePop(CallingConv::ID CallingConv,
3921 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
3922 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
3923 // can guarantee TCO.
3924 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
3927 switch (CallingConv) {
3930 case CallingConv::X86_StdCall:
3931 case CallingConv::X86_FastCall:
3932 case CallingConv::X86_ThisCall:
3933 case CallingConv::X86_VectorCall:
3938 /// \brief Return true if the condition is an unsigned comparison operation.
3939 static bool isX86CCUnsigned(unsigned X86CC) {
3942 llvm_unreachable("Invalid integer condition!");
3958 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
3959 switch (SetCCOpcode) {
3960 default: llvm_unreachable("Invalid integer condition!");
3961 case ISD::SETEQ: return X86::COND_E;
3962 case ISD::SETGT: return X86::COND_G;
3963 case ISD::SETGE: return X86::COND_GE;
3964 case ISD::SETLT: return X86::COND_L;
3965 case ISD::SETLE: return X86::COND_LE;
3966 case ISD::SETNE: return X86::COND_NE;
3967 case ISD::SETULT: return X86::COND_B;
3968 case ISD::SETUGT: return X86::COND_A;
3969 case ISD::SETULE: return X86::COND_BE;
3970 case ISD::SETUGE: return X86::COND_AE;
3974 /// Do a one-to-one translation of a ISD::CondCode to the X86-specific
3975 /// condition code, returning the condition code and the LHS/RHS of the
3976 /// comparison to make.
3977 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
3978 bool isFP, SDValue &LHS, SDValue &RHS,
3979 SelectionDAG &DAG) {
3981 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3982 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3983 // X > -1 -> X == 0, jump !sign.
3984 RHS = DAG.getConstant(0, DL, RHS.getValueType());
3985 return X86::COND_NS;
3987 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3988 // X < 0 -> X == 0, jump on sign.
3991 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3993 RHS = DAG.getConstant(0, DL, RHS.getValueType());
3994 return X86::COND_LE;
3998 return TranslateIntegerX86CC(SetCCOpcode);
4001 // First determine if it is required or is profitable to flip the operands.
4003 // If LHS is a foldable load, but RHS is not, flip the condition.
4004 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4005 !ISD::isNON_EXTLoad(RHS.getNode())) {
4006 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4007 std::swap(LHS, RHS);
4010 switch (SetCCOpcode) {
4016 std::swap(LHS, RHS);
4020 // On a floating point condition, the flags are set as follows:
4021 //  ZF | PF | CF | result
4022 //   0 |  0 |  0 | X > Y
4023 //   0 |  0 |  1 | X < Y
4024 //   1 |  0 |  0 | X == Y
4025 //   1 |  1 |  1 | unordered
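// For example, SETOLT cannot simply be tested as "below", because CF is also
// set for unordered operands; the switch below therefore swaps the operands
// and uses COND_A, so an unordered comparison correctly fails the test.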
4026 switch (SetCCOpcode) {
4027 default: llvm_unreachable("Condcode should be pre-legalized away");
4029 case ISD::SETEQ: return X86::COND_E;
4030 case ISD::SETOLT: // flipped
4032 case ISD::SETGT: return X86::COND_A;
4033 case ISD::SETOLE: // flipped
4035 case ISD::SETGE: return X86::COND_AE;
4036 case ISD::SETUGT: // flipped
4038 case ISD::SETLT: return X86::COND_B;
4039 case ISD::SETUGE: // flipped
4041 case ISD::SETLE: return X86::COND_BE;
4043 case ISD::SETNE: return X86::COND_NE;
4044 case ISD::SETUO: return X86::COND_P;
4045 case ISD::SETO: return X86::COND_NP;
4047 case ISD::SETUNE: return X86::COND_INVALID;
4051 /// Is there a floating point cmov for the specific X86 condition code?
4052 /// The current x86 ISA includes the following FP cmov instructions:
4053 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4054 static bool hasFPCMov(unsigned X86CC) {
4071 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4073 unsigned Intrinsic) const {
4075 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4079 Info.opc = ISD::INTRINSIC_W_CHAIN;
4080 Info.readMem = false;
4081 Info.writeMem = false;
4085 switch (IntrData->Type) {
4086 case EXPAND_FROM_MEM: {
4087 Info.ptrVal = I.getArgOperand(0);
4088 Info.memVT = MVT::getVT(I.getType());
4090 Info.readMem = true;
4093 case COMPRESS_TO_MEM: {
4094 Info.ptrVal = I.getArgOperand(0);
4095 Info.memVT = MVT::getVT(I.getArgOperand(1)->getType());
4097 Info.writeMem = true;
4100 case TRUNCATE_TO_MEM_VI8:
4101 case TRUNCATE_TO_MEM_VI16:
4102 case TRUNCATE_TO_MEM_VI32: {
4103 Info.ptrVal = I.getArgOperand(0);
4104 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
4105 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4106 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4108 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4109 ScalarVT = MVT::i16;
4110 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4111 ScalarVT = MVT::i32;
4113 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4115 Info.writeMem = true;
4125 /// Returns true if the target can instruction select the
4126 /// specified FP immediate natively. If false, the legalizer will
4127 /// materialize the FP immediate as a load from a constant pool.
4128 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
4129 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
4130 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
4136 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
4137 ISD::LoadExtType ExtTy,
4139 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
4140 // relocation target a movq or addq instruction: don't let the load shrink.
4141 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
4142 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
4143 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
4144 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
4148 /// \brief Returns true if it is beneficial to convert a load of a constant
4149 /// to just the constant itself.
4150 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
4152 assert(Ty->isIntegerTy());
4154 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4155 if (BitSize == 0 || BitSize > 64)
4160 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
4161 unsigned Index) const {
4162 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
4165 return (Index == 0 || Index == ResVT.getVectorNumElements());
4168 bool X86TargetLowering::isCheapToSpeculateCttz() const {
4169 // Speculate cttz only if we can directly use TZCNT.
4170 return Subtarget.hasBMI();
4173 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
4174 // Speculate ctlz only if we can directly use LZCNT.
4175 return Subtarget.hasLZCNT();
4178 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
4179 if (!Subtarget.hasBMI())
4182 // There are only 32-bit and 64-bit forms for 'andn'.
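// BMI's ANDN computes ~X & Y in a single instruction, which is what makes the
// and-not compare pattern cheap here; it only exists for 32- and 64-bit
// operands, hence the type check below.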
4183 EVT VT = Y.getValueType();
4184 if (VT != MVT::i32 && VT != MVT::i64)
4190 /// Return true if every element in Mask, beginning
4191 /// from position Pos and ending in Pos+Size is undef.
4192 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
4193 for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
4199 /// Return true if Val is undef or if its value falls within the
4200 /// specified half-open range [Low, Hi).
4201 static bool isUndefOrInRange(int Val, int Low, int Hi) {
4202 return (Val < 0) || (Val >= Low && Val < Hi);
4205 /// Return true if every element in Mask is undef or if its value
4206 /// falls within the specified half-open range [Low, Hi).
4207 static bool isUndefOrInRange(ArrayRef<int> Mask,
4210 if (!isUndefOrInRange(M, Low, Hi))
4215 /// Val is either less than zero (undef) or equal to the specified value.
4216 static bool isUndefOrEqual(int Val, int CmpVal) {
4217 return (Val < 0 || Val == CmpVal);
4220 /// Val is either the undef or zero sentinel value.
4221 static bool isUndefOrZero(int Val) {
4222 return (Val == SM_SentinelUndef || Val == SM_SentinelZero);
4225 /// Return true if every element in Mask, beginning
4226 /// from position Pos and ending in Pos+Size, falls within the specified
4227 /// sequential half-open range [Low, Low+Size), or is undef.
4228 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
4229 unsigned Pos, unsigned Size, int Low) {
4230 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
4231 if (!isUndefOrEqual(Mask[i], Low))
4236 /// Return true if every element in Mask, beginning
4237 /// from position Pos and ending in Pos+Size, falls within the specified
4238 /// sequential half-open range [Low, Low+Size), or is undef or zero.
4239 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
4240 unsigned Size, int Low) {
4241 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
4242 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
4247 /// Return true if the specified EXTRACT_SUBVECTOR operand specifies a vector
4248 /// extract that is suitable for instructions that extract 128- or 256-bit vectors.
4249 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4250 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4251 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4254 // The index should be aligned on a vecWidth-bit boundary.
4256 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4258 MVT VT = N->getSimpleValueType(0);
4259 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4260 bool Result = (Index * ElSize) % vecWidth == 0;
4265 /// Return true if the specified INSERT_SUBVECTOR
4266 /// operand specifies a subvector insert that is suitable for the
4267 /// insertion of 128- or 256-bit subvectors.
4268 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4269 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4270 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4272 // The index should be aligned on a vecWidth-bit boundary.
4274 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4276 MVT VT = N->getSimpleValueType(0);
4277 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4278 bool Result = (Index * ElSize) % vecWidth == 0;
4283 bool X86::isVINSERT128Index(SDNode *N) {
4284 return isVINSERTIndex(N, 128);
4287 bool X86::isVINSERT256Index(SDNode *N) {
4288 return isVINSERTIndex(N, 256);
4291 bool X86::isVEXTRACT128Index(SDNode *N) {
4292 return isVEXTRACTIndex(N, 128);
4295 bool X86::isVEXTRACT256Index(SDNode *N) {
4296 return isVEXTRACTIndex(N, 256);
4299 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4300 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4301 assert(isa<ConstantSDNode>(N->getOperand(1).getNode()) &&
4302 "Illegal extract subvector for VEXTRACT");
4305 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4307 MVT VecVT = N->getOperand(0).getSimpleValueType();
4308 MVT ElVT = VecVT.getVectorElementType();
4310 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4311 return Index / NumElemsPerChunk;
4314 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4315 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4316 assert(isa<ConstantSDNode>(N->getOperand(2).getNode()) &&
4317 "Illegal insert subvector for VINSERT");
4320 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4322 MVT VecVT = N->getSimpleValueType(0);
4323 MVT ElVT = VecVT.getVectorElementType();
4325 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4326 return Index / NumElemsPerChunk;
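// For example, inserting a 128-bit v4f32 subvector at element index 4 of a
// v8f32 yields 4 / 4 = 1, i.e. the upper 128-bit lane (vinsertf128 $1).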
4329 /// Return the appropriate immediate to extract the specified
4330 /// EXTRACT_SUBVECTOR index with VEXTRACTF128 and VEXTRACTI128 instructions.
4331 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4332 return getExtractVEXTRACTImmediate(N, 128);
4335 /// Return the appropriate immediate to extract the specified
4336 /// EXTRACT_SUBVECTOR index with VEXTRACTF64x4 and VEXTRACTI64x4 instructions.
4337 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4338 return getExtractVEXTRACTImmediate(N, 256);
4341 /// Return the appropriate immediate to insert at the specified
4342 /// INSERT_SUBVECTOR index with VINSERTF128 and VINSERTI128 instructions.
4343 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
4344 return getInsertVINSERTImmediate(N, 128);
4347 /// Return the appropriate immediate to insert at the specified
4348 /// INSERT_SUBVECTOR index with VINSERTF64x4 and VINSERTI64x4 instructions.
4349 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
4350 return getInsertVINSERTImmediate(N, 256);
4353 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
4354 bool X86::isZeroNode(SDValue Elt) {
4355 return isNullConstant(Elt) || isNullFPConstant(Elt);
4358 // Build a vector of constants.
4359 // Use an UNDEF node if MaskElt == -1.
4360 // Split 64-bit constants in 32-bit mode.
4361 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
4362 const SDLoc &dl, bool IsMask = false) {
4364 SmallVector<SDValue, 32> Ops;
4367 MVT ConstVecVT = VT;
4368 unsigned NumElts = VT.getVectorNumElements();
4369 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
4370 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
4371 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
4375 MVT EltVT = ConstVecVT.getVectorElementType();
4376 for (unsigned i = 0; i < NumElts; ++i) {
4377 bool IsUndef = Values[i] < 0 && IsMask;
4378 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
4379 DAG.getConstant(Values[i], dl, EltVT);
4380 Ops.push_back(OpNode);
4382 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
4383 DAG.getConstant(0, dl, EltVT));
4385 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
4387 ConstsNode = DAG.getBitcast(VT, ConstsNode);
4391 /// Returns a vector of specified type with all zero elements.
4392 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
4393 SelectionDAG &DAG, const SDLoc &dl) {
4394 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
4395 VT.getVectorElementType() == MVT::i1) &&
4396 "Unexpected vector type");
4398 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
4399 // type. This ensures they get CSE'd. But if the integer type is not
4400 // available, use a floating-point +0.0 instead.
4402 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
4403 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
4404 } else if (VT.getVectorElementType() == MVT::i1) {
4405 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
4406 "Unexpected vector type");
4407 assert((Subtarget.hasVLX() || VT.getVectorNumElements() >= 8) &&
4408 "Unexpected vector type");
4409 Vec = DAG.getConstant(0, dl, VT);
4411 unsigned Num32BitElts = VT.getSizeInBits() / 32;
4412 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
4414 return DAG.getBitcast(VT, Vec);
4417 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
4418 const SDLoc &dl, unsigned vectorWidth) {
4419 assert((vectorWidth == 128 || vectorWidth == 256) &&
4420 "Unsupported vector width");
4421 EVT VT = Vec.getValueType();
4422 EVT ElVT = VT.getVectorElementType();
4423 unsigned Factor = VT.getSizeInBits()/vectorWidth;
4424 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
4425 VT.getVectorNumElements()/Factor);
4427 // Extract from UNDEF is UNDEF.
4429 return DAG.getUNDEF(ResultVT);
4431 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
4432 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
4433 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
4435 // This is the index of the first element of the vectorWidth-bit chunk
4436 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
4437 IdxVal &= ~(ElemsPerChunk - 1);
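// For example, extracting 128 bits from a v8f32 gives ElemsPerChunk = 4, so an
// IdxVal of 5 is rounded down to 4, the first element of the upper lane.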
4439 // If the input is a buildvector just emit a smaller one.
4440 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
4441 return DAG.getNode(ISD::BUILD_VECTOR,
4442 dl, ResultVT, makeArrayRef(Vec->op_begin() + IdxVal, ElemsPerChunk));
4444 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
4445 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
4448 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
4449 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
4450 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
4451 /// instructions or a simple subregister reference. Idx is an index in the
4452 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
4453 /// lowering EXTRACT_VECTOR_ELT operations easier.
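/// Illustrative example: asking for IdxVal == 6 of a v8f32 rounds the index
/// down to 4 (each 128-bit chunk holds 4 floats), so the upper v4f32 half
/// (elements 4-7) is extracted.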
4454 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
4455 SelectionDAG &DAG, const SDLoc &dl) {
4456 assert((Vec.getValueType().is256BitVector() ||
4457 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
4458 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
4461 /// Generate a DAG to grab 256-bits from a 512-bit vector.
4462 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
4463 SelectionDAG &DAG, const SDLoc &dl) {
4464 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
4465 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
4468 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
4469 SelectionDAG &DAG, const SDLoc &dl,
4470 unsigned vectorWidth) {
4471 assert((vectorWidth == 128 || vectorWidth == 256) &&
4472 "Unsupported vector width");
4473 // Inserting an UNDEF subvector simply yields Result.
4474 if (Vec.isUndef())
4475 return Result;
4476 EVT VT = Vec.getValueType();
4477 EVT ElVT = VT.getVectorElementType();
4478 EVT ResultVT = Result.getValueType();
4480 // Insert the relevant vectorWidth bits.
4481 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
4482 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
4484 // This is the index of the first element of the vectorWidth-bit chunk
4485 // we want. Since ElemsPerChunk is a power of 2, we just need to clear the low bits.
4486 IdxVal &= ~(ElemsPerChunk - 1);
4488 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
4489 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
4492 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
4493 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
4494 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
4495 /// simple superregister reference. Idx is an index in the 128 bits
4496 /// we want. It need not be aligned to a 128-bit boundary. That makes
4497 /// lowering INSERT_VECTOR_ELT operations easier.
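/// Illustrative example: inserting a v4f32 at IdxVal == 0 of a non-undef
/// v8f32 is emitted as (vblendps Result, Vec256, 0x0f) rather than a
/// vinsertf128, where Vec256 is the subvector widened with undef.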
4498 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
4499 SelectionDAG &DAG, const SDLoc &dl) {
4500 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
4502 // For insertion into the zero index (low half) of a 256-bit vector, it is
4503 // more efficient to generate a blend with immediate instead of an insert*128.
4504 // We are still creating an INSERT_SUBVECTOR below with an undef node to
4505 // extend the subvector to the size of the result vector. Make sure that
4506 // we are not recursing on that node by checking for undef here.
4507 if (IdxVal == 0 && Result.getValueType().is256BitVector() &&
4508 !Result.isUndef()) {
4509 EVT ResultVT = Result.getValueType();
4510 SDValue ZeroIndex = DAG.getIntPtrConstant(0, dl);
4511 SDValue Undef = DAG.getUNDEF(ResultVT);
4512 SDValue Vec256 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Undef,
4515 // The blend instruction, and therefore its mask, depend on the data type.
4516 MVT ScalarType = ResultVT.getVectorElementType().getSimpleVT();
4517 if (ScalarType.isFloatingPoint()) {
4518 // Choose either vblendps (float) or vblendpd (double).
4519 unsigned ScalarSize = ScalarType.getSizeInBits();
4520 assert((ScalarSize == 64 || ScalarSize == 32) && "Unknown float type");
4521 unsigned MaskVal = (ScalarSize == 64) ? 0x03 : 0x0f;
4522 SDValue Mask = DAG.getConstant(MaskVal, dl, MVT::i8);
4523 return DAG.getNode(X86ISD::BLENDI, dl, ResultVT, Result, Vec256, Mask);
4526 const X86Subtarget &Subtarget =
4527 static_cast<const X86Subtarget &>(DAG.getSubtarget());
4529 // AVX2 is needed for 256-bit integer blend support.
4530 // Integers must be cast to 32-bit because there is only vpblendd;
4531 // vpblendw can't be used for this because it has a handicapped mask.
4533 // If we don't have AVX2, then cast to float. Using a wrong domain blend
4534 // is still more efficient than using the wrong domain vinsertf128 that
4535 // will be created by InsertSubVector().
4536 MVT CastVT = Subtarget.hasAVX2() ? MVT::v8i32 : MVT::v8f32;
4538 SDValue Mask = DAG.getConstant(0x0f, dl, MVT::i8);
4539 Result = DAG.getBitcast(CastVT, Result);
4540 Vec256 = DAG.getBitcast(CastVT, Vec256);
4541 Vec256 = DAG.getNode(X86ISD::BLENDI, dl, CastVT, Result, Vec256, Mask);
4542 return DAG.getBitcast(ResultVT, Vec256);
4545 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
4548 static SDValue insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
4549 SelectionDAG &DAG, const SDLoc &dl) {
4550 assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
4551 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
4554 /// Insert i1-subvector to i1-vector.
4555 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
4556 const X86Subtarget &Subtarget) {
4559 SDValue Vec = Op.getOperand(0);
4560 SDValue SubVec = Op.getOperand(1);
4561 SDValue Idx = Op.getOperand(2);
4563 if (!isa<ConstantSDNode>(Idx))
4564 return SDValue();
4566 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
4567 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
4568 return Op;
4570 MVT OpVT = Op.getSimpleValueType();
4571 MVT SubVecVT = SubVec.getSimpleValueType();
4572 unsigned NumElems = OpVT.getVectorNumElements();
4573 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
4575 assert(IdxVal + SubVecNumElems <= NumElems &&
4576 IdxVal % SubVecVT.getSizeInBits() == 0 &&
4577 "Unexpected index value in INSERT_SUBVECTOR");
4579 // There are 3 possible cases:
4580 // 1. Subvector should be inserted in the lower part (IdxVal == 0)
4581 // 2. Subvector should be inserted in the upper part
4582 // (IdxVal + SubVecNumElems == NumElems)
4583 // 3. Subvector should be inserted in the middle (for example v2i1
4584 // to v16i1, index 2)
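// Illustrative example of case 3: inserting a v2i1 subvector into a v16i1 at
// index 2 takes elements 2-3 from the subvector and all other elements from
// the original vector, which is done with the vector shuffle at the end of
// this function.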
4586 // extend to natively supported kshift
4587 MVT MinVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
4588 MVT WideOpVT = OpVT;
4589 if (OpVT.getSizeInBits() < MinVT.getStoreSizeInBits())
4590 WideOpVT = MinVT;
4592 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
4593 SDValue Undef = DAG.getUNDEF(WideOpVT);
4594 SDValue WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4595 Undef, SubVec, ZeroIdx);
4597 // Extract the sub-vector if required.
4598 auto ExtractSubVec = [&](SDValue V) {
4599 return (WideOpVT == OpVT) ? V : DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
4603 if (Vec.isUndef()) {
4605 SDValue ShiftBits = DAG.getConstant(IdxVal, dl, MVT::i8);
4606 WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec, ShiftBits);
4608 return ExtractSubVec(WideSubVec);
4611 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
4612 NumElems = WideOpVT.getVectorNumElements();
4613 unsigned ShiftLeft = NumElems - SubVecNumElems;
4614 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
4615 Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
4616 DAG.getConstant(ShiftLeft, dl, MVT::i8));
4617 Vec = ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec,
4618 DAG.getConstant(ShiftRight, dl, MVT::i8)) : Vec;
4619 return ExtractSubVec(Vec);
4623 // Zero lower bits of the Vec
4624 SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
4625 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
4626 Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
4627 Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
4628 // Merge them together, SubVec should be zero extended.
4629 WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4630 getZeroVector(WideOpVT, Subtarget, DAG, dl),
4632 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
4633 return ExtractSubVec(Vec);
4636 // Simple case when we put subvector in the upper part
4637 if (IdxVal + SubVecNumElems == NumElems) {
4638 // Zero upper bits of the Vec
4639 WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
4640 DAG.getConstant(IdxVal, dl, MVT::i8));
4641 SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
4642 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
4643 Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
4644 Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
4645 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
4646 return ExtractSubVec(Vec);
4648 // Subvector should be inserted in the middle - use shuffle
4649 WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef,
4651 SmallVector<int, 64> Mask;
4652 for (unsigned i = 0; i < NumElems; ++i)
4653 Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
4655 return DAG.getVectorShuffle(OpVT, dl, WideSubVec, Vec, Mask);
4658 /// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
4659 /// instructions. This is used because creating CONCAT_VECTOR nodes of
4660 /// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
4661 /// large BUILD_VECTORS.
4662 static SDValue concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
4663 unsigned NumElems, SelectionDAG &DAG,
4665 SDValue V = insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
4666 return insert128BitVector(V, V2, NumElems / 2, DAG, dl);
4669 static SDValue concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
4670 unsigned NumElems, SelectionDAG &DAG,
4672 SDValue V = insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
4673 return insert256BitVector(V, V2, NumElems / 2, DAG, dl);
4676 /// Returns a vector of specified type with all bits set.
4677 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
4678 /// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
4679 /// Then bitcast to their original type, ensuring they get CSE'd.
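/// Illustrative example: a v4i64 all-ones vector without AVX2 is built as two
/// v4i32 <-1,-1,-1,-1> constants concatenated into a v8i32 and then bitcast
/// back to v4i64.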
4680 static SDValue getOnesVector(EVT VT, const X86Subtarget &Subtarget,
4681 SelectionDAG &DAG, const SDLoc &dl) {
4682 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
4683 "Expected a 128/256/512-bit vector type");
4685 APInt Ones = APInt::getAllOnesValue(32);
4686 unsigned NumElts = VT.getSizeInBits() / 32;
4688 if (!Subtarget.hasInt256() && NumElts == 8) {
4689 Vec = DAG.getConstant(Ones, dl, MVT::v4i32);
4690 Vec = concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
4692 Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
4694 return DAG.getBitcast(VT, Vec);
4697 /// Returns a vector_shuffle node for an unpackl operation.
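/// Illustrative example: for a v4i32 unpackl of V1 and V2 the mask is
/// <0, 4, 1, 5>, i.e. the low halves of both inputs interleaved.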
4698 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
4699 SDValue V1, SDValue V2) {
4700 assert(VT.is128BitVector() && "Expected a 128-bit vector type");
4701 unsigned NumElems = VT.getVectorNumElements();
4702 SmallVector<int, 8> Mask(NumElems);
4703 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
4704 Mask[i * 2] = i;
4705 Mask[i * 2 + 1] = i + NumElems;
4707 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
4710 /// Returns a vector_shuffle node for an unpackh operation.
4711 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
4712 SDValue V1, SDValue V2) {
4713 assert(VT.is128BitVector() && "Expected a 128-bit vector type");
4714 unsigned NumElems = VT.getVectorNumElements();
4715 SmallVector<int, 8> Mask(NumElems);
4716 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
4717 Mask[i * 2] = i + Half;
4718 Mask[i * 2 + 1] = i + NumElems + Half;
4720 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
4723 /// Return a vector_shuffle of the specified vector of zero or undef vector.
4724 /// This produces a shuffle where the low element of V2 is swizzled into the
4725 /// zero/undef vector, landing at element Idx.
4726 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
4727 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
4729 const X86Subtarget &Subtarget,
4730 SelectionDAG &DAG) {
4731 MVT VT = V2.getSimpleValueType();
4733 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
4734 int NumElems = VT.getVectorNumElements();
4735 SmallVector<int, 16> MaskVec(NumElems);
4736 for (int i = 0; i != NumElems; ++i)
4737 // If this is the insertion idx, put the low elt of V2 here.
4738 MaskVec[i] = (i == Idx) ? NumElems : i;
4739 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
4742 static SDValue peekThroughBitcasts(SDValue V) {
4743 while (V.getNode() && V.getOpcode() == ISD::BITCAST)
4744 V = V.getOperand(0);
4748 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
4749 unsigned MaskEltSizeInBits,
4750 SmallVectorImpl<uint64_t> &RawMask) {
4751 MaskNode = peekThroughBitcasts(MaskNode);
4753 MVT VT = MaskNode.getSimpleValueType();
4754 assert(VT.isVector() && "Can't produce a non-vector with a build_vector!");
4756 // Split an APInt element into MaskEltSizeInBits sized pieces and
4757 // insert into the shuffle mask.
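// Illustrative example: splitting the 16-bit element 0xABCD into 8-bit mask
// pieces appends 0xCD and then 0xAB (little endian, low piece first).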
4758 auto SplitElementToMask = [&](APInt Element) {
4759 // Note that this is x86 and so always little endian: the low byte is
4760 // the first byte of the mask.
4761 int Split = VT.getScalarSizeInBits() / MaskEltSizeInBits;
4762 for (int i = 0; i < Split; ++i) {
4763 APInt RawElt = Element.getLoBits(MaskEltSizeInBits);
4764 Element = Element.lshr(MaskEltSizeInBits);
4765 RawMask.push_back(RawElt.getZExtValue());
4769 if (MaskNode.getOpcode() == X86ISD::VBROADCAST) {
4770 // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
4771 // TODO: Handle (VT.getScalarSizeInBits() % MaskEltSizeInBits) == 0
4772 if (VT.getScalarSizeInBits() != MaskEltSizeInBits)
4774 if (auto *CN = dyn_cast<ConstantSDNode>(MaskNode.getOperand(0))) {
4775 const APInt &MaskElement = CN->getAPIntValue();
4776 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
4777 APInt RawElt = MaskElement.getLoBits(MaskEltSizeInBits);
4778 RawMask.push_back(RawElt.getZExtValue());
4784 if (MaskNode.getOpcode() == X86ISD::VZEXT_MOVL &&
4785 MaskNode.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR) {
4787 // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
4788 if ((VT.getScalarSizeInBits() % MaskEltSizeInBits) != 0)
4790 unsigned ElementSplit = VT.getScalarSizeInBits() / MaskEltSizeInBits;
4792 SDValue MaskOp = MaskNode.getOperand(0).getOperand(0);
4793 if (auto *CN = dyn_cast<ConstantSDNode>(MaskOp)) {
4794 SplitElementToMask(CN->getAPIntValue());
4795 RawMask.append((VT.getVectorNumElements() - 1) * ElementSplit, 0);
4801 if (MaskNode.getOpcode() != ISD::BUILD_VECTOR)
4804 // We can always decode if the buildvector is all zero constants,
4805 // but can't use isBuildVectorAllZeros as it might contain UNDEFs.
4806 if (llvm::all_of(MaskNode->ops(), X86::isZeroNode)) {
4807 RawMask.append(VT.getSizeInBits() / MaskEltSizeInBits, 0);
4811 // TODO: Handle (MaskEltSizeInBits % VT.getScalarSizeInBits()) == 0
4812 if ((VT.getScalarSizeInBits() % MaskEltSizeInBits) != 0)
4815 for (SDValue Op : MaskNode->ops()) {
4816 if (auto *CN = dyn_cast<ConstantSDNode>(Op.getNode()))
4817 SplitElementToMask(CN->getAPIntValue());
4818 else if (auto *CFN = dyn_cast<ConstantFPSDNode>(Op.getNode()))
4819 SplitElementToMask(CFN->getValueAPF().bitcastToAPInt());
4827 static const Constant *getTargetShuffleMaskConstant(SDValue MaskNode) {
4828 MaskNode = peekThroughBitcasts(MaskNode);
4830 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
4834 SDValue Ptr = MaskLoad->getBasePtr();
4835 if (Ptr->getOpcode() == X86ISD::Wrapper ||
4836 Ptr->getOpcode() == X86ISD::WrapperRIP)
4837 Ptr = Ptr->getOperand(0);
4839 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
4840 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
4843 return dyn_cast<Constant>(MaskCP->getConstVal());
4846 /// Calculates the shuffle mask corresponding to the target-specific opcode.
4847 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
4848 /// operands in \p Ops, and returns true.
4849 /// Sets \p IsUnary to true if only one source is used. Note that this will set
4850 /// IsUnary for shuffles which use a single input multiple times, and in those
4851 /// cases it will adjust the mask to only have indices within that single input.
4852 /// It is an error to call this with non-empty Mask/Ops vectors.
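/// Illustrative example: (X86ISD::UNPCKL V, V) is reported with IsUnary set;
/// its v4i32 mask <0, 4, 1, 5> is remapped to <0, 0, 1, 1> so that every
/// index points into the single input.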
4853 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
4854 SmallVectorImpl<SDValue> &Ops,
4855 SmallVectorImpl<int> &Mask, bool &IsUnary) {
4856 unsigned NumElems = VT.getVectorNumElements();
4859 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
4860 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
4863 bool IsFakeUnary = false;
4864 switch(N->getOpcode()) {
4865 case X86ISD::BLENDI:
4866 ImmN = N->getOperand(N->getNumOperands()-1);
4867 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4870 ImmN = N->getOperand(N->getNumOperands()-1);
4871 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4872 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4874 case X86ISD::INSERTPS:
4875 ImmN = N->getOperand(N->getNumOperands()-1);
4876 DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4877 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4879 case X86ISD::UNPCKH:
4880 DecodeUNPCKHMask(VT, Mask);
4881 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4883 case X86ISD::UNPCKL:
4884 DecodeUNPCKLMask(VT, Mask);
4885 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4887 case X86ISD::MOVHLPS:
4888 DecodeMOVHLPSMask(NumElems, Mask);
4889 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4891 case X86ISD::MOVLHPS:
4892 DecodeMOVLHPSMask(NumElems, Mask);
4893 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4895 case X86ISD::PALIGNR:
4896 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
4897 ImmN = N->getOperand(N->getNumOperands()-1);
4898 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4900 case X86ISD::VSHLDQ:
4901 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
4902 ImmN = N->getOperand(N->getNumOperands() - 1);
4903 DecodePSLLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4906 case X86ISD::VSRLDQ:
4907 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
4908 ImmN = N->getOperand(N->getNumOperands() - 1);
4909 DecodePSRLDQMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4912 case X86ISD::PSHUFD:
4913 case X86ISD::VPERMILPI:
4914 ImmN = N->getOperand(N->getNumOperands()-1);
4915 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4918 case X86ISD::PSHUFHW:
4919 ImmN = N->getOperand(N->getNumOperands()-1);
4920 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4923 case X86ISD::PSHUFLW:
4924 ImmN = N->getOperand(N->getNumOperands()-1);
4925 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4928 case X86ISD::VZEXT_MOVL:
4929 DecodeZeroMoveLowMask(VT, Mask);
4932 case X86ISD::VBROADCAST: {
4933 // We only decode broadcasts of same-sized vectors at the moment.
4934 if (N->getOperand(0).getValueType() == VT) {
4935 DecodeVectorBroadcast(VT, Mask);
4941 case X86ISD::VPERMILPV: {
4943 SDValue MaskNode = N->getOperand(1);
4944 unsigned MaskEltSize = VT.getScalarSizeInBits();
4945 SmallVector<uint64_t, 32> RawMask;
4946 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
4947 DecodeVPERMILPMask(VT, RawMask, Mask);
4950 if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
4951 DecodeVPERMILPMask(C, MaskEltSize, Mask);
4956 case X86ISD::PSHUFB: {
4958 SDValue MaskNode = N->getOperand(1);
4959 SmallVector<uint64_t, 32> RawMask;
4960 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
4961 DecodePSHUFBMask(RawMask, Mask);
4964 if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
4965 DecodePSHUFBMask(C, Mask);
4970 case X86ISD::VPERMI:
4971 ImmN = N->getOperand(N->getNumOperands()-1);
4972 DecodeVPERMMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4977 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
4979 case X86ISD::VPERM2X128:
4980 ImmN = N->getOperand(N->getNumOperands()-1);
4981 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
4982 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
4984 case X86ISD::MOVSLDUP:
4985 DecodeMOVSLDUPMask(VT, Mask);
4988 case X86ISD::MOVSHDUP:
4989 DecodeMOVSHDUPMask(VT, Mask);
4992 case X86ISD::MOVDDUP:
4993 DecodeMOVDDUPMask(VT, Mask);
4996 case X86ISD::MOVLHPD:
4997 case X86ISD::MOVLPD:
4998 case X86ISD::MOVLPS:
4999 // Not yet implemented
5001 case X86ISD::VPERMIL2: {
5002 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5003 unsigned MaskEltSize = VT.getScalarSizeInBits();
5004 SDValue MaskNode = N->getOperand(2);
5005 SDValue CtrlNode = N->getOperand(3);
5006 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
5007 unsigned CtrlImm = CtrlOp->getZExtValue();
5008 SmallVector<uint64_t, 32> RawMask;
5009 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
5010 DecodeVPERMIL2PMask(VT, CtrlImm, RawMask, Mask);
5013 if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
5014 DecodeVPERMIL2PMask(C, CtrlImm, MaskEltSize, Mask);
5020 case X86ISD::VPPERM: {
5021 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5022 SDValue MaskNode = N->getOperand(2);
5023 SmallVector<uint64_t, 32> RawMask;
5024 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
5025 DecodeVPPERMMask(RawMask, Mask);
5028 if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
5029 DecodeVPPERMMask(C, Mask);
5034 case X86ISD::VPERMV: {
5036 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
5037 Ops.push_back(N->getOperand(1));
5038 SDValue MaskNode = N->getOperand(0);
5039 SmallVector<uint64_t, 32> RawMask;
5040 unsigned MaskEltSize = VT.getScalarSizeInBits();
5041 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
5042 DecodeVPERMVMask(RawMask, Mask);
5045 if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
5046 DecodeVPERMVMask(C, VT, Mask);
5051 case X86ISD::VPERMV3: {
5052 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
5053 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
5054 Ops.push_back(N->getOperand(0));
5055 Ops.push_back(N->getOperand(2));
5056 SDValue MaskNode = N->getOperand(1);
5057 if (auto *C = getTargetShuffleMaskConstant(MaskNode)) {
5058 DecodeVPERMV3Mask(C, VT, Mask);
5063 default: llvm_unreachable("unknown target shuffle node");
5066 // Empty mask indicates the decode failed.
5067 if (Mask.empty())
5068 return false;
5070 // Check if we're getting a shuffle mask with zero'd elements.
5071 if (!AllowSentinelZero)
5072 if (llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
5073 return false;
5075 // If we have a fake unary shuffle, the shuffle mask is spread across two
5076 // inputs that are actually the same node. Re-map the mask to always point
5077 // into the first input.
5078 if (IsFakeUnary)
5079 for (int &M : Mask)
5080 if (M >= (int)Mask.size())
5081 M -= Mask.size();
5083 // If we didn't already add operands in the opcode-specific code, default to
5084 // adding 1 or 2 operands starting at 0.
5086 Ops.push_back(N->getOperand(0));
5087 if (!IsUnary || IsFakeUnary)
5088 Ops.push_back(N->getOperand(1));
5094 /// Check a target shuffle mask's inputs to see if we can set any values to
5095 /// SM_SentinelZero - this is for elements that are known to be zero
5096 /// (not just zeroable) from their inputs.
5097 /// Returns true if the target shuffle mask was decoded.
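/// Illustrative example: if a decoded mask element selects element 3 of an
/// input that is a BUILD_VECTOR (of the same element count) whose operand 3
/// is a zero constant, that mask element is rewritten to SM_SentinelZero.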
5098 static bool setTargetShuffleZeroElements(SDValue N,
5099 SmallVectorImpl<int> &Mask,
5100 SmallVectorImpl<SDValue> &Ops) {
5102 if (!isTargetShuffle(N.getOpcode()))
5104 if (!getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), true, Ops,
5108 SDValue V1 = Ops[0];
5109 SDValue V2 = IsUnary ? V1 : Ops[1];
5111 V1 = peekThroughBitcasts(V1);
5112 V2 = peekThroughBitcasts(V2);
5114 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
5117 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
5121 // Determine shuffle input and normalize the mask.
5122 SDValue V = M < Size ? V1 : V2;
5125 // We are referencing an UNDEF input.
5127 Mask[i] = SM_SentinelUndef;
5131 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
5132 if (V.getOpcode() != ISD::BUILD_VECTOR)
5135 // If the BUILD_VECTOR has fewer elements, then the (larger) source
5136 // element must be UNDEF/ZERO.
5137 // TODO: Is it worth testing the individual bits of a constant?
5138 if ((Size % V.getNumOperands()) == 0) {
5139 int Scale = Size / V->getNumOperands();
5140 SDValue Op = V.getOperand(M / Scale);
5142 Mask[i] = SM_SentinelUndef;
5143 else if (X86::isZeroNode(Op))
5144 Mask[i] = SM_SentinelZero;
5148 // If the BUILD_VECTOR has more elements, then all the (smaller) source
5149 // elements must be all UNDEF or all ZERO.
5150 if ((V.getNumOperands() % Size) == 0) {
5151 int Scale = V->getNumOperands() / Size;
5152 bool AllUndef = true;
5153 bool AllZero = true;
5154 for (int j = 0; j < Scale; ++j) {
5155 SDValue Op = V.getOperand((M * Scale) + j);
5156 AllUndef &= Op.isUndef();
5157 AllZero &= X86::isZeroNode(Op);
5160 Mask[i] = SM_SentinelUndef;
5162 Mask[i] = SM_SentinelZero;
5170 /// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's inputs
5171 /// and set the SM_SentinelUndef and SM_SentinelZero values. Then check the
5172 /// remaining input indices in case we now have a unary shuffle and adjust the
5173 /// Op0/Op1 inputs accordingly.
5174 /// Returns true if the target shuffle mask was decoded.
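/// Illustrative example: a decoded mask of <4, 5, 6, 7> over inputs (A, B),
/// with four elements per input, only uses B; the mask is commuted to
/// <0, 1, 2, 3>, Op0 becomes B and Op1 is left empty.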
5175 static bool resolveTargetShuffleInputs(SDValue Op, SDValue &Op0, SDValue &Op1,
5176 SmallVectorImpl<int> &Mask) {
5177 SmallVector<SDValue, 2> Ops;
5178 if (!setTargetShuffleZeroElements(Op, Mask, Ops))
5181 int NumElts = Mask.size();
5182 bool Op0InUse = std::any_of(Mask.begin(), Mask.end(), [NumElts](int Idx) {
5183 return 0 <= Idx && Idx < NumElts;
5185 bool Op1InUse = std::any_of(Mask.begin(), Mask.end(),
5186 [NumElts](int Idx) { return NumElts <= Idx; });
5188 Op0 = Op0InUse ? Ops[0] : SDValue();
5189 Op1 = Op1InUse ? Ops[1] : SDValue();
5191 // We're only using Op1 - commute the mask and inputs.
5192 if (!Op0InUse && Op1InUse) {
5203 /// Returns the scalar element that will make up the ith
5204 /// element of the result of the vector shuffle.
5205 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5208 return SDValue(); // Limit search depth.
5210 SDValue V = SDValue(N, 0);
5211 EVT VT = V.getValueType();
5212 unsigned Opcode = V.getOpcode();
5214 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5215 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5216 int Elt = SV->getMaskElt(Index);
5219 return DAG.getUNDEF(VT.getVectorElementType());
5221 unsigned NumElems = VT.getVectorNumElements();
5222 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5223 : SV->getOperand(1);
5224 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5227 // Recurse into target specific vector shuffles to find scalars.
5228 if (isTargetShuffle(Opcode)) {
5229 MVT ShufVT = V.getSimpleValueType();
5230 MVT ShufSVT = ShufVT.getVectorElementType();
5231 int NumElems = (int)ShufVT.getVectorNumElements();
5232 SmallVector<int, 16> ShuffleMask;
5233 SmallVector<SDValue, 16> ShuffleOps;
5236 if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
5239 int Elt = ShuffleMask[Index];
5240 if (Elt == SM_SentinelZero)
5241 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
5242 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
5243 if (Elt == SM_SentinelUndef)
5244 return DAG.getUNDEF(ShufSVT);
5246 assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
5247 SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
5248 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5252 // Actual nodes that may contain scalar elements
5253 if (Opcode == ISD::BITCAST) {
5254 V = V.getOperand(0);
5255 EVT SrcVT = V.getValueType();
5256 unsigned NumElems = VT.getVectorNumElements();
5258 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5262 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5263 return (Index == 0) ? V.getOperand(0)
5264 : DAG.getUNDEF(VT.getVectorElementType());
5266 if (V.getOpcode() == ISD::BUILD_VECTOR)
5267 return V.getOperand(Index);
5272 /// Custom lower build_vector of v16i8.
5273 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5274 unsigned NumNonZero, unsigned NumZero,
5276 const X86Subtarget &Subtarget,
5277 const TargetLowering &TLI) {
5285 // SSE4.1 - use PINSRB to insert each byte directly.
5286 if (Subtarget.hasSSE41()) {
5287 for (unsigned i = 0; i < 16; ++i) {
5288 bool isNonZero = (NonZeros & (1 << i)) != 0;
5292 V = getZeroVector(MVT::v16i8, Subtarget, DAG, dl);
5294 V = DAG.getUNDEF(MVT::v16i8);
5297 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5298 MVT::v16i8, V, Op.getOperand(i),
5299 DAG.getIntPtrConstant(i, dl));
5306 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
5307 for (unsigned i = 0; i < 16; ++i) {
5308 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5309 if (ThisIsNonZero && First) {
5311 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5313 V = DAG.getUNDEF(MVT::v8i16);
5318 SDValue ThisElt, LastElt;
5319 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5320 if (LastIsNonZero) {
5321 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5322 MVT::i16, Op.getOperand(i-1));
5324 if (ThisIsNonZero) {
5325 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5326 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5327 ThisElt, DAG.getConstant(8, dl, MVT::i8));
5329 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5333 if (ThisElt.getNode())
5334 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5335 DAG.getIntPtrConstant(i/2, dl));
5339 return DAG.getBitcast(MVT::v16i8, V);
5342 /// Custom lower build_vector of v8i16.
5343 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5344 unsigned NumNonZero, unsigned NumZero,
5346 const X86Subtarget &Subtarget,
5347 const TargetLowering &TLI) {
5354 for (unsigned i = 0; i < 8; ++i) {
5355 bool isNonZero = (NonZeros & (1 << i)) != 0;
5359 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5361 V = DAG.getUNDEF(MVT::v8i16);
5364 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5365 MVT::v8i16, V, Op.getOperand(i),
5366 DAG.getIntPtrConstant(i, dl));
5373 /// Custom lower build_vector of v4i32 or v4f32.
5374 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5375 const X86Subtarget &Subtarget,
5376 const TargetLowering &TLI) {
5377 // Find all zeroable elements.
5378 std::bitset<4> Zeroable;
5379 for (int i=0; i < 4; ++i) {
5380 SDValue Elt = Op->getOperand(i);
5381 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
5383 assert(Zeroable.size() - Zeroable.count() > 1 &&
5384 "We expect at least two non-zero elements!");
5386 // We only know how to deal with build_vector nodes where elements are either
5387 // zeroable or extract_vector_elt with constant index.
5388 SDValue FirstNonZero;
5389 unsigned FirstNonZeroIdx;
5390 for (unsigned i=0; i < 4; ++i) {
5393 SDValue Elt = Op->getOperand(i);
5394 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5395 !isa<ConstantSDNode>(Elt.getOperand(1)))
5397 // Make sure that this node is extracting from a 128-bit vector.
5398 MVT VT = Elt.getOperand(0).getSimpleValueType();
5399 if (!VT.is128BitVector())
5401 if (!FirstNonZero.getNode()) {
5403 FirstNonZeroIdx = i;
5407 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5408 SDValue V1 = FirstNonZero.getOperand(0);
5409 MVT VT = V1.getSimpleValueType();
5411 // See if this build_vector can be lowered as a blend with zero.
5413 unsigned EltMaskIdx, EltIdx;
5415 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5416 if (Zeroable[EltIdx]) {
5417 // The zero vector will be on the right hand side.
5418 Mask[EltIdx] = EltIdx+4;
5422 Elt = Op->getOperand(EltIdx);
5423 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
5424 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5425 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5427 Mask[EltIdx] = EltIdx;
5431 // Let the shuffle legalizer deal with blend operations.
5432 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5433 if (V1.getSimpleValueType() != VT)
5434 V1 = DAG.getBitcast(VT, V1);
5435 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, Mask);
5438 // See if we can lower this build_vector to a INSERTPS.
5439 if (!Subtarget.hasSSE41())
5442 SDValue V2 = Elt.getOperand(0);
5443 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5446 bool CanFold = true;
5447 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5451 SDValue Current = Op->getOperand(i);
5452 SDValue SrcVector = Current->getOperand(0);
5455 CanFold = SrcVector == V1 &&
5456 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5462 assert(V1.getNode() && "Expected at least two non-zero elements!");
5463 if (V1.getSimpleValueType() != MVT::v4f32)
5464 V1 = DAG.getBitcast(MVT::v4f32, V1);
5465 if (V2.getSimpleValueType() != MVT::v4f32)
5466 V2 = DAG.getBitcast(MVT::v4f32, V2);
5468 // Ok, we can emit an INSERTPS instruction.
5469 unsigned ZMask = Zeroable.to_ulong();
5471 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5472 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
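// Illustrative example: EltMaskIdx = 2, EltIdx = 1 and a zero mask of 0b1000
// give InsertPSMask = (2 << 6) | (1 << 4) | 0x8 = 0x98, i.e. take source
// element 2, place it in destination element 1 and zero destination element 3.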
5474 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
5475 DAG.getIntPtrConstant(InsertPSMask, DL));
5476 return DAG.getBitcast(VT, Result);
5479 /// Return a vector logical shift node.
5480 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
5481 SelectionDAG &DAG, const TargetLowering &TLI,
5483 assert(VT.is128BitVector() && "Unknown type for VShift");
5484 MVT ShVT = MVT::v16i8;
5485 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5486 SrcOp = DAG.getBitcast(ShVT, SrcOp);
5487 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(DAG.getDataLayout(), VT);
5488 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
5489 SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, ScalarShiftTy);
5490 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5493 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
5494 SelectionDAG &DAG) {
5496 // Check if the scalar load can be widened into a vector load. And if
5497 // the address is "base + cst" see if the cst can be "absorbed" into
5498 // the shuffle mask.
5499 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5500 SDValue Ptr = LD->getBasePtr();
5501 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5503 EVT PVT = LD->getValueType(0);
5504 if (PVT != MVT::i32 && PVT != MVT::f32)
5509 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5510 FI = FINode->getIndex();
5512 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5513 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5514 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5515 Offset = Ptr.getConstantOperandVal(1);
5516 Ptr = Ptr.getOperand(0);
5521 // FIXME: 256-bit vector instructions don't require a strict alignment,
5522 // improve this code to support it better.
5523 unsigned RequiredAlign = VT.getSizeInBits()/8;
5524 SDValue Chain = LD->getChain();
5525 // Make sure the stack object alignment is at least 16 or 32.
5526 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5527 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5528 if (MFI->isFixedObjectIndex(FI)) {
5529 // Can't change the alignment. FIXME: It's possible to compute
5530 // the exact stack offset and reference FI + adjust offset instead.
5531 // If someone *really* cares about this. That's the way to implement it.
5534 MFI->setObjectAlignment(FI, RequiredAlign);
5538 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5539 // Ptr + (Offset & ~15).
5542 if ((Offset % RequiredAlign) & 3)
5544 int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
5547 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
5548 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
5551 int EltNo = (Offset - StartOffset) >> 2;
5552 unsigned NumElems = VT.getVectorNumElements();
5554 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5555 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5556 LD->getPointerInfo().getWithOffset(StartOffset));
5558 SmallVector<int, 8> Mask(NumElems, EltNo);
5560 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
5566 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
5567 /// elements can be replaced by a single large load which has the same value as
5568 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
5570 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
5571 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
5572 SDLoc &DL, SelectionDAG &DAG,
5573 bool isAfterLegalize) {
5574 unsigned NumElems = Elts.size();
5576 int LastLoadedElt = -1;
5577 SmallBitVector LoadMask(NumElems, false);
5578 SmallBitVector ZeroMask(NumElems, false);
5579 SmallBitVector UndefMask(NumElems, false);
5581 // For each element in the initializer, see if we've found a load, zero or an
5583 for (unsigned i = 0; i < NumElems; ++i) {
5584 SDValue Elt = peekThroughBitcasts(Elts[i]);
5589 UndefMask[i] = true;
5590 else if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode()))
5592 else if (ISD::isNON_EXTLoad(Elt.getNode())) {
5595 // Each loaded element must be the correct fractional portion of the
5596 // requested vector load.
5597 if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits())
5602 assert((ZeroMask | UndefMask | LoadMask).count() == NumElems &&
5603 "Incomplete element masks");
5605 // Handle Special Cases - all undef or undef/zero.
5606 if (UndefMask.count() == NumElems)
5607 return DAG.getUNDEF(VT);
5609 // FIXME: Should we return this as a BUILD_VECTOR instead?
5610 if ((ZeroMask | UndefMask).count() == NumElems)
5611 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
5612 : DAG.getConstantFP(0.0, DL, VT);
5614 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5615 int FirstLoadedElt = LoadMask.find_first();
5616 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
5617 LoadSDNode *LDBase = cast<LoadSDNode>(EltBase);
5618 EVT LDBaseVT = EltBase.getValueType();
5620 // Consecutive loads can contain UNDEFs but not ZERO elements.
5621 // Consecutive loads with UNDEF and ZERO elements require an
5622 // additional shuffle stage to clear the ZERO elements.
5623 bool IsConsecutiveLoad = true;
5624 bool IsConsecutiveLoadWithZeros = true;
5625 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
5627 SDValue Elt = peekThroughBitcasts(Elts[i]);
5628 LoadSDNode *LD = cast<LoadSDNode>(Elt);
5629 if (!DAG.areNonVolatileConsecutiveLoads(
5630 LD, LDBase, Elt.getValueType().getStoreSizeInBits() / 8,
5631 i - FirstLoadedElt)) {
5632 IsConsecutiveLoad = false;
5633 IsConsecutiveLoadWithZeros = false;
5636 } else if (ZeroMask[i]) {
5637 IsConsecutiveLoad = false;
5641 auto CreateLoad = [&DAG, &DL](EVT VT, LoadSDNode *LDBase) {
5642 auto MMOFlags = LDBase->getMemOperand()->getFlags();
5643 assert(!(MMOFlags & MachineMemOperand::MOVolatile) &&
5644 "Cannot merge volatile loads.");
5646 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
5647 LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
5649 if (LDBase->hasAnyUseOfValue(1)) {
5651 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, SDValue(LDBase, 1),
5652 SDValue(NewLd.getNode(), 1));
5653 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
5654 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
5655 SDValue(NewLd.getNode(), 1));
5661 // LOAD - all consecutive load/undefs (must start/end with a load).
5662 // If we have found an entire vector of loads and undefs, then return a large
5663 // load of the entire vector width starting at the base pointer.
5664 // If the vector contains zeros, then attempt to shuffle those elements.
5665 if (FirstLoadedElt == 0 && LastLoadedElt == (int)(NumElems - 1) &&
5666 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
5667 assert(LDBase && "Did not find base load for merging consecutive loads");
5668 EVT EltVT = LDBase->getValueType(0);
5669 // Ensure that the input vector size for the merged loads matches the
5670 // cumulative size of the input elements.
5671 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
5674 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
5677 if (IsConsecutiveLoad)
5678 return CreateLoad(VT, LDBase);
5680 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
5681 // vector and a zero vector to clear out the zero elements.
5682 if (!isAfterLegalize && NumElems == VT.getVectorNumElements()) {
5683 SmallVector<int, 4> ClearMask(NumElems, -1);
5684 for (unsigned i = 0; i < NumElems; ++i) {
5686 ClearMask[i] = i + NumElems;
5687 else if (LoadMask[i])
5690 SDValue V = CreateLoad(VT, LDBase);
5691 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
5692 : DAG.getConstantFP(0.0, DL, VT);
5693 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
5697 int LoadSize =
5698 (1 + LastLoadedElt - FirstLoadedElt) * LDBaseVT.getStoreSizeInBits();
5700 // VZEXT_LOAD - consecutive load/undefs followed by zeros/undefs.
5701 if (IsConsecutiveLoad && FirstLoadedElt == 0 && LoadSize == 64 &&
5702 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
5703 MVT VecSVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
5704 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / 64);
5705 if (TLI.isTypeLegal(VecVT)) {
5706 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
5707 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
5709 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
5710 LDBase->getPointerInfo(),
5711 LDBase->getAlignment(),
5712 false/*isVolatile*/, true/*ReadMem*/,
5715 // Make sure the newly-created LOAD is in the same position as LDBase in
5716 // terms of dependency. We create a TokenFactor for LDBase and ResNode,
5717 // and update uses of LDBase's output chain to use the TokenFactor.
5718 if (LDBase->hasAnyUseOfValue(1)) {
5720 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, SDValue(LDBase, 1),
5721 SDValue(ResNode.getNode(), 1));
5722 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
5723 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
5724 SDValue(ResNode.getNode(), 1));
5727 return DAG.getBitcast(VT, ResNode);
5731 // VZEXT_MOVL - consecutive 32-bit load/undefs followed by zeros/undefs.
5732 if (IsConsecutiveLoad && FirstLoadedElt == 0 && LoadSize == 32 &&
5733 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
5734 MVT VecSVT = VT.isFloatingPoint() ? MVT::f32 : MVT::i32;
5735 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / 32);
5736 if (TLI.isTypeLegal(VecVT)) {
5737 SDValue V = LastLoadedElt != 0 ? CreateLoad(VecSVT, LDBase)
5738 : DAG.getBitcast(VecSVT, EltBase);
5739 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, V);
5740 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, V);
5741 return DAG.getBitcast(VT, V);
5748 /// Attempt to use the vbroadcast instruction to generate a splat value for the
5749 /// following cases:
5750 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
5751 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
5752 /// a scalar load, or a constant.
5753 /// The VBROADCAST node is returned when a pattern is found,
5754 /// or SDValue() otherwise.
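/// Illustrative example: on AVX, a v8f32 BUILD_VECTOR splat of a single
/// loaded f32 can be matched here and returned as
/// (v8f32 (X86ISD::VBROADCAST (load f32))), which typically selects to a
/// vbroadcastss from memory.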
5755 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget &Subtarget,
5756 SelectionDAG &DAG) {
5757 // VBROADCAST requires AVX.
5758 // TODO: Splats could be generated for non-AVX CPUs using SSE
5759 // instructions, but there's less potential gain for only 128-bit vectors.
5760 if (!Subtarget.hasAVX())
5763 MVT VT = Op.getSimpleValueType();
5766 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5767 "Unsupported vector type for broadcast.");
5772 switch (Op.getOpcode()) {
5774 // Unknown pattern found.
5777 case ISD::BUILD_VECTOR: {
5778 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
5779 BitVector UndefElements;
5780 SDValue Splat = BVOp->getSplatValue(&UndefElements);
5782 // We need a splat of a single value to use broadcast, and it doesn't
5783 // make any sense if the value is only in one element of the vector.
5784 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
5785 return SDValue();
5787 Ld = Splat;
5788 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
5789 Ld.getOpcode() == ISD::ConstantFP);
5791 // Make sure that all of the users of a non-constant load are from the
5792 // BUILD_VECTOR node.
5793 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
5798 case ISD::VECTOR_SHUFFLE: {
5799 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
5801 // Shuffles must have a splat mask where the first element is
5803 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
5806 SDValue Sc = Op.getOperand(0);
5807 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
5808 Sc.getOpcode() != ISD::BUILD_VECTOR) {
5810 if (!Subtarget.hasInt256())
5813 // Use the register form of the broadcast instruction available on AVX2.
5814 if (VT.getSizeInBits() >= 256)
5815 Sc = extract128BitVector(Sc, 0, DAG, dl);
5816 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
5819 Ld = Sc.getOperand(0);
5820 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
5821 Ld.getOpcode() == ISD::ConstantFP);
5823 // The scalar_to_vector node and the suspected
5824 // load node must have exactly one user.
5825 // Constants may have multiple users.
5827 // AVX-512 has a register version of the broadcast.
5828 bool hasRegVer = Subtarget.hasAVX512() && VT.is512BitVector() &&
5829 Ld.getValueType().getSizeInBits() >= 32;
5830 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
5837 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
5838 bool IsGE256 = (VT.getSizeInBits() >= 256);
5840 // When optimizing for size, generate up to 5 extra bytes for a broadcast
5841 // instruction to save 8 or more bytes of constant pool data.
5842 // TODO: If multiple splats are generated to load the same constant,
5843 // it may be detrimental to overall size. There needs to be a way to detect
5844 // that condition to know if this is truly a size win.
5845 bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
5847 // Handle broadcasting a single constant scalar from the constant pool
5849 // On Sandybridge (no AVX2), it is still better to load a constant vector
5850 // from the constant pool and not to broadcast it from a scalar.
5851 // But override that restriction when optimizing for size.
5852 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
5853 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
5854 EVT CVT = Ld.getValueType();
5855 assert(!CVT.isVector() && "Must not broadcast a vector type");
5857 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
5858 // For size optimization, also splat v2f64 and v2i64, and for size opt
5859 // with AVX2, also splat i8 and i16.
5860 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
5861 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
5862 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
5863 const Constant *C = nullptr;
5864 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
5865 C = CI->getConstantIntValue();
5866 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
5867 C = CF->getConstantFPValue();
5869 assert(C && "Invalid constant type");
5871 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5873 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
5874 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
5876 CVT, dl, DAG.getEntryNode(), CP,
5877 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
5880 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5884 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
5886 // Handle AVX2 in-register broadcasts.
5887 if (!IsLoad && Subtarget.hasInt256() &&
5888 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
5889 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5891 // The scalar source must be a normal load.
5895 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
5896 (Subtarget.hasVLX() && ScalarSize == 64))
5897 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5899 // The integer check is needed for the 64-bit into 128-bit case, so that it
5900 // doesn't match f64, since there is no vbroadcastsd xmm.
5901 if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
5902 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
5903 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
5906 // Unsupported broadcast.
5910 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
5911 /// underlying vector and index.
4913 /// Modifies \p ExtractedFromVec to the real vector and returns the real
4914 /// index.
5915 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
5917 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
5918 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
5921 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
5922 // lowered this:
5923 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
5924 // to:
5925 // (extract_vector_elt (vector_shuffle<2,u,u,u>
5926 // (extract_subvector (v8f32 %vreg0), Constant<4>),
5927 // undef)
5928 // Constant<0>)
5929 // In this case the vector is the extract_subvector expression and the index
5930 // is 2, as specified by the shuffle.
5931 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
5932 SDValue ShuffleVec = SVOp->getOperand(0);
5933 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
5934 assert(ShuffleVecVT.getVectorElementType() ==
5935 ExtractedFromVec.getSimpleValueType().getVectorElementType());
5937 int ShuffleIdx = SVOp->getMaskElt(Idx);
5938 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
5939 ExtractedFromVec = ShuffleVec;
5945 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
5946 MVT VT = Op.getSimpleValueType();
5948 // Skip if insert_vec_elt is not supported.
5949 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5950 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
5954 unsigned NumElems = Op.getNumOperands();
5958 SmallVector<unsigned, 4> InsertIndices;
5959 SmallVector<int, 8> Mask(NumElems, -1);
5961 for (unsigned i = 0; i != NumElems; ++i) {
5962 unsigned Opc = Op.getOperand(i).getOpcode();
5964 if (Opc == ISD::UNDEF)
5967 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
5968 // Quit if more than 1 element needs inserting.
5969 if (InsertIndices.size() > 1)
5972 InsertIndices.push_back(i);
5976 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
5977 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
5978 // Quit if non-constant index.
5979 if (!isa<ConstantSDNode>(ExtIdx))
5981 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
5983 // Quit if extracted from vector of different type.
5984 if (ExtractedFromVec.getValueType() != VT)
5987 if (!VecIn1.getNode())
5988 VecIn1 = ExtractedFromVec;
5989 else if (VecIn1 != ExtractedFromVec) {
5990 if (!VecIn2.getNode())
5991 VecIn2 = ExtractedFromVec;
5992 else if (VecIn2 != ExtractedFromVec)
5993 // Quit if more than 2 vectors to shuffle.
5994 return SDValue();
5997 if (ExtractedFromVec == VecIn1)
5999 else if (ExtractedFromVec == VecIn2)
6000 Mask[i] = Idx + NumElems;
6003 if (!VecIn1.getNode())
6006 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6007 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
6008 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6009 unsigned Idx = InsertIndices[i];
6010 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6011 DAG.getIntPtrConstant(Idx, DL));
6017 static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
6018 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
6019 Op.getScalarValueSizeInBits() == 1 &&
6020 "Can not convert non-constant vector");
6021 uint64_t Immediate = 0;
6022 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6023 SDValue In = Op.getOperand(idx);
6025 Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
6028 MVT VT =
6029 MVT::getIntegerVT(std::max((int)Op.getValueType().getSizeInBits(), 8));
6030 return DAG.getConstant(Immediate, dl, VT);
6032 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6034 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6036 MVT VT = Op.getSimpleValueType();
6037 assert((VT.getVectorElementType() == MVT::i1) &&
6038 "Unexpected type in LowerBUILD_VECTORvXi1!");
6041 if (ISD::isBuildVectorAllZeros(Op.getNode()))
6042 return DAG.getTargetConstant(0, dl, VT);
6044 if (ISD::isBuildVectorAllOnes(Op.getNode()))
6045 return DAG.getTargetConstant(1, dl, VT);
6047 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
6048 SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
6049 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
6050 return DAG.getBitcast(VT, Imm);
6051 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
6052 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
6053 DAG.getIntPtrConstant(0, dl));
6056 // Vector has one or more non-const elements
6057 uint64_t Immediate = 0;
6058 SmallVector<unsigned, 16> NonConstIdx;
6059 bool IsSplat = true;
6060 bool HasConstElts = false;
6062 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6063 SDValue In = Op.getOperand(idx);
6066 if (!isa<ConstantSDNode>(In))
6067 NonConstIdx.push_back(idx);
6069 Immediate |= cast<ConstantSDNode>(In)->getZExtValue() << idx;
6070 HasConstElts = true;
6074 else if (In != Op.getOperand(SplatIdx))
6075 IsSplat = false;
6076 }
6078 // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
6080 return DAG.getNode(ISD::SELECT, dl, VT, Op.getOperand(SplatIdx),
6081 DAG.getConstant(1, dl, VT),
6082 DAG.getConstant(0, dl, VT));
6084 // insert elements one by one
6088 MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8));
6089 Imm = DAG.getConstant(Immediate, dl, ImmVT);
6091 else if (HasConstElts)
6092 Imm = DAG.getConstant(0, dl, VT);
6094 Imm = DAG.getUNDEF(VT);
6095 if (Imm.getValueSizeInBits() == VT.getSizeInBits())
6096 DstVec = DAG.getBitcast(VT, Imm);
6098 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
6099 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
6100 DAG.getIntPtrConstant(0, dl));
6103 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
6104 unsigned InsertIdx = NonConstIdx[i];
6105 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6106 Op.getOperand(InsertIdx),
6107 DAG.getIntPtrConstant(InsertIdx, dl));
6112 /// \brief Return true if \p N implements a horizontal binop and return the
6113 /// operands for the horizontal binop into V0 and V1.
6115 /// This is a helper function of LowerToHorizontalOp().
6116 /// This function checks that the build_vector \p N in input implements a
6117 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6118 /// operation to match.
6119 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6120 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6121 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6122 /// arithmetic sub.
6124 /// This function only analyzes elements of \p N whose indices are
6125 /// in range [BaseIdx, LastIdx).
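/// Illustrative example: a v4f32 build_vector of
/// (fadd A[0], A[1]), (fadd A[2], A[3]), (fadd B[0], B[1]), (fadd B[2], B[3])
/// matches with Opcode == ISD::FADD, returning V0 = A and V1 = B, which the
/// caller can then turn into a single horizontal add (HADDPS).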
6126 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6128 unsigned BaseIdx, unsigned LastIdx,
6129 SDValue &V0, SDValue &V1) {
6130 EVT VT = N->getValueType(0);
6132 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6133 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6134 "Invalid Vector in input!");
6136 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6137 bool CanFold = true;
6138 unsigned ExpectedVExtractIdx = BaseIdx;
6139 unsigned NumElts = LastIdx - BaseIdx;
6140 V0 = DAG.getUNDEF(VT);
6141 V1 = DAG.getUNDEF(VT);
6143 // Check if N implements a horizontal binop.
6144 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6145 SDValue Op = N->getOperand(i + BaseIdx);
6148 if (Op->isUndef()) {
6149 // Update the expected vector extract index.
6150 if (i * 2 == NumElts)
6151 ExpectedVExtractIdx = BaseIdx;
6152 ExpectedVExtractIdx += 2;
6156 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6161 SDValue Op0 = Op.getOperand(0);
6162 SDValue Op1 = Op.getOperand(1);
6164 // Try to match the following pattern:
6165 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6166 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6167 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6168 Op0.getOperand(0) == Op1.getOperand(0) &&
6169 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6170 isa<ConstantSDNode>(Op1.getOperand(1)));
6174 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6175 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6177 if (i * 2 < NumElts) {
6179 V0 = Op0.getOperand(0);
6180 if (V0.getValueType() != VT)
6185 V1 = Op0.getOperand(0);
6186 if (V1.getValueType() != VT)
6189 if (i * 2 == NumElts)
6190 ExpectedVExtractIdx = BaseIdx;
6193 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6194 if (I0 == ExpectedVExtractIdx)
6195 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6196 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6197 // Try to match the following dag sequence:
6198 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6199 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6203 ExpectedVExtractIdx += 2;
6209 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6210 /// a concat_vector.
6212 /// This is a helper function of LowerToHorizontalOp().
6213 /// This function expects two 256-bit vectors called V0 and V1.
6214 /// At first, each vector is split into two separate 128-bit vectors.
6215 /// Then, the resulting 128-bit vectors are used to implement two
6216 /// horizontal binary operations.
6218 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6220 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs to
6221 /// the two new horizontal binops.
6222 /// When Mode is set, the first horizontal binop dag node would take as input
6223 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6224 /// horizontal binop dag node would take as input the lower 128-bit of V1
6225 /// and the upper 128-bit of V1.
6227 /// HADD V0_LO, V0_HI
6228 /// HADD V1_LO, V1_HI
6230 /// Otherwise, the first horizontal binop dag node takes as input the lower
6231 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6232 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6234 /// HADD V0_LO, V1_LO
6235 /// HADD V0_HI, V1_HI
6237 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6238 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6239 /// the upper 128-bits of the result.
6240 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6241 const SDLoc &DL, SelectionDAG &DAG,
6242 unsigned X86Opcode, bool Mode,
6243 bool isUndefLO, bool isUndefHI) {
6244 MVT VT = V0.getSimpleValueType();
6245 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
6246 "Invalid nodes in input!");
6248 unsigned NumElts = VT.getVectorNumElements();
6249 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
6250 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
6251 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
6252 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
6253 MVT NewVT = V0_LO.getSimpleValueType();
6255 SDValue LO = DAG.getUNDEF(NewVT);
6256 SDValue HI = DAG.getUNDEF(NewVT);
6259 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6260 if (!isUndefLO && !V0->isUndef())
6261 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6262 if (!isUndefHI && !V1->isUndef())
6263 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6265 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6266 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
6267 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6269 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
6270 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6273 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6276 /// Try to fold a build_vector that performs an 'addsub' to an X86ISD::ADDSUB node.
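/// An illustrative case (A and B are arbitrary v4f32 inputs): the build_vector
///   (fsub (extract_vector_elt A, 0), (extract_vector_elt B, 0)),
///   (fadd (extract_vector_elt A, 1), (extract_vector_elt B, 1)),
///   (fsub (extract_vector_elt A, 2), (extract_vector_elt B, 2)),
///   (fadd (extract_vector_elt A, 3), (extract_vector_elt B, 3))
/// should fold to (X86ISD::ADDSUB A, B), provided SSE3 is available.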
6278 static SDValue LowerToAddSub(const BuildVectorSDNode *BV,
6279 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
6280 MVT VT = BV->getSimpleValueType(0);
6281 if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
6282 (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
6286 unsigned NumElts = VT.getVectorNumElements();
6287 SDValue InVec0 = DAG.getUNDEF(VT);
6288 SDValue InVec1 = DAG.getUNDEF(VT);
6290 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6291 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6293 // Odd-numbered elements in the input build vector are obtained from
6294 // adding two integer/float elements.
6295 // Even-numbered elements in the input build vector are obtained from
6296 // subtracting two integer/float elements.
6297 unsigned ExpectedOpcode = ISD::FSUB;
6298 unsigned NextExpectedOpcode = ISD::FADD;
6299 bool AddFound = false;
6300 bool SubFound = false;
6302 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6303 SDValue Op = BV->getOperand(i);
6305 // Skip 'undef' values.
6306 unsigned Opcode = Op.getOpcode();
6307 if (Opcode == ISD::UNDEF) {
6308 std::swap(ExpectedOpcode, NextExpectedOpcode);
6312 // Early exit if we found an unexpected opcode.
6313 if (Opcode != ExpectedOpcode)
6316 SDValue Op0 = Op.getOperand(0);
6317 SDValue Op1 = Op.getOperand(1);
6319 // Try to match the following pattern:
6320 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6321 // Early exit if we cannot match that sequence.
6322 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6323 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6324 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6325 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6326 Op0.getOperand(1) != Op1.getOperand(1))
6329 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6333 // We found a valid add/sub node. Update the information accordingly.
6339 // Update InVec0 and InVec1.
6340 if (InVec0.isUndef()) {
6341 InVec0 = Op0.getOperand(0);
6342 if (InVec0.getSimpleValueType() != VT)
6345 if (InVec1.isUndef()) {
6346 InVec1 = Op1.getOperand(0);
6347 if (InVec1.getSimpleValueType() != VT)
6351 // Make sure that the operands of each add/sub node always
6352 // come from the same pair of vectors.
6353 if (InVec0 != Op0.getOperand(0)) {
6354 if (ExpectedOpcode == ISD::FSUB)
6357 // FADD is commutable. Try to commute the operands
6358 // and then test again.
6359 std::swap(Op0, Op1);
6360 if (InVec0 != Op0.getOperand(0))
6364 if (InVec1 != Op1.getOperand(0))
6367 // Update the pair of expected opcodes.
6368 std::swap(ExpectedOpcode, NextExpectedOpcode);
6371 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6372 if (AddFound && SubFound && !InVec0.isUndef() && !InVec1.isUndef())
6373 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6378 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
6379 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
6380 const X86Subtarget &Subtarget,
6381 SelectionDAG &DAG) {
6382 MVT VT = BV->getSimpleValueType(0);
6383 unsigned NumElts = VT.getVectorNumElements();
6384 unsigned NumUndefsLO = 0;
6385 unsigned NumUndefsHI = 0;
6386 unsigned Half = NumElts/2;
6388 // Count the number of UNDEF operands in the input build_vector.
6389 for (unsigned i = 0, e = Half; i != e; ++i)
6390 if (BV->getOperand(i)->isUndef())
6393 for (unsigned i = Half, e = NumElts; i != e; ++i)
6394 if (BV->getOperand(i)->isUndef())
6397 // Early exit if this is either a build_vector of all UNDEFs or all the
6398 // operands but one are UNDEF.
6399 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6403 SDValue InVec0, InVec1;
6404 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) {
6405 // Try to match an SSE3 float HADD/HSUB.
6406 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6407 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6409 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6410 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6411 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget.hasSSSE3()) {
6412 // Try to match an SSSE3 integer HADD/HSUB.
6413 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6414 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6416 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6417 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6420 if (!Subtarget.hasAVX())
6423 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6424 // Try to match an AVX horizontal add/sub of packed single/double
6425 // precision floating point values from 256-bit vectors.
6426 SDValue InVec2, InVec3;
6427 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6428 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6429 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
6430 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
6431 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6433 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6434 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6435 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
6436 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
6437 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6438 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6439 // Try to match an AVX2 horizontal add/sub of signed integers.
6440 SDValue InVec2, InVec3;
6442 bool CanFold = true;
6444 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6445 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6446 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
6447 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
6448 X86Opcode = X86ISD::HADD;
6449 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6450 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6451 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
6452 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
6453 X86Opcode = X86ISD::HSUB;
6458 // Fold this build_vector into a single horizontal add/sub.
6459 // Do this only if the target has AVX2.
6460 if (Subtarget.hasAVX2())
6461 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6463 // Do not try to expand this build_vector into a pair of horizontal
6464 // add/sub if we can emit a pair of scalar add/sub.
6465 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6468 // Convert this build_vector into a pair of horizontal binop followed by
6470 bool isUndefLO = NumUndefsLO == Half;
6471 bool isUndefHI = NumUndefsHI == Half;
6472 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6473 isUndefLO, isUndefHI);
6477 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6478 VT == MVT::v16i16) && Subtarget.hasAVX()) {
6480 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6481 X86Opcode = X86ISD::HADD;
6482 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6483 X86Opcode = X86ISD::HSUB;
6484 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6485 X86Opcode = X86ISD::FHADD;
6486 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6487 X86Opcode = X86ISD::FHSUB;
6491 // Don't try to expand this build_vector into a pair of horizontal add/sub
6492 // if we can simply emit a pair of scalar add/sub.
6493 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6496 // Convert this build_vector into two horizontal add/sub followed by
6498 bool isUndefLO = NumUndefsLO == Half;
6499 bool isUndefHI = NumUndefsHI == Half;
6500 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6501 isUndefLO, isUndefHI);
6507 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
6508 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
6509 /// just apply the bit operation to the vectors.
6510 /// NOTE: It's not in our interest to start making a general-purpose vectorizer
6511 /// from this, but enough scalar bit operations are created from the later
6512 /// legalization + scalarization stages to need basic support.
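/// A small sketch of the transform (a0..a3 are arbitrary i32 values): the
/// v4i32 build_vector
///   (and a0, 1), (and a1, 2), (and a2, 4), (and a3, 8)
/// becomes
///   (and (build_vector a0, a1, a2, a3), (build_vector 1, 2, 4, 8))
/// as long as the scalar opcode is legal (or promotable) for the vector type.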
6513 static SDValue lowerBuildVectorToBitOp(SDValue Op, SelectionDAG &DAG) {
6515 MVT VT = Op.getSimpleValueType();
6516 unsigned NumElems = VT.getVectorNumElements();
6517 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6519 // Check that all elements have the same opcode.
6520 // TODO: Should we allow UNDEFS and if so how many?
6521 unsigned Opcode = Op.getOperand(0).getOpcode();
6522 for (unsigned i = 1; i < NumElems; ++i)
6523 if (Opcode != Op.getOperand(i).getOpcode())
6526 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
6533 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
6538 SmallVector<SDValue, 4> LHSElts, RHSElts;
6539 for (SDValue Elt : Op->ops()) {
6540 SDValue LHS = Elt.getOperand(0);
6541 SDValue RHS = Elt.getOperand(1);
6543 // We expect the canonicalized RHS operand to be the constant.
6544 if (!isa<ConstantSDNode>(RHS))
6546 LHSElts.push_back(LHS);
6547 RHSElts.push_back(RHS);
6550 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
6551 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
6552 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
6555 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
6556 /// functionality to do this, so it's all zeros, all ones, or some derivation
6557 /// that is cheap to calculate.
6558 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
6559 const X86Subtarget &Subtarget) {
6561 MVT VT = Op.getSimpleValueType();
6563 // Vectors containing all zeros can be matched by pxor and xorps.
6564 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6565 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6566 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6567 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6570 return getZeroVector(VT, Subtarget, DAG, DL);
6573 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6574 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6575 // vpcmpeqd on 256-bit vectors.
6576 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6577 if (VT == MVT::v4i32 || VT == MVT::v16i32 ||
6578 (VT == MVT::v8i32 && Subtarget.hasInt256()))
6581 return getOnesVector(VT, Subtarget, DAG, DL);
6588 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6591 MVT VT = Op.getSimpleValueType();
6592 MVT ExtVT = VT.getVectorElementType();
6593 unsigned NumElems = Op.getNumOperands();
6595 // Generate vectors for predicate vectors.
6596 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
6597 return LowerBUILD_VECTORvXi1(Op, DAG);
6599 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
6600 return VectorConstant;
6602 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
6603 if (SDValue AddSub = LowerToAddSub(BV, Subtarget, DAG))
6605 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
6606 return HorizontalOp;
6607 if (SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG))
6609 if (SDValue BitOp = lowerBuildVectorToBitOp(Op, DAG))
6612 unsigned EVTBits = ExtVT.getSizeInBits();
6614 unsigned NumZero = 0;
6615 unsigned NumNonZero = 0;
6616 uint64_t NonZeros = 0;
6617 bool IsAllConstants = true;
6618 SmallSet<SDValue, 8> Values;
6619 for (unsigned i = 0; i < NumElems; ++i) {
6620 SDValue Elt = Op.getOperand(i);
6624 if (Elt.getOpcode() != ISD::Constant &&
6625 Elt.getOpcode() != ISD::ConstantFP)
6626 IsAllConstants = false;
6627 if (X86::isZeroNode(Elt))
6630 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
6631 NonZeros |= ((uint64_t)1 << i);
6636 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6637 if (NumNonZero == 0)
6638 return DAG.getUNDEF(VT);
6640 // Special case for single non-zero, non-undef, element.
6641 if (NumNonZero == 1) {
6642 unsigned Idx = countTrailingZeros(NonZeros);
6643 SDValue Item = Op.getOperand(Idx);
6645 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6646 // the value are obviously zero, truncate the value to i32 and do the
6647 // insertion that way. Only do this if the value is non-constant or if the
6648 // value is a constant being inserted into element 0. It is cheaper to do
6649 // a constant pool load than it is to do a movd + shuffle.
6650 if (ExtVT == MVT::i64 && !Subtarget.is64Bit() &&
6651 (!IsAllConstants || Idx == 0)) {
6652 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6654 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6655 MVT VecVT = MVT::v4i32;
6657 // Truncate the value (which may itself be a constant) to i32, and
6658 // convert it to a vector with movd (S2V+shuffle to zero extend).
6659 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6660 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6661 return DAG.getBitcast(VT, getShuffleVectorZeroOrUndef(
6662 Item, Idx * 2, true, Subtarget, DAG));
6666 // If we have a constant or non-constant insertion into the low element of
6667 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6668 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6669 // depending on what the source datatype is.
6672 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6674 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6675 (ExtVT == MVT::i64 && Subtarget.is64Bit())) {
6676 if (VT.is512BitVector()) {
6677 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6678 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6679 Item, DAG.getIntPtrConstant(0, dl));
6681 assert((VT.is128BitVector() || VT.is256BitVector()) &&
6682 "Expected an SSE value type!");
6683 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6684 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6685 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6688 // We can't directly insert an i8 or i16 into a vector, so zero extend it to i32 first.
6690 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
6691 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
6692 if (VT.getSizeInBits() >= 256) {
6693 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
6694 if (Subtarget.hasAVX()) {
6695 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
6696 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6698 // Without AVX, we need to extend to a 128-bit vector and then
6699 // insert into the 256-bit vector.
6700 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
6701 SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
6702 Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl);
6705 assert(VT.is128BitVector() && "Expected an SSE value type!");
6706 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
6707 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6709 return DAG.getBitcast(VT, Item);
6713 // Is it a vector logical left shift?
6714 if (NumElems == 2 && Idx == 1 &&
6715 X86::isZeroNode(Op.getOperand(0)) &&
6716 !X86::isZeroNode(Op.getOperand(1))) {
6717 unsigned NumBits = VT.getSizeInBits();
6718 return getVShift(true, VT,
6719 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
6720 VT, Op.getOperand(1)),
6721 NumBits/2, DAG, *this, dl);
6724 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
6727 // Otherwise, if this is a vector with i32 or f32 elements, and the element
6728 // is a non-constant being inserted into an element other than the low one,
6729 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
6730 // movd/movss) to move this into the low element, then shuffle it into place.
6732 if (EVTBits == 32) {
6733 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6734 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
6738 // Splat is obviously ok. Let legalizer expand it to a shuffle.
6739 if (Values.size() == 1) {
6740 if (EVTBits == 32) {
6741 // Instead of a shuffle like this:
6742 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
6743 // Check if it's possible to issue this instead.
6744 // shuffle (vload ptr), undef, <1, 1, 1, 1>
6745 unsigned Idx = countTrailingZeros(NonZeros);
6746 SDValue Item = Op.getOperand(Idx);
6747 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
6748 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
6753 // A vector full of immediates; various special cases are already
6754 // handled, so this is best done with a single constant-pool load.
6758 // See if we can use a vector load to get all of the elements.
6759 if (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) {
6760 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
6761 if (SDValue LD = EltsFromConsecutiveLoads(VT, Ops, dl, DAG, false))
6765 // For AVX-length vectors, build the individual 128-bit pieces and use
6766 // shuffles to put them in place.
6767 if (VT.is256BitVector() || VT.is512BitVector()) {
6768 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
6770 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
6772 // Build both the lower and upper subvector.
6774 DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElems / 2));
6775 SDValue Upper = DAG.getBuildVector(
6776 HVT, dl, makeArrayRef(&Ops[NumElems / 2], NumElems / 2));
6778 // Recreate the wider vector with the lower and upper part.
6779 if (VT.is256BitVector())
6780 return concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
6781 return concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
6784 // Let legalizer expand 2-wide build_vectors.
6785 if (EVTBits == 64) {
6786 if (NumNonZero == 1) {
6787 // One half is zero or undef.
6788 unsigned Idx = countTrailingZeros(NonZeros);
6789 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
6790 Op.getOperand(Idx));
6791 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
6796 // If element VT is < 32 bits, convert it to inserts into a zero vector.
6797 if (EVTBits == 8 && NumElems == 16)
6798 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
6799 DAG, Subtarget, *this))
6802 if (EVTBits == 16 && NumElems == 8)
6803 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
6804 DAG, Subtarget, *this))
6807 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
6808 if (EVTBits == 32 && NumElems == 4)
6809 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this))
6812 // If element VT is == 32 bits, turn it into a number of shuffles.
6813 if (NumElems == 4 && NumZero > 0) {
6814 SmallVector<SDValue, 8> Ops(NumElems);
6815 for (unsigned i = 0; i < 4; ++i) {
6816 bool isZero = !(NonZeros & (1ULL << i));
6818 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
6820 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
6823 for (unsigned i = 0; i < 2; ++i) {
6824 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
6827 Ops[i] = Ops[i*2]; // Must be a zero vector.
6830 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
6833 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
6836 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
6841 bool Reverse1 = (NonZeros & 0x3) == 2;
6842 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
6846 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
6847 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
6849 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
6852 if (Values.size() > 1 && VT.is128BitVector()) {
6853 // Check for a build vector from mostly shuffle plus few inserting.
6854 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
6857 // For SSE 4.1, use insertps to put the high elements into the low element.
6858 if (Subtarget.hasSSE41()) {
6860 if (!Op.getOperand(0).isUndef())
6861 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
6863 Result = DAG.getUNDEF(VT);
6865 for (unsigned i = 1; i < NumElems; ++i) {
6866 if (Op.getOperand(i).isUndef()) continue;
6867 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
6868 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
6873 // Otherwise, expand into a number of unpckl*, start by extending each of
6874 // our (non-undef) elements to the full vector width with the element in the
6875 // bottom slot of the vector (which generates no code for SSE).
6876 SmallVector<SDValue, 8> Ops(NumElems);
6877 for (unsigned i = 0; i < NumElems; ++i) {
6878 if (!Op.getOperand(i).isUndef())
6879 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
6881 Ops[i] = DAG.getUNDEF(VT);
6884 // Next, we iteratively mix elements, e.g. for v4f32:
6885 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
6886 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
6887 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
6888 unsigned EltStride = NumElems >> 1;
6889 while (EltStride != 0) {
6890 for (unsigned i = 0; i < EltStride; ++i) {
6891 // If Ops[i+EltStride] is undef and this is the first round of mixing,
6892 // then it is safe to just drop this shuffle: V[i] is already in the
6893 // right place, the one element (since it's the first round) being
6894 // inserted as undef can be dropped. This isn't safe for successive
6895 // rounds because they will permute elements within both vectors.
6896 if (Ops[i+EltStride].isUndef() &&
6897 EltStride == NumElems/2)
6900 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i], Ops[i + EltStride]);
6909 // 256-bit AVX can use the vinsertf128 instruction
6910 // to create 256-bit vectors from two other 128-bit ones.
6911 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
6913 MVT ResVT = Op.getSimpleValueType();
6915 assert((ResVT.is256BitVector() ||
6916 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
6918 SDValue V1 = Op.getOperand(0);
6919 SDValue V2 = Op.getOperand(1);
6920 unsigned NumElems = ResVT.getVectorNumElements();
6921 if (ResVT.is256BitVector())
6922 return concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
6924 if (Op.getNumOperands() == 4) {
6925 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
6926 ResVT.getVectorNumElements()/2);
6927 SDValue V3 = Op.getOperand(2);
6928 SDValue V4 = Op.getOperand(3);
6929 return concat256BitVectors(
6930 concat128BitVectors(V1, V2, HalfVT, NumElems / 2, DAG, dl),
6931 concat128BitVectors(V3, V4, HalfVT, NumElems / 2, DAG, dl), ResVT,
6934 return concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
6937 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
6938 const X86Subtarget &Subtarget,
6939 SelectionDAG & DAG) {
6941 MVT ResVT = Op.getSimpleValueType();
6942 unsigned NumOfOperands = Op.getNumOperands();
6944 assert(isPowerOf2_32(NumOfOperands) &&
6945 "Unexpected number of operands in CONCAT_VECTORS");
6947 SDValue Undef = DAG.getUNDEF(ResVT);
6948 if (NumOfOperands > 2) {
6949 // Specialize the cases when all, or all but one, of the operands are undef.
6950 unsigned NumOfDefinedOps = 0;
6952 for (unsigned i = 0; i < NumOfOperands; i++)
6953 if (!Op.getOperand(i).isUndef()) {
6957 if (NumOfDefinedOps == 0)
6959 if (NumOfDefinedOps == 1) {
6960 unsigned SubVecNumElts =
6961 Op.getOperand(OpIdx).getValueType().getVectorNumElements();
6962 SDValue IdxVal = DAG.getIntPtrConstant(SubVecNumElts * OpIdx, dl);
6963 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef,
6964 Op.getOperand(OpIdx), IdxVal);
6967 MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
6968 ResVT.getVectorNumElements()/2);
6969 SmallVector<SDValue, 2> Ops;
6970 for (unsigned i = 0; i < NumOfOperands/2; i++)
6971 Ops.push_back(Op.getOperand(i));
6972 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT, Ops);
6974 for (unsigned i = NumOfOperands/2; i < NumOfOperands; i++)
6975 Ops.push_back(Op.getOperand(i));
6976 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT, Ops);
6977 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
6981 SDValue V1 = Op.getOperand(0);
6982 SDValue V2 = Op.getOperand(1);
6983 unsigned NumElems = ResVT.getVectorNumElements();
6984 assert(V1.getValueType() == V2.getValueType() &&
6985 V1.getValueType().getVectorNumElements() == NumElems/2 &&
6986 "Unexpected operands in CONCAT_VECTORS");
6988 if (ResVT.getSizeInBits() >= 16)
6989 return Op; // The operation is legal with KUNPCK
6991 bool IsZeroV1 = ISD::isBuildVectorAllZeros(V1.getNode());
6992 bool IsZeroV2 = ISD::isBuildVectorAllZeros(V2.getNode());
6993 SDValue ZeroVec = getZeroVector(ResVT, Subtarget, DAG, dl);
6994 if (IsZeroV1 && IsZeroV2)
6997 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
6999 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V1, ZeroIdx);
7001 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, ZeroVec, V1, ZeroIdx);
7003 SDValue IdxVal = DAG.getIntPtrConstant(NumElems/2, dl);
7005 V2 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V2, IdxVal);
7008 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, ZeroVec, V2, IdxVal);
7010 V1 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Undef, V1, ZeroIdx);
7011 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, V1, V2, IdxVal);
7014 static SDValue LowerCONCAT_VECTORS(SDValue Op,
7015 const X86Subtarget &Subtarget,
7016 SelectionDAG &DAG) {
7017 MVT VT = Op.getSimpleValueType();
7018 if (VT.getVectorElementType() == MVT::i1)
7019 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
7021 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7022 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7023 Op.getNumOperands() == 4)));
7025 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7026 // from two other 128-bit ones.
7028 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7029 return LowerAVXCONCAT_VECTORS(Op, DAG);
7032 //===----------------------------------------------------------------------===//
7033 // Vector shuffle lowering
7035 // This is an experimental code path for lowering vector shuffles on x86. It is
7036 // designed to handle arbitrary vector shuffles and blends, gracefully
7037 // degrading performance as necessary. It works hard to recognize idiomatic
7038 // shuffles and lower them to optimal instruction patterns without leaving
7039 // a framework that allows reasonably efficient handling of all vector shuffle operations.
7041 //===----------------------------------------------------------------------===//
7043 /// \brief Tiny helper function to identify a no-op mask.
7045 /// This is a somewhat boring predicate function. It checks whether the mask
7046 /// array input, which is assumed to be a single-input shuffle mask of the kind
7047 /// used by the X86 shuffle instructions (not a fully general
7048 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7049 /// in-place shuffle are 'no-op's.
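///
/// For example, with 4 elements the masks <0, 1, 2, 3> and <0, -1, 2, -1>
/// are no-ops, while <1, 0, 2, 3> is not.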
7050 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7051 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7052 assert(Mask[i] >= -1 && "Out of bound mask element!");
7053 if (Mask[i] >= 0 && Mask[i] != i)
7059 /// \brief Test whether there are elements crossing 128-bit lanes in this shuffle mask.
7062 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7063 /// and we routinely test for these.
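///
/// For example, for v8f32 the mask <0, 4, 1, 5, 2, 6, 3, 7> crosses lanes
/// (element 4 comes from the upper 128-bit lane but lands in the lower lane),
/// whereas <1, 0, 3, 2, 5, 4, 7, 6> stays within each lane.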
7064 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7065 int LaneSize = 128 / VT.getScalarSizeInBits();
7066 int Size = Mask.size();
7067 for (int i = 0; i < Size; ++i)
7068 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7073 /// \brief Test whether a shuffle mask is equivalent within each sub-lane.
7075 /// This checks a shuffle mask to see if it is performing the same
7076 /// lane-relative shuffle in each sub-lane. This trivially implies
7077 /// that it is also not lane-crossing. It may however involve a blend from the
7078 /// same lane of a second vector.
7080 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7081 /// non-trivial to compute in the face of undef lanes. The representation is
7082 /// suitable for use with existing 128-bit shuffles as entries from the second
7083 /// vector have been remapped to [LaneSize, 2*LaneSize).
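///
/// For example, the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats per
/// 128-bit lane, and the resulting RepeatedMask is <0, 5, 2, 7> (the
/// second-vector entries map into the [4, 8) range).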
7084 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
7086 SmallVectorImpl<int> &RepeatedMask) {
7087 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
7088 RepeatedMask.assign(LaneSize, -1);
7089 int Size = Mask.size();
7090 for (int i = 0; i < Size; ++i) {
7093 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7094 // This entry crosses lanes, so there is no way to model this shuffle.
7097 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7098 // Adjust second vector indices to start at LaneSize instead of Size.
7099 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
7100 : Mask[i] % LaneSize + LaneSize;
7101 if (RepeatedMask[i % LaneSize] < 0)
7102 // This is the first non-undef entry in this slot of a 128-bit lane.
7103 RepeatedMask[i % LaneSize] = LocalM;
7104 else if (RepeatedMask[i % LaneSize] != LocalM)
7105 // Found a mismatch with the repeated mask.
7111 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
7113 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7114 SmallVectorImpl<int> &RepeatedMask) {
7115 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
7118 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
7120 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7121 SmallVectorImpl<int> &RepeatedMask) {
7122 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
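/// Scale a shuffle mask by replicating each entry \p Scale times and adjusting
/// the indices accordingly. For example, scaling the mask <0, 2, -1, 3> by 2
/// yields <0, 1, 4, 5, -1, -1, 6, 7>; sentinel values are simply repeated.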
7125 static void scaleShuffleMask(int Scale, ArrayRef<int> Mask,
7126 SmallVectorImpl<int> &ScaledMask) {
7127 assert(0 < Scale && "Unexpected scaling factor");
7128 int NumElts = Mask.size();
7129 ScaledMask.assign(NumElts * Scale, -1);
7131 for (int i = 0; i != NumElts; ++i) {
7134 // Repeat sentinel values in every mask element.
7136 for (int s = 0; s != Scale; ++s)
7137 ScaledMask[(Scale * i) + s] = M;
7141 // Scale mask element and increment across each mask element.
7142 for (int s = 0; s != Scale; ++s)
7143 ScaledMask[(Scale * i) + s] = (Scale * M) + s;
7147 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7150 /// This is a fast way to test a shuffle mask against a fixed pattern:
7152 /// if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
7154 /// It returns true if the mask is exactly as wide as the argument list, and
7155 /// each element of the mask is either -1 (signifying undef) or the value given
7156 /// in the argument.
7157 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7158 ArrayRef<int> ExpectedMask) {
7159 if (Mask.size() != ExpectedMask.size())
7162 int Size = Mask.size();
7164 // If the values are build vectors, we can look through them to find
7165 // equivalent inputs that make the shuffles equivalent.
7166 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7167 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7169 for (int i = 0; i < Size; ++i) {
7170 assert(Mask[i] >= -1 && "Out of bound mask element!");
7171 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
7172 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
7173 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
7174 if (!MaskBV || !ExpectedBV ||
7175 MaskBV->getOperand(Mask[i] % Size) !=
7176 ExpectedBV->getOperand(ExpectedMask[i] % Size))
7184 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
7186 /// The masks must be exactly the same width.
7188 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
7189 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
7191 /// SM_SentinelZero is accepted as a valid negative index but must match in both.
7192 static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
7193 ArrayRef<int> ExpectedMask) {
7194 int Size = Mask.size();
7195 if (Size != (int)ExpectedMask.size())
7198 for (int i = 0; i < Size; ++i)
7199 if (Mask[i] == SM_SentinelUndef)
7201 else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
7203 else if (Mask[i] != ExpectedMask[i])
7209 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7211 /// This helper function produces an 8-bit shuffle immediate corresponding to
7212 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7213 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for 128-bit vectors.
7216 /// NB: We rely heavily on "undef" masks preserving the input lane.
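///
/// For example, the mask <3, 1, 2, 0> yields the immediate 0x27
/// (0b00'10'01'11): two bits per result lane with lane 0 in the lowest bit
/// pair, and undef lanes default to their own index.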
7217 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
7218 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7219 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7220 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7221 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7222 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7225 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
7226 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
7227 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
7228 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
7232 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
7233 SelectionDAG &DAG) {
7234 return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
7237 /// \brief Compute whether each element of a shuffle is zeroable.
7239 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7240 /// Either it is an undef element in the shuffle mask, the element of the input
7241 /// referenced is undef, or the element of the input referenced is known to be
7242 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7243 /// as many lanes with this technique as possible to simplify the remaining shuffle.
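///
/// For example, shuffling V1 against an all-zeros V2 with the mask
/// <0, 4, -1, 6> makes elements 1, 2 and 3 zeroable: elements 1 and 3 read
/// from the zero vector and element 2 is undef in the mask.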
7245 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7246 SDValue V1, SDValue V2) {
7247 SmallBitVector Zeroable(Mask.size(), false);
7248 V1 = peekThroughBitcasts(V1);
7249 V2 = peekThroughBitcasts(V2);
7251 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7252 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7254 int VectorSizeInBits = V1.getValueType().getSizeInBits();
7255 int ScalarSizeInBits = VectorSizeInBits / Mask.size();
7256 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
7258 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7260 // Handle the easy cases.
7261 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7266 // Determine shuffle input and normalize the mask.
7267 SDValue V = M < Size ? V1 : V2;
7270 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
7271 if (V.getOpcode() != ISD::BUILD_VECTOR)
7274 // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
7275 // the (larger) source element must be UNDEF/ZERO.
7276 if ((Size % V.getNumOperands()) == 0) {
7277 int Scale = Size / V->getNumOperands();
7278 SDValue Op = V.getOperand(M / Scale);
7279 if (Op.isUndef() || X86::isZeroNode(Op))
7281 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
7282 APInt Val = Cst->getAPIntValue();
7283 Val = Val.lshr((M % Scale) * ScalarSizeInBits);
7284 Val = Val.getLoBits(ScalarSizeInBits);
7285 Zeroable[i] = (Val == 0);
7286 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
7287 APInt Val = Cst->getValueAPF().bitcastToAPInt();
7288 Val = Val.lshr((M % Scale) * ScalarSizeInBits);
7289 Val = Val.getLoBits(ScalarSizeInBits);
7290 Zeroable[i] = (Val == 0);
7295 // If the BUILD_VECTOR has more elements, then all the (smaller) source
7296 // elements must be UNDEF or ZERO.
7297 if ((V.getNumOperands() % Size) == 0) {
7298 int Scale = V->getNumOperands() / Size;
7299 bool AllZeroable = true;
7300 for (int j = 0; j < Scale; ++j) {
7301 SDValue Op = V.getOperand((M * Scale) + j);
7302 AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
7304 Zeroable[i] = AllZeroable;
7312 /// Try to lower a shuffle with a single PSHUFB of V1.
7313 /// This is only possible if V2 is unused (at all, or only for zero elements).
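/// For a v8i16 shuffle, each element index M expands to the byte indices
/// 2*M and 2*M+1 in the PSHUFB control vector built below; zeroable elements
/// are encoded as 0x80, whose sign bit selects a zero byte.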
7314 static SDValue lowerVectorShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
7315 ArrayRef<int> Mask, SDValue V1,
7317 const X86Subtarget &Subtarget,
7318 SelectionDAG &DAG) {
7319 int Size = Mask.size();
7320 int LaneSize = 128 / VT.getScalarSizeInBits();
7321 const int NumBytes = VT.getSizeInBits() / 8;
7322 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
7324 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
7325 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
7326 (Subtarget.hasBWI() && VT.is512BitVector()));
7328 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7330 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
7331 // Sign bit set in i8 mask means zero element.
7332 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
7334 for (int i = 0; i < NumBytes; ++i) {
7335 int M = Mask[i / NumEltBytes];
7337 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
7340 if (Zeroable[i / NumEltBytes]) {
7341 PSHUFBMask[i] = ZeroMask;
7348 // PSHUFB can't cross lanes, ensure this doesn't happen.
7349 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
7353 M = M * NumEltBytes + (i % NumEltBytes);
7354 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
7357 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
7358 return DAG.getBitcast(
7359 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V1),
7360 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
7363 // X86 has dedicated unpack instructions that can handle specific blend
7364 // operations: UNPCKH and UNPCKL.
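// For example, for v4i32 the patterns built below are UNPCKL <0, 4, 1, 5>
// and UNPCKH <2, 6, 3, 7>; for wider types the same pattern repeats within
// each 128-bit lane.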
7365 static SDValue lowerVectorShuffleWithUNPCK(const SDLoc &DL, MVT VT,
7366 ArrayRef<int> Mask, SDValue V1,
7367 SDValue V2, SelectionDAG &DAG) {
7368 int NumElts = VT.getVectorNumElements();
7369 int NumEltsInLane = 128 / VT.getScalarSizeInBits();
7370 SmallVector<int, 8> Unpckl(NumElts);
7371 SmallVector<int, 8> Unpckh(NumElts);
7373 for (int i = 0; i < NumElts; ++i) {
7374 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
7375 int LoPos = (i % NumEltsInLane) / 2 + LaneStart + NumElts * (i % 2);
7376 int HiPos = LoPos + NumEltsInLane / 2;
7381 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
7382 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
7383 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
7384 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
7386 // Commute and try again.
7387 ShuffleVectorSDNode::commuteMask(Unpckl);
7388 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
7389 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
7391 ShuffleVectorSDNode::commuteMask(Unpckh);
7392 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
7393 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
7398 /// \brief Try to emit a bitmask instruction for a shuffle.
7400 /// This handles cases where we can model a blend exactly as a bitmask due to
7401 /// one of the inputs being zeroable.
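///
/// For example, with an all-zeros V2 the v4i32 mask <0, 4, 2, 4> can be
/// lowered to (and V1, <-1, 0, -1, 0>), since the remaining elements are
/// zeroable anyway.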
7402 static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
7403 SDValue V2, ArrayRef<int> Mask,
7404 SelectionDAG &DAG) {
7405 MVT EltVT = VT.getVectorElementType();
7406 int NumEltBits = EltVT.getSizeInBits();
7407 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7408 SDValue Zero = DAG.getConstant(0, DL, IntEltVT);
7409 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), DL,
7411 if (EltVT.isFloatingPoint()) {
7412 Zero = DAG.getBitcast(EltVT, Zero);
7413 AllOnes = DAG.getBitcast(EltVT, AllOnes);
7415 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7416 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7418 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7421 if (Mask[i] % Size != i)
7422 return SDValue(); // Not a blend.
7424 V = Mask[i] < Size ? V1 : V2;
7425 else if (V != (Mask[i] < Size ? V1 : V2))
7426 return SDValue(); // Can only let one input through the mask.
7428 VMaskOps[i] = AllOnes;
7431 return SDValue(); // No non-zeroable elements!
7433 SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps);
7434 V = DAG.getNode(VT.isFloatingPoint()
7435 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7440 /// \brief Try to emit a blend instruction for a shuffle using bit math.
7442 /// This is used as a fallback approach when first class blend instructions are
7443 /// unavailable. Currently it is only suitable for integer vectors, but could
7444 /// be generalized for floating point vectors if desirable.
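///
/// Conceptually this emits (V1 & M) | (~M & V2), where M is a build_vector
/// with all-ones in the lanes taken from V1 and zeros in the lanes taken
/// from V2; the ~M & V2 half is expressed with X86ISD::ANDNP below.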
7445 static SDValue lowerVectorShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
7446 SDValue V2, ArrayRef<int> Mask,
7447 SelectionDAG &DAG) {
7448 assert(VT.isInteger() && "Only supports integer vector types!");
7449 MVT EltVT = VT.getVectorElementType();
7450 int NumEltBits = EltVT.getSizeInBits();
7451 SDValue Zero = DAG.getConstant(0, DL, EltVT);
7452 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), DL,
7454 SmallVector<SDValue, 16> MaskOps;
7455 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7456 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
7457 return SDValue(); // Shuffled input!
7458 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
7461 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
7462 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
7463 // We have to cast V2 around.
7464 MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
7465 V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
7466 DAG.getBitcast(MaskVT, V1Mask),
7467 DAG.getBitcast(MaskVT, V2)));
7468 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
7471 /// \brief Try to emit a blend instruction for a shuffle.
7473 /// This doesn't do any checks for the availability of instructions for blending
7474 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7475 /// be matched in the backend with the type given. What it does check for is
7476 /// that the shuffle mask is a blend, or convertible into a blend with zero.
7477 static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
7478 SDValue V2, ArrayRef<int> Original,
7479 const X86Subtarget &Subtarget,
7480 SelectionDAG &DAG) {
7481 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7482 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7483 SmallVector<int, 8> Mask(Original.begin(), Original.end());
7484 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7485 bool ForceV1Zero = false, ForceV2Zero = false;
7487 // Attempt to generate the binary blend mask. If an input is zero then
7488 // we can use any lane.
7489 // TODO: generalize the zero matching to any scalar like isShuffleEquivalent.
7490 unsigned BlendMask = 0;
7491 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7497 if (M == i + Size) {
7498 BlendMask |= 1u << i;
7509 BlendMask |= 1u << i;
7514 return SDValue(); // Shuffled input!
7517 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
7519 V1 = getZeroVector(VT, Subtarget, DAG, DL);
7521 V2 = getZeroVector(VT, Subtarget, DAG, DL);
7523 auto ScaleBlendMask = [](unsigned BlendMask, int Size, int Scale) {
7524 unsigned ScaledMask = 0;
7525 for (int i = 0; i != Size; ++i)
7526 if (BlendMask & (1u << i))
7527 for (int j = 0; j != Scale; ++j)
7528 ScaledMask |= 1u << (i * Scale + j);
7532 switch (VT.SimpleTy) {
7537 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7538 DAG.getConstant(BlendMask, DL, MVT::i8));
7542 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
7546 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7547 // that instruction.
7548 if (Subtarget.hasAVX2()) {
7549 // Scale the blend by the number of 32-bit dwords per element.
7550 int Scale = VT.getScalarSizeInBits() / 32;
7551 BlendMask = ScaleBlendMask(BlendMask, Mask.size(), Scale);
7552 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7553 V1 = DAG.getBitcast(BlendVT, V1);
7554 V2 = DAG.getBitcast(BlendVT, V2);
7555 return DAG.getBitcast(
7556 VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7557 DAG.getConstant(BlendMask, DL, MVT::i8)));
7561 // For integer shuffles we need to expand the mask and cast the inputs to
7562 // v8i16s prior to blending.
7563 int Scale = 8 / VT.getVectorNumElements();
7564 BlendMask = ScaleBlendMask(BlendMask, Mask.size(), Scale);
7565 V1 = DAG.getBitcast(MVT::v8i16, V1);
7566 V2 = DAG.getBitcast(MVT::v8i16, V2);
7567 return DAG.getBitcast(VT,
7568 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7569 DAG.getConstant(BlendMask, DL, MVT::i8)));
7573 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
7574 SmallVector<int, 8> RepeatedMask;
7575 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7576 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7577 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7579 for (int i = 0; i < 8; ++i)
7580 if (RepeatedMask[i] >= 8)
7581 BlendMask |= 1u << i;
7582 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7583 DAG.getConstant(BlendMask, DL, MVT::i8));
7589 assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
7590 "256-bit byte-blends require AVX2 support!");
7592 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
7593 if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, DAG))
7596 // Scale the blend by the number of bytes per element.
7597 int Scale = VT.getScalarSizeInBits() / 8;
7599 // This form of blend is always done on bytes. Compute the byte vector type.
7601 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
7603 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7604 // mix of LLVM's code generator and the x86 backend. We tell the code
7605 // generator that boolean values in the elements of an x86 vector register
7606 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7607 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7608 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7609 // of the element (the remaining are ignored) and 0 in that high bit would
7610 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7611 // the LLVM model for boolean values in vector elements gets the relevant
7612 // bit set, it is set backwards and over constrained relative to x86's actual behavior.
7614 SmallVector<SDValue, 32> VSELECTMask;
7615 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7616 for (int j = 0; j < Scale; ++j)
7617 VSELECTMask.push_back(
7618 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7619 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
7622 V1 = DAG.getBitcast(BlendVT, V1);
7623 V2 = DAG.getBitcast(BlendVT, V2);
7624 return DAG.getBitcast(
7625 VT, DAG.getNode(ISD::VSELECT, DL, BlendVT,
7626 DAG.getBuildVector(BlendVT, DL, VSELECTMask), V1, V2));
7630 llvm_unreachable("Not a supported integer vector type!");
7634 /// \brief Try to lower as a blend of elements from two inputs followed by
7635 /// a single-input permutation.
7637 /// This matches the pattern where we can blend elements from two inputs and
7638 /// then reduce the shuffle to a single-input permutation.
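/// For example, the v4i32 mask <1, 4, 3, 6> can be lowered as a blend of V1
/// and V2 with the mask <4, 1, 6, 3> followed by the single-input permute
/// <1, 0, 3, 2>.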
7639 static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
7640 SDValue V1, SDValue V2,
7642 SelectionDAG &DAG) {
7643 // We build up the blend mask while checking whether a blend is a viable way
7644 // to reduce the shuffle.
7645 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7646 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7648 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7652 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7654 if (BlendMask[Mask[i] % Size] < 0)
7655 BlendMask[Mask[i] % Size] = Mask[i];
7656 else if (BlendMask[Mask[i] % Size] != Mask[i])
7657 return SDValue(); // Can't blend in the needed input!
7659 PermuteMask[i] = Mask[i] % Size;
7662 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7663 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7666 /// \brief Generic routine to decompose a shuffle and blend into independent
7667 /// blends and permutes.
7669 /// This matches the extremely common pattern for handling combined
7670 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7671 /// operations. It will try to pick the best arrangement of shuffles and blends.
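///
/// For example, the v4i32 mask <0, 6, 3, 5> decomposes into
///   shuffle V1, undef, <0, -1, 3, -1>
///   shuffle V2, undef, <-1, 2, -1, 1>
/// followed by the two-input blend mask <0, 5, 2, 7>.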
7673 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(const SDLoc &DL,
7677 SelectionDAG &DAG) {
7678 // Shuffle the input elements into the desired positions in V1 and V2 and
7679 // blend them together.
7680 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7681 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7682 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7683 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7684 if (Mask[i] >= 0 && Mask[i] < Size) {
7685 V1Mask[i] = Mask[i];
7687 } else if (Mask[i] >= Size) {
7688 V2Mask[i] = Mask[i] - Size;
7689 BlendMask[i] = i + Size;
7692 // Try to lower with the simpler initial blend strategy unless one of the
7693 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7694 // shuffle may be able to fold with a load or other benefit. However, when
7695 // we'll have to do 2x as many shuffles in order to achieve this, blending
7696 // first is a better strategy.
7697 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7698 if (SDValue BlendPerm =
7699 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7702 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7703 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7704 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7707 /// \brief Try to lower a vector shuffle as a byte rotation.
7709 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7710 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7711 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7712 /// try to generically lower a vector shuffle through such a pattern. It
7713 /// does not check for the profitability of lowering either as PALIGNR or
7714 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7715 /// This matches shuffle vectors that look like:
7717 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7719 /// Essentially it concatenates V1 and V2, shifts right by some number of
7720 /// elements, and takes the low elements as the result. Note that while this is
7721 /// specified as a *right shift* because x86 is little-endian, it is a *left
7722 /// rotate* of the vector lanes.
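///
/// For the v8i16 example above, the matching below finds a rotation of 3
/// elements with Lo = V1 and Hi = V2; at 2 bytes per element this becomes a
/// PALIGNR byte immediate of 6 on SSSE3 targets.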
7723 static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT,
7724 SDValue V1, SDValue V2,
7726 const X86Subtarget &Subtarget,
7727 SelectionDAG &DAG) {
7728 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7730 int NumElts = Mask.size();
7731 int NumLanes = VT.getSizeInBits() / 128;
7732 int NumLaneElts = NumElts / NumLanes;
7734 // We need to detect various ways of spelling a rotation:
7735 // [11, 12, 13, 14, 15, 0, 1, 2]
7736 // [-1, 12, 13, 14, -1, -1, 1, -1]
7737 // [-1, -1, -1, -1, -1, -1, 1, 2]
7738 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7739 // [-1, 4, 5, 6, -1, -1, 9, -1]
7740 // [-1, 4, 5, 6, -1, -1, -1, -1]
7743 for (int l = 0; l < NumElts; l += NumLaneElts) {
7744 for (int i = 0; i < NumLaneElts; ++i) {
7745 if (Mask[l + i] < 0)
7748 // Get the mod-Size index and lane correct it.
7749 int LaneIdx = (Mask[l + i] % NumElts) - l;
7750 // Make sure it was in this lane.
7751 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7754 // Determine where a rotated vector would have started.
7755 int StartIdx = i - LaneIdx;
7757 // The identity rotation isn't interesting, stop.
7760 // If we found the tail of a vector the rotation must be the missing
7761 // front. If we found the head of a vector, it must be how much of the head is missing.
7763 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7766 Rotation = CandidateRotation;
7767 else if (Rotation != CandidateRotation)
7768 // The rotations don't match, so we can't match this mask.
7771 // Compute which value this mask is pointing at.
7772 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7774 // Compute which of the two target values this index should be assigned
7775 // to. This reflects whether the high elements are remaining or the low
7776 // elements are remaining.
7777 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7779 // Either set up this value if we've not encountered it before, or check
7780 // that it remains consistent.
7783 else if (TargetV != MaskV)
7784 // This may be a rotation, but it pulls from the inputs in some
7785 // unsupported interleaving.
7790 // Check that we successfully analyzed the mask, and normalize the results.
7791 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7792 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7798 // Cast the inputs to i8 vectors of correct length to match PALIGNR or PSLLDQ/PSRLDQ.
7800 MVT ByteVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7801 Lo = DAG.getBitcast(ByteVT, Lo);
7802 Hi = DAG.getBitcast(ByteVT, Hi);
7804 // The actual rotate instruction rotates bytes, so we need to scale the
7805 // rotation based on how many bytes are in the vector lane.
7806 int Scale = 16 / NumLaneElts;
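// For example, v8i16 has 8 elements per 128-bit lane, so Scale == 2 and an
// element rotation of 3 becomes a byte rotation of 6.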
7808 // SSSE3 targets can use the palignr instruction.
7809 if (Subtarget.hasSSSE3()) {
7810 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
7811 "512-bit PALIGNR requires BWI instructions");
7812 return DAG.getBitcast(
7813 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
7814 DAG.getConstant(Rotation * Scale, DL, MVT::i8)));
7817 assert(VT.is128BitVector() &&
7818 "Rotate-based lowering only supports 128-bit lowering!");
7819 assert(Mask.size() <= 16 &&
7820 "Can shuffle at most 16 bytes in a 128-bit vector!");
7821 assert(ByteVT == MVT::v16i8 &&
7822 "SSE2 rotate lowering only needed for v16i8!");
7824 // Default SSE2 implementation
7825 int LoByteShift = 16 - Rotation * Scale;
7826 int HiByteShift = Rotation * Scale;
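// Emulate PALIGNR with explicit byte shifts: shift Hi right to drop the
// rotated-out low bytes, shift Lo left into the vacated high bytes, and OR
// the two halves back together.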
7828 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
7829 DAG.getConstant(LoByteShift, DL, MVT::i8));
7830 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
7831 DAG.getConstant(HiByteShift, DL, MVT::i8));
7832 return DAG.getBitcast(VT,
7833 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
7836 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7838 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
7839 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
7840 /// matches elements from one of the input vectors shuffled to the left or
7841 /// right with zeroable elements 'shifted in'. It handles both the strictly
7842 /// bit-wise element shifts and the byte shift across an entire 128-bit double quad word.
7845 /// PSLL : (little-endian) left bit shift.
7846 /// [ zz, 0, zz, 2 ]
7847 /// [ -1, 4, zz, -1 ]
7848 /// PSRL : (little-endian) right bit shift.
7850 /// [ -1, -1, 7, zz]
7851 /// PSLLDQ : (little-endian) left byte shift
7852 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
7853 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
7854 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
7855 /// PSRLDQ : (little-endian) right byte shift
7856 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
7857 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
7858 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
7859 static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
7860 SDValue V2, ArrayRef<int> Mask,
7861 const X86Subtarget &Subtarget,
7862 SelectionDAG &DAG) {
7863 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7865 int Size = Mask.size();
7866 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7868 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
7869 for (int i = 0; i < Size; i += Scale)
7870 for (int j = 0; j < Shift; ++j)
7871 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
7877 auto MatchShift = [&](int Shift, int Scale, bool Left, SDValue V) {
7878 for (int i = 0; i != Size; i += Scale) {
7879 unsigned Pos = Left ? i + Shift : i;
7880 unsigned Low = Left ? i : i + Shift;
7881 unsigned Len = Scale - Shift;
7882 if (!isSequentialOrUndefInRange(Mask, Pos, Len,
7883 Low + (V == V1 ? 0 : Size)))
7887 int ShiftEltBits = VT.getScalarSizeInBits() * Scale;
7888 bool ByteShift = ShiftEltBits > 64;
7889 unsigned OpCode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
7890 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
7891 int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);
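// For example, shifting v8i16 by 2 elements across the whole vector
// (Scale == 8) gives ShiftEltBits == 128, so this becomes a byte shift of
// 2 * 16 / 8 == 4 bytes.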
7893 // Normalize the scale for byte shifts to still produce an i64 element type.
7895 Scale = ByteShift ? Scale / 2 : Scale;
7897 // We need to round trip through the appropriate type for the shift.
7898 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7899 MVT ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8)
7900 : MVT::getVectorVT(ShiftSVT, Size / Scale);
7901 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7902 "Illegal integer vector type");
7903 V = DAG.getBitcast(ShiftVT, V);
7905 V = DAG.getNode(OpCode, DL, ShiftVT, V,
7906 DAG.getConstant(ShiftAmt, DL, MVT::i8));
7907 return DAG.getBitcast(VT, V);
7910 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7911 // keep doubling the size of the integer elements up to that. We can
7912 // then shift the elements of the integer vector by whole multiples of
7913 // their width within the elements of the larger integer vector. Test each
7914 // multiple to see if we can find a match with the moved element indices
7915 // and that the shifted in elements are all zeroable.
7916 unsigned MaxWidth = (VT.is512BitVector() && !Subtarget.hasBWI() ? 64 : 128);
7917 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= MaxWidth; Scale *= 2)
7918 for (int Shift = 1; Shift != Scale; ++Shift)
7919 for (bool Left : {true, false})
7920 if (CheckZeros(Shift, Scale, Left))
7921 for (SDValue V : {V1, V2})
7922 if (SDValue Match = MatchShift(Shift, Scale, Left, V))
7929 /// \brief Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
7930 static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
7931 SDValue V2, ArrayRef<int> Mask,
7932 SelectionDAG &DAG) {
7933 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7934 assert(!Zeroable.all() && "Fully zeroable shuffle mask");
7936 int Size = Mask.size();
7937 int HalfSize = Size / 2;
7938 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7940 // Upper half must be undefined.
7941 if (!isUndefInRange(Mask, HalfSize, HalfSize))
7944 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
7945 // Remainder of lower half result is zero and upper half is all undef.
7946 auto LowerAsEXTRQ = [&]() {
7947 // Determine the extraction length from the part of the
7948 // lower half that isn't zeroable.
7950 for (; Len > 0; --Len)
7951 if (!Zeroable[Len - 1])
7953 assert(Len > 0 && "Zeroable shuffle mask");
7955 // Attempt to match first Len sequential elements from the lower half.
7958 for (int i = 0; i != Len; ++i) {
7962 SDValue &V = (M < Size ? V1 : V2);
7965 // The extracted elements must start at a valid index and all mask
7966 // elements must be in the lower half.
7967 if (i > M || M >= HalfSize)
7970 if (Idx < 0 || (Src == V && Idx == (M - i))) {
7981 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
7982 int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
7983 int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
7984 return DAG.getNode(X86ISD::EXTRQI, DL, VT, Src,
7985 DAG.getConstant(BitLen, DL, MVT::i8),
7986 DAG.getConstant(BitIdx, DL, MVT::i8));
7989 if (SDValue ExtrQ = LowerAsEXTRQ())
7992 // INSERTQ: Extract lowest Len elements from lower half of second source and
7993 // insert over first source, starting at Idx.
7994 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
7995 auto LowerAsInsertQ = [&]() {
7996 for (int Idx = 0; Idx != HalfSize; ++Idx) {
7999 // Attempt to match first source from mask before insertion point.
8000 if (isUndefInRange(Mask, 0, Idx)) {
8002 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
8004 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
8010 // Extend the extraction length looking to match both the insertion of
8011 // the second source and the remaining elements of the first.
8012 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
8017 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
8019 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
8025 // Match the remaining elements of the lower half.
8026 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
8028 } else if ((!Base || (Base == V1)) &&
8029 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
8031 } else if ((!Base || (Base == V2)) &&
8032 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
8039 // We may not have a base (first source) - this can safely be undefined.
8041 Base = DAG.getUNDEF(VT);
8043 int BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
8044 int BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
8045 return DAG.getNode(X86ISD::INSERTQI, DL, VT, Base, Insert,
8046 DAG.getConstant(BitLen, DL, MVT::i8),
8047 DAG.getConstant(BitIdx, DL, MVT::i8));
8054 if (SDValue InsertQ = LowerAsInsertQ())
8060 /// \brief Lower a vector shuffle as a zero or any extension.
8062 /// Given a specific number of elements, element bit width, and extension
8063 /// stride, produce either a zero or any extension based on the available
8064 /// features of the subtarget. The extended elements are consecutive and
8065 /// can start from an offset element index in the input; to avoid excess
8066 /// shuffling the offset must either be in the bottom lane or at the start
8067 /// of a higher lane. All extended elements must be from the same input vector.
8069 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8070 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
8071 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
8072 assert(Scale > 1 && "Need a scale to extend.");
8073 int EltBits = VT.getScalarSizeInBits();
8074 int NumElements = VT.getVectorNumElements();
8075 int NumEltsPerLane = 128 / EltBits;
8076 int OffsetLane = Offset / NumEltsPerLane;
8077 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
8078 "Only 8, 16, and 32 bit elements can be extended.");
8079 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
8080 assert(0 <= Offset && "Extension offset must be positive.");
8081 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
8082 "Extension offset must be in the first lane or start an upper lane.");
8084 // Check that an index is in same lane as the base offset.
8085 auto SafeOffset = [&](int Idx) {
8086 return OffsetLane == (Idx / NumEltsPerLane);
8089 // Shift along an input so that the offset base moves to the first element.
8090 auto ShuffleOffset = [&](SDValue V) {
8094 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
8095 for (int i = 0; i * Scale < NumElements; ++i) {
8096 int SrcIdx = i + Offset;
8097 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
8099 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
8102 // Found a valid zext mask! Try various lowering strategies based on the
8103 // input type and available ISA extensions.
8104 if (Subtarget.hasSSE41()) {
8105 // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
8106 // PUNPCK will catch this in a later shuffle match.
8107 if (Offset && Scale == 2 && VT.is128BitVector())
8109 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
8110 NumElements / Scale);
8111 InputV = ShuffleOffset(InputV);
8113 // For 256-bit vectors, we only need the lower (128-bit) input half.
8114 if (VT.is256BitVector())
8115 InputV = extract128BitVector(InputV, 0, DAG, DL);
8117 InputV = DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV);
8118 return DAG.getBitcast(VT, InputV);
8121 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
8123 // For any extends we can cheat for larger element sizes and use shuffle
8124 // instructions that can fold with a load and/or copy.
8125 if (AnyExt && EltBits == 32) {
8126 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
8128 return DAG.getBitcast(
8129 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8130 DAG.getBitcast(MVT::v4i32, InputV),
8131 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
8133 if (AnyExt && EltBits == 16 && Scale > 2) {
8134 int PSHUFDMask[4] = {Offset / 2, -1,
8135 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
8136 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8137 DAG.getBitcast(MVT::v4i32, InputV),
8138 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
8139 int PSHUFWMask[4] = {1, -1, -1, -1};
8140 unsigned OddEvenOp = (Offset & 1 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW);
8141 return DAG.getBitcast(
8142 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
8143 DAG.getBitcast(MVT::v8i16, InputV),
8144 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
8147 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes to 64-bit integers.
8149 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
8150 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
8151 assert(VT.is128BitVector() && "Unexpected vector width!");
8153 int LoIdx = Offset * EltBits;
8154 SDValue Lo = DAG.getBitcast(
8155 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
8156 DAG.getConstant(EltBits, DL, MVT::i8),
8157 DAG.getConstant(LoIdx, DL, MVT::i8)));
8159 if (isUndefInRange(Mask, NumElements / 2, NumElements / 2) ||
8160 !SafeOffset(Offset + 1))
8161 return DAG.getBitcast(VT, Lo);
8163 int HiIdx = (Offset + 1) * EltBits;
8164 SDValue Hi = DAG.getBitcast(
8165 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
8166 DAG.getConstant(EltBits, DL, MVT::i8),
8167 DAG.getConstant(HiIdx, DL, MVT::i8)));
8168 return DAG.getBitcast(VT,
8169 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
8172 // If this would require more than 2 unpack instructions to expand, use
8173 // pshufb when available. We can only use more than 2 unpack instructions
8174 // when zero extending i8 elements which also makes it easier to use pshufb.
8175 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
8176 assert(NumElements == 16 && "Unexpected byte vector width!");
8177 SDValue PSHUFBMask[16];
8178 for (int i = 0; i < 16; ++i) {
8179 int Idx = Offset + (i / Scale);
8180 PSHUFBMask[i] = DAG.getConstant(
8181 (i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
8183 InputV = DAG.getBitcast(MVT::v16i8, InputV);
8184 return DAG.getBitcast(
8185 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8186 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
8189 // If we are extending from an offset, ensure we start on a boundary that
8190 // we can unpack from.
8191 int AlignToUnpack = Offset % (NumElements / Scale);
8192 if (AlignToUnpack) {
8193 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
8194 for (int i = AlignToUnpack; i < NumElements; ++i)
8195 ShMask[i - AlignToUnpack] = i;
8196 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
8197 Offset -= AlignToUnpack;
8200 // Otherwise emit a sequence of unpacks.
8202 unsigned UnpackLoHi = X86ISD::UNPCKL;
8203 if (Offset >= (NumElements / 2)) {
8204 UnpackLoHi = X86ISD::UNPCKH;
8205 Offset -= (NumElements / 2);
8208 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8209 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8210 : getZeroVector(InputVT, Subtarget, DAG, DL);
8211 InputV = DAG.getBitcast(InputVT, InputV);
8212 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
8216 } while (Scale > 1);
8217 return DAG.getBitcast(VT, InputV);
8220 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8222 /// This routine will try to do everything in its power to cleverly lower
8223 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8224 /// check for the profitability of this lowering, it tries to aggressively
8225 /// match this pattern. It will use all of the micro-architectural details it
8226 /// can to emit an efficient lowering. It handles both blends with all-zero
8227 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8228 /// masking out later).
8230 /// The reason we have dedicated lowering for zext-style shuffles is that they
8231 /// are both incredibly common and often quite performance sensitive.
8232 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8233 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8234 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
8235 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8237 int Bits = VT.getSizeInBits();
8238 int NumLanes = Bits / 128;
8239 int NumElements = VT.getVectorNumElements();
8240 int NumEltsPerLane = NumElements / NumLanes;
8241 assert(VT.getScalarSizeInBits() <= 32 &&
8242 "Exceeds 32-bit integer zero extension limit");
8243 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8245 // Define a helper function to check a particular ext-scale and lower to it if valid.
8247 auto Lower = [&](int Scale) -> SDValue {
8252 for (int i = 0; i < NumElements; ++i) {
8255 continue; // Valid anywhere but doesn't tell us anything.
8256 if (i % Scale != 0) {
8257 // Each of the extended elements need to be zeroable.
8261 // We no longer are in the anyext case.
8266 // Each of the base elements needs to be consecutive indices into the
8267 // same input vector.
8268 SDValue V = M < NumElements ? V1 : V2;
8269 M = M % NumElements;
8272 Offset = M - (i / Scale);
8273 } else if (InputV != V)
8274 return SDValue(); // Flip-flopping inputs.
8276 // Offset must start in the lowest 128-bit lane or at the start of an upper lane.
8278 // FIXME: Is it ever worth allowing a negative base offset?
8279 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
8280 (Offset % NumEltsPerLane) == 0))
8283 // If we are offsetting, all referenced entries must come from the same 128-bit lane.
8285 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
8288 if ((M % NumElements) != (Offset + (i / Scale)))
8289 return SDValue(); // Non-consecutive strided elements.
8293 // If we fail to find an input, we have a zero-shuffle which should always
8294 // have already been handled.
8295 // FIXME: Maybe handle this here in case during blending we end up with one?
8299 // If we are offsetting, don't extend if we only match a single input; we
8300 // can always do better by using a basic PSHUF or PUNPCK.
8301 if (Offset != 0 && Matches < 2)
8304 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8305 DL, VT, Scale, Offset, AnyExt, InputV, Mask, Subtarget, DAG);
8308 // The widest scale possible for extending is to a 64-bit integer.
8309 assert(Bits % 64 == 0 &&
8310 "The number of bits in a vector must be divisible by 64 on x86!");
8311 int NumExtElements = Bits / 64;
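// For example, a 128-bit vector starts with NumExtElements == 2, i.e. trying
// to extend into two 64-bit elements first.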
8313 // Each iteration, try extending the elements half as much, but into twice as many elements.
8315 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8316 assert(NumElements % NumExtElements == 0 &&
8317 "The input vector size must be divisible by the extended size.");
8318 if (SDValue V = Lower(NumElements / NumExtElements))
8322 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8326 // Returns one of the source operands if the shuffle can be reduced to a
8327 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8328 auto CanZExtLowHalf = [&]() {
8329 for (int i = NumElements / 2; i != NumElements; ++i)
8332 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8334 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8339 if (SDValue V = CanZExtLowHalf()) {
8340 V = DAG.getBitcast(MVT::v2i64, V);
8341 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8342 return DAG.getBitcast(VT, V);
8345 // No viable ext lowering found.
8349 /// \brief Try to get a scalar value for a specific element of a vector.
8351 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8352 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8353 SelectionDAG &DAG) {
8354 MVT VT = V.getSimpleValueType();
8355 MVT EltVT = VT.getVectorElementType();
8356 V = peekThroughBitcasts(V);
8358 // If the bitcasts shift the element size, we can't extract an equivalent element from it.
8360 MVT NewVT = V.getSimpleValueType();
8361 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8364 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8365 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
8366 // Ensure the scalar operand is the same size as the destination.
8367 // FIXME: Add support for scalar truncation where possible.
8368 SDValue S = V.getOperand(Idx);
8369 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
8370 return DAG.getBitcast(EltVT, S);
8376 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8378 /// This is particularly important because the set of instructions varies
8379 /// significantly based on whether the operand is a load or not.
8380 static bool isShuffleFoldableLoad(SDValue V) {
8381 V = peekThroughBitcasts(V);
8382 return ISD::isNON_EXTLoad(V.getNode());
8385 /// \brief Try to lower insertion of a single element into a zero vector.
8387 /// This is a common pattern for which we have especially efficient patterns to lower
8388 /// across all subtarget feature sets.
8389 static SDValue lowerVectorShuffleAsElementInsertion(
8390 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8391 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
8392 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8394 MVT EltVT = VT.getVectorElementType();
8396 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8397 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8399 bool IsV1Zeroable = true;
8400 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8401 if (i != V2Index && !Zeroable[i]) {
8402 IsV1Zeroable = false;
8406 // Check for a single input from a SCALAR_TO_VECTOR node.
8407 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8408 // all the smarts here sunk into that routine. However, the current
8409 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8410 // vector shuffle lowering is dead.
8411 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
8413 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
8414 // We need to zext the scalar if it is smaller than an i32.
8415 V2S = DAG.getBitcast(EltVT, V2S);
8416 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8417 // Using zext to expand a narrow element won't work for non-zero
8422 // Zero-extend directly to i32.
8424 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8426 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8427 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8428 EltVT == MVT::i16) {
8429 // Either not inserting from the low element of the input or the input
8430 // element size is too small to use VZEXT_MOVL to clear the high bits.
8434 if (!IsV1Zeroable) {
8435 // If V1 can't be treated as a zero vector we have fewer options to lower
8436 // this. We can't support integer vectors or non-zero targets cheaply, and
8437 // the V1 elements can't be permuted in any way.
8438 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8439 if (!VT.isFloatingPoint() || V2Index != 0)
8441 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8442 V1Mask[V2Index] = -1;
8443 if (!isNoopShuffleMask(V1Mask))
8445 // This is essentially a special case blend operation, but if we have
8446 // general purpose blend operations, they are always faster. Bail and let
8447 // the rest of the lowering handle these as blends.
8448 if (Subtarget.hasSSE41())
8451 // Otherwise, use MOVSD or MOVSS.
8452 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8453 "Only two types of floating point element types to handle!");
8454 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8458 // This lowering only works for the low element with floating point vectors.
8459 if (VT.isFloatingPoint() && V2Index != 0)
8462 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8464 V2 = DAG.getBitcast(VT, V2);
8467 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8468 // the desired position. Otherwise it is more efficient to do a vector
8469 // shift left. We know that we can do a vector shift left because all
8470 // the inputs are zero.
8471 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8472 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8473 V2Shuffle[V2Index] = 0;
8474 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8476 V2 = DAG.getBitcast(MVT::v16i8, V2);
8478 X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
8479 DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL,
8480 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(
8481 DAG.getDataLayout(), VT)));
8482 V2 = DAG.getBitcast(VT, V2);
8488 /// Try to lower a broadcast of a single (truncated) integer element,
8489 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
8491 /// This assumes we have AVX2.
8492 static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT,
8493 SDValue V0, int BroadcastIdx,
8494 const X86Subtarget &Subtarget,
8495 SelectionDAG &DAG) {
8496 assert(Subtarget.hasAVX2() &&
8497 "We can only lower integer broadcasts with AVX2!");
8499 EVT EltVT = VT.getVectorElementType();
8500 EVT V0VT = V0.getValueType();
8502 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
8503 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
8505 EVT V0EltVT = V0VT.getVectorElementType();
8506 if (!V0EltVT.isInteger())
8509 const unsigned EltSize = EltVT.getSizeInBits();
8510 const unsigned V0EltSize = V0EltVT.getSizeInBits();
8512 // This is only a truncation if the original element type is larger.
8513 if (V0EltSize <= EltSize)
8516 assert(((V0EltSize % EltSize) == 0) &&
8517 "Scalar type sizes must all be powers of 2 on x86!");
8519 const unsigned V0Opc = V0.getOpcode();
8520 const unsigned Scale = V0EltSize / EltSize;
8521 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
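// For example, broadcasting the i8 element at index 5 of a v4i32 source:
// Scale == 4, so the scalar is operand 1 and the byte we want is bits [8,16).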
8523 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
8524 V0Opc != ISD::BUILD_VECTOR)
8527 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
8529 // If we're extracting non-least-significant bits, shift so we can truncate.
8530 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
8531 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
8532 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
8533 if (const int OffsetIdx = BroadcastIdx % Scale)
8534 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
8535 DAG.getConstant(OffsetIdx * EltSize, DL, Scalar.getValueType()));
8537 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
8538 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
8541 /// \brief Try to lower broadcast of a single element.
8543 /// For convenience, this code also bundles all of the subtarget feature set
8544 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8545 /// a convenient way to factor it out.
8546 /// FIXME: This is very similar to LowerVectorBroadcast - can we merge them?
8547 static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
8548 SDValue V1, SDValue V2,
8550 const X86Subtarget &Subtarget,
8551 SelectionDAG &DAG) {
8552 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
8553 (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
8554 (Subtarget.hasAVX2() && VT.isInteger())))
8557 // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
8558 // we can only broadcast from a register with AVX2.
8559 unsigned NumElts = Mask.size();
8560 unsigned Opcode = VT == MVT::v2f64 ? X86ISD::MOVDDUP : X86ISD::VBROADCAST;
8561 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
8563 // Check that the mask is a broadcast.
8564 int BroadcastIdx = -1;
8565 for (int i = 0; i != (int)NumElts; ++i) {
8566 SmallVector<int, 8> BroadcastMask(NumElts, i);
8567 if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
8573 if (BroadcastIdx < 0)
8575 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8576 "a sorted mask where the broadcast "
8579 // Go up the chain of (vector) values to find a scalar load that we can
8580 // combine with the broadcast.
8583 switch (V.getOpcode()) {
8584 case ISD::BITCAST: {
8585 SDValue VSrc = V.getOperand(0);
8586 MVT SrcVT = VSrc.getSimpleValueType();
8587 if (VT.getScalarSizeInBits() != SrcVT.getScalarSizeInBits())
8592 case ISD::CONCAT_VECTORS: {
8593 int OperandSize = Mask.size() / V.getNumOperands();
8594 V = V.getOperand(BroadcastIdx / OperandSize);
8595 BroadcastIdx %= OperandSize;
8598 case ISD::INSERT_SUBVECTOR: {
8599 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8600 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8604 int BeginIdx = (int)ConstantIdx->getZExtValue();
8606 BeginIdx + (int)VInner.getSimpleValueType().getVectorNumElements();
8607 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8608 BroadcastIdx -= BeginIdx;
8619 // Check if this is a broadcast of a scalar. We special case lowering
8620 // for scalars so that we can more effectively fold with loads.
8621 // First, look through bitcast: if the original value has a larger element
8622 // type than the shuffle, the broadcast element is in essence truncated.
8623 // Make that explicit to ease folding.
8624 if (V.getOpcode() == ISD::BITCAST && VT.isInteger())
8625 if (SDValue TruncBroadcast = lowerVectorShuffleAsTruncBroadcast(
8626 DL, VT, V.getOperand(0), BroadcastIdx, Subtarget, DAG))
8627 return TruncBroadcast;
8629 MVT BroadcastVT = VT;
8631 // Peek through any bitcast (only useful for loads).
8632 SDValue BC = peekThroughBitcasts(V);
8634 // Also check the simpler case, where we can directly reuse the scalar.
8635 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8636 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8637 V = V.getOperand(BroadcastIdx);
8639 // If we can't broadcast from a register, check that the input is a load.
8640 if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
8642 } else if (MayFoldLoad(BC) && !cast<LoadSDNode>(BC)->isVolatile()) {
8643 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
8644 if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
8645 BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
8646 Opcode = (BroadcastVT.is128BitVector() ? X86ISD::MOVDDUP : Opcode);
8649 // If we are broadcasting a load that is only used by the shuffle
8650 // then we can reduce the vector load to the broadcasted scalar load.
8651 LoadSDNode *Ld = cast<LoadSDNode>(BC);
8652 SDValue BaseAddr = Ld->getOperand(1);
8653 EVT SVT = BroadcastVT.getScalarType();
8654 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
8655 SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
8656 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
8657 DAG.getMachineFunction().getMachineMemOperand(
8658 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
8659 } else if (!BroadcastFromReg) {
8660 // We can't broadcast from a vector register.
8662 } else if (BroadcastIdx != 0) {
8663 // We can only broadcast from the zero-element of a vector register,
8664 // but it can be advantageous to broadcast from the zero-element of a subvector.
8666 if (!VT.is256BitVector() && !VT.is512BitVector())
8669 // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
8670 if (VT == MVT::v4f64 || VT == MVT::v4i64)
8673 // Only broadcast the zero-element of a 128-bit subvector.
8674 unsigned EltSize = VT.getScalarSizeInBits();
8675 if (((BroadcastIdx * EltSize) % 128) != 0)
8678 MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 128 / EltSize);
8679 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtVT, V,
8680 DAG.getIntPtrConstant(BroadcastIdx, DL));
8683 if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
8684 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
8685 DAG.getBitcast(MVT::f64, V));
8687 // Bitcast back to the same scalar type as BroadcastVT.
8688 MVT SrcVT = V.getSimpleValueType();
8689 if (SrcVT.getScalarType() != BroadcastVT.getScalarType()) {
8690 assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
8691 "Unexpected vector element size");
8692 if (SrcVT.isVector()) {
8693 unsigned NumSrcElts = SrcVT.getVectorNumElements();
8694 SrcVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
8696 SrcVT = BroadcastVT.getScalarType();
8698 V = DAG.getBitcast(SrcVT, V);
8701 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
8704 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8705 // INSERTPS when the V1 elements are already in the correct locations
8706 // because otherwise we can just always use two SHUFPS instructions which
8707 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8708 // perform INSERTPS if a single V1 element is out of place and all V2
8709 // elements are zeroable.
8710 static bool matchVectorShuffleAsInsertPS(SDValue &V1, SDValue &V2,
8711 unsigned &InsertPSMask,
8712 const SmallBitVector &Zeroable,
8714 SelectionDAG &DAG) {
8715 assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
8716 assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
8717 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8719 int V1DstIndex = -1;
8720 int V2DstIndex = -1;
8721 bool V1UsedInPlace = false;
8723 for (int i = 0; i < 4; ++i) {
8724 // Synthesize a zero mask from the zeroable elements (includes undefs).
8730 // Flag if we use any V1 inputs in place.
8732 V1UsedInPlace = true;
8736 // We can only insert a single non-zeroable element.
8737 if (V1DstIndex >= 0 || V2DstIndex >= 0)
8741 // V1 input out of place for insertion.
8744 // V2 input for insertion.
8749 // Don't bother if we have no (non-zeroable) element for insertion.
8750 if (V1DstIndex < 0 && V2DstIndex < 0)
8753 // Determine element insertion src/dst indices. The src index is from the
8754 // start of the inserted vector, not the start of the concatenated vector.
8755 unsigned V2SrcIndex = 0;
8756 if (V1DstIndex >= 0) {
8757 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8758 // and don't use the original V2 at all.
8759 V2SrcIndex = Mask[V1DstIndex];
8760 V2DstIndex = V1DstIndex;
8763 V2SrcIndex = Mask[V2DstIndex] - 4;
8766 // If no V1 inputs are used in place, then the result is created only from
8767 // the zero mask and the V2 insertion - so remove V1 dependency.
8769 V1 = DAG.getUNDEF(MVT::v4f32);
8771 // Insert the V2 element into the desired position.
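// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4], and the zero mask in bits [3:0].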
8772 InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8773 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8777 static SDValue lowerVectorShuffleAsInsertPS(const SDLoc &DL, SDValue V1,
8778 SDValue V2, ArrayRef<int> Mask,
8779 SelectionDAG &DAG) {
8780 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8781 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8782 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8784 // Attempt to match the insertps pattern.
8785 unsigned InsertPSMask;
8786 if (!matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
8789 // Insert the V2 element into the desired position.
8790 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8791 DAG.getConstant(InsertPSMask, DL, MVT::i8));
8794 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
8795 /// UNPCK instruction.
8797 /// This specifically targets cases where we end up alternating between
8798 /// the two inputs, and so can permute them into something that feeds a single
8799 /// UNPCK instruction. Note that this routine only targets integer vectors
8800 /// because for floating point vectors we have a generalized SHUFPS lowering
8801 /// strategy that handles everything that doesn't *exactly* match an unpack,
8802 /// making this clever lowering unnecessary.
8803 static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
8804 SDValue V1, SDValue V2,
8806 SelectionDAG &DAG) {
8807 assert(!VT.isFloatingPoint() &&
8808 "This routine only supports integer vectors.");
8809 assert(VT.is128BitVector() &&
8810 "This routine only works on 128-bit vectors.");
8811 assert(!V2.isUndef() &&
8812 "This routine should only be used when blending two inputs.");
8813 assert(Mask.size() >= 2 && "Single element masks are invalid.");
8815 int Size = Mask.size();
8818 count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
8820 count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
8822 bool UnpackLo = NumLoInputs >= NumHiInputs;
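// Try to express the shuffle as two input permutes feeding a single unpack
// at the given scalar granularity; gives up if the mask doesn't split that way.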
8824 auto TryUnpack = [&](int ScalarSize, int Scale) {
8825 SmallVector<int, 16> V1Mask((unsigned)Size, -1);
8826 SmallVector<int, 16> V2Mask((unsigned)Size, -1);
8828 for (int i = 0; i < Size; ++i) {
8832 // Each element of the unpack contains Scale elements from this mask.
8833 int UnpackIdx = i / Scale;
8835 // We only handle the case where V1 feeds the first slots of the unpack.
8836 // We rely on canonicalization to ensure this is the case.
8837 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
8840 // Setup the mask for this input. The indexing is tricky as we have to
8841 // handle the unpack stride.
8842 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
8843 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
8847 // If we will have to shuffle both inputs to use the unpack, check whether
8848 // we can just unpack first and shuffle the result. If so, skip this unpack.
8849 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
8850 !isNoopShuffleMask(V2Mask))
8853 // Shuffle the inputs into place.
8854 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
8855 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
8857 // Cast the inputs to the type we will use to unpack them.
8858 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
8859 V1 = DAG.getBitcast(UnpackVT, V1);
8860 V2 = DAG.getBitcast(UnpackVT, V2);
8862 // Unpack the inputs and cast the result back to the desired type.
8863 return DAG.getBitcast(
8864 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
8868 // We try each unpack from the largest to the smallest to try and find one
8869 // that fits this mask.
8870 int OrigScalarSize = VT.getScalarSizeInBits();
8871 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
8872 if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
8875 // If none of the unpack-rooted lowerings worked (or were profitable) try an initial unpack.
8877 if (NumLoInputs == 0 || NumHiInputs == 0) {
8878 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
8879 "We have to have *some* inputs!");
8880 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
8882 // FIXME: We could consider the total complexity of the permute of each
8883 // possible unpacking. Or at the least we should consider how many
8884 // half-crossings are created.
8885 // FIXME: We could consider commuting the unpacks.
8887 SmallVector<int, 32> PermMask((unsigned)Size, -1);
8888 for (int i = 0; i < Size; ++i) {
8892 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
8895 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
8897 return DAG.getVectorShuffle(
8898 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
8900 DAG.getUNDEF(VT), PermMask);
8906 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8908 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8909 /// support for floating point shuffles but not integer shuffles. These
8910 /// instructions will incur a domain crossing penalty on some chips though so
8911 /// it is better to avoid lowering through this for integer vectors where possible.
8913 static SDValue lowerV2F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
8914 SDValue V1, SDValue V2,
8915 const X86Subtarget &Subtarget,
8916 SelectionDAG &DAG) {
8917 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8918 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8919 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8922 // Check for being able to broadcast a single element.
8923 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
8924 DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
8927 // Straight shuffle of a single input vector. Simulate this by using the
8928 // single input as both of the "inputs" to this instruction.
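// The SHUFPD immediate uses one bit per result element: bit 0 picks the
// input element for result lane 0 and bit 1 for result lane 1.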
8929 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8931 if (Subtarget.hasAVX()) {
8932 // If we have AVX, we can use VPERMILPS which will allow folding a load
8933 // into the shuffle.
8934 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8935 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
8938 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V1,
8939 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
8941 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8942 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8944 // If we have a single input, insert that into V1 if we can do so cheaply.
8945 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8946 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8947 DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
8949 // Try inverting the insertion since for v2 masks it is easy to do and we
8950 // can't reliably sort the mask one way or the other.
8951 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8952 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8953 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8954 DL, MVT::v2f64, V2, V1, InverseMask, Subtarget, DAG))
8958 // Try to use one of the special instruction patterns to handle two common
8959 // blend patterns if a zero-blend above didn't work.
8960 if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
8961 isShuffleEquivalent(V1, V2, Mask, {1, 3}))
8962 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8963 // We can either use a special instruction to load over the low double or
8964 // to move just the low double.
8966 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8968 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8970 if (Subtarget.hasSSE41())
8971 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8975 // Use dedicated unpack instructions for masks that match their pattern.
8977 lowerVectorShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
8980 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8981 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
8982 DAG.getConstant(SHUFPDMask, DL, MVT::i8));
8985 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8987 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8988 /// the integer unit to minimize domain crossing penalties. However, for blends
8989 /// it falls back to the floating point shuffle operation with appropriate bit casting.
8991 static SDValue lowerV2I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
8992 SDValue V1, SDValue V2,
8993 const X86Subtarget &Subtarget,
8994 SelectionDAG &DAG) {
8995 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8996 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8997 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
9000 // Check for being able to broadcast a single element.
9001 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
9002 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
9005 // Straight shuffle of a single input vector. For everything from SSE2
9006 // onward this has a single fast instruction with no scary immediates.
9007 // We have to map the mask as it is actually a v4i32 shuffle instruction.
9008 V1 = DAG.getBitcast(MVT::v4i32, V1);
9009 int WidenedMask[4] = {
9010 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
9011 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
9012 return DAG.getBitcast(
9014 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
9015 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
9017 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
9018 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
9019 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
9020 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
9022 // If we have a blend of two same-type PACKUS operations and the blend aligns
9023 // with the low and high halves, we can just merge the PACKUS operations.
9024 // This is particularly important as it lets us merge shuffles that this
9025 // routine itself creates.
9026 auto GetPackNode = [](SDValue V) {
9027 V = peekThroughBitcasts(V);
9028 return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
9030 if (SDValue V1Pack = GetPackNode(V1))
9031 if (SDValue V2Pack = GetPackNode(V2)) {
9032 EVT PackVT = V1Pack.getValueType();
9033 if (PackVT == V2Pack.getValueType())
9034 return DAG.getBitcast(MVT::v2i64,
9035 DAG.getNode(X86ISD::PACKUS, DL, PackVT,
9036 Mask[0] == 0 ? V1Pack.getOperand(0)
9037 : V1Pack.getOperand(1),
9038 Mask[1] == 2 ? V2Pack.getOperand(0)
9039 : V2Pack.getOperand(1)));
9042 // Try to use shift instructions.
9043 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
9047 // When loading a scalar and then shuffling it into a vector we can often do
9048 // the insertion cheaply.
9049 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
9050 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
9052 // Try inverting the insertion since for v2 masks it is easy to do and we
9053 // can't reliably sort the mask one way or the other.
9054 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
9055 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
9056 DL, MVT::v2i64, V2, V1, InverseMask, Subtarget, DAG))
9059 // We have different paths for blend lowering, but they all must use the
9060 // *exact* same predicate.
9061 bool IsBlendSupported = Subtarget.hasSSE41();
9062 if (IsBlendSupported)
9063 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
9067 // Use dedicated unpack instructions for masks that match their pattern.
9069 lowerVectorShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
9072 // Try to use byte rotation instructions.
9073 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
9074 if (Subtarget.hasSSSE3())
9075 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9076 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
9079 // If we have direct support for blends, we should lower by decomposing into
9080 // a permute. That will be faster than the domain cross.
9081 if (IsBlendSupported)
9082 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
9085 // We implement this with SHUFPD which is pretty lame because it will likely
9086 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
9087 // However, all the alternatives are still more cycles and newer chips don't
9088 // have this problem. It would be really nice if x86 had better shuffles here.
9089 V1 = DAG.getBitcast(MVT::v2f64, V1);
9090 V2 = DAG.getBitcast(MVT::v2f64, V2);
9091 return DAG.getBitcast(MVT::v2i64,
9092 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
9095 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
9097 /// This is used to disable more specialized lowerings when the shufps lowering
9098 /// will happen to be efficient.
9099 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
9100 // This routine only handles 128-bit shufps.
9101 assert(Mask.size() == 4 && "Unsupported mask size!");
9102 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
9103 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
9104 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
9105 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
9107 // To lower with a single SHUFPS we need to have the low half and high half
9108 // each requiring a single input.
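// For example, <0, 1, 4, 5> is a single SHUFPS, but <0, 4, 1, 5> is not
// because its low half would need elements from both inputs.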
9109 if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
9111 if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
9117 /// \brief Lower a vector shuffle using the SHUFPS instruction.
9119 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
9120 /// It makes no assumptions about whether this is the *best* lowering; it simply uses it.
9122 static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
9123 ArrayRef<int> Mask, SDValue V1,
9124 SDValue V2, SelectionDAG &DAG) {
9125 SDValue LowV = V1, HighV = V2;
9126 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
9128 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
9130 if (NumV2Elements == 1) {
9132 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
9135 // Compute the index adjacent to V2Index and in the same half by toggling
9137 int V2AdjIndex = V2Index ^ 1;
9139 if (Mask[V2AdjIndex] < 0) {
9140 // Handles all the cases where we have a single V2 element and an undef.
9141 // This will only ever happen in the high lanes because we commute the
9142 // vector otherwise.
9144 std::swap(LowV, HighV);
9145 NewMask[V2Index] -= 4;
9147 // Handle the case where the V2 element ends up adjacent to a V1 element.
9148 // To make this work, blend them together as the first step.
9149 int V1Index = V2AdjIndex;
9150 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
9151 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
9152 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
9154 // Now proceed to reconstruct the final blend as we have the necessary
9155 // high or low half formed.
9162 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
9163 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
9165 } else if (NumV2Elements == 2) {
9166 if (Mask[0] < 4 && Mask[1] < 4) {
9167 // Handle the easy case where we have V1 in the low lanes and V2 in the
9171 } else if (Mask[2] < 4 && Mask[3] < 4) {
9172 // We also handle the reversed case because this utility may get called
9173 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
9174 // arrange things in the right direction.
9180 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
9181 // trying to place elements directly, just blend them and set up the final
9182 // shuffle to place them.
9184 // The first two blend mask elements are for V1, the second two are for
9186 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
9187 Mask[2] < 4 ? Mask[2] : Mask[3],
9188 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
9189 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
9190 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
9191 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
9193 // Now we do a normal shuffle of V1 by giving V1 as both operands to
9196 NewMask[0] = Mask[0] < 4 ? 0 : 2;
9197 NewMask[1] = Mask[0] < 4 ? 2 : 0;
9198 NewMask[2] = Mask[2] < 4 ? 1 : 3;
9199 NewMask[3] = Mask[2] < 4 ? 3 : 1;
9202 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
9203 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
9206 /// \brief Lower 4-lane 32-bit floating point shuffles.
9208 /// Uses instructions exclusively from the floating point unit to minimize
9209 /// domain crossing penalties, as these are sufficient to implement all v4f32 shuffles.
9211 static SDValue lowerV4F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
9212 SDValue V1, SDValue V2,
9213 const X86Subtarget &Subtarget,
9214 SelectionDAG &DAG) {
9215 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
9216 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
9217 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
9219 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
9221 if (NumV2Elements == 0) {
9222 // Check for being able to broadcast a single element.
9223 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
9224 DL, MVT::v4f32, V1, V2, Mask, Subtarget, DAG))
9227 // Use even/odd duplicate instructions for masks that match their pattern.
9228 if (Subtarget.hasSSE3()) {
9229 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
9230 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
9231 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
9232 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
9235 if (Subtarget.hasAVX()) {
9236 // If we have AVX, we can use VPERMILPS which will allow folding a load
9237 // into the shuffle.
9238 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
9239 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
9242 // Otherwise, use a straight shuffle of a single input vector. We pass the
9243 // input vector to both operands to simulate this with a SHUFPS.
9244 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
9245 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
9248 // There are special ways we can lower some single-element blends. However, we
9249 // have custom ways we can lower more complex single-element blends below that
9250 // we defer to if both this and BLENDPS fail to match, so restrict this to
9251 // when the V2 input is targeting element 0 of the mask -- that is the fast case here.
9253 if (NumV2Elements == 1 && Mask[0] >= 4)
9254 if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v4f32, V1, V2,
9255 Mask, Subtarget, DAG))
9258 if (Subtarget.hasSSE41()) {
9259 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
9263 // Use INSERTPS if we can complete the shuffle efficiently.
9264 if (SDValue V = lowerVectorShuffleAsInsertPS(DL, V1, V2, Mask, DAG))
9267 if (!isSingleSHUFPSMask(Mask))
9268 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
9269 DL, MVT::v4f32, V1, V2, Mask, DAG))
9273 // Use low/high mov instructions.
9274 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
9275 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
9276 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
9277 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
9279 // Use dedicated unpack instructions for masks that match their pattern.
9281 lowerVectorShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
9284 // Otherwise fall back to a SHUFPS lowering strategy.
9285 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
9288 /// \brief Lower 4-lane i32 vector shuffles.
9290 /// We try to handle these with integer-domain shuffles where we can, but for
9291 /// blends we use the floating point domain blend instructions.
9292 static SDValue lowerV4I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
9293 SDValue V1, SDValue V2,
9294 const X86Subtarget &Subtarget,
9295 SelectionDAG &DAG) {
9296 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
9297 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
9298 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
9300 // Whenever we can lower this as a zext, that instruction is strictly faster
9301 // than any alternative. It also allows us to fold memory operands into the
9302 // shuffle in many cases.
9303 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
9304 Mask, Subtarget, DAG))
9307 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
9309 if (NumV2Elements == 0) {
9310 // Check for being able to broadcast a single element.
9311 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
9312 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
9315 // Straight shuffle of a single input vector. For everything from SSE2
9316 // onward this has a single fast instruction with no scary immediates.
9317 // We coerce the shuffle pattern to be compatible with UNPCK instructions
9318 // but we aren't actually going to use the UNPCK instruction because doing
9319 // so prevents folding a load into this instruction or making a copy.
9320 const int UnpackLoMask[] = {0, 0, 1, 1};
9321 const int UnpackHiMask[] = {2, 2, 3, 3};
9322 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
9323 Mask = UnpackLoMask;
9324 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
9325 Mask = UnpackHiMask;
9327 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
9328 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
9331 // Try to use shift instructions.
9332 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
9336 // There are special ways we can lower some single-element blends.
9337 if (NumV2Elements == 1)
9338 if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v4i32, V1, V2,
9339 Mask, Subtarget, DAG))
9342 // We have different paths for blend lowering, but they all must use the
9343 // *exact* same predicate.
9344 bool IsBlendSupported = Subtarget.hasSSE41();
9345 if (IsBlendSupported)
9346 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
9350 if (SDValue Masked =
9351 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
9354 // Use dedicated unpack instructions for masks that match their pattern.
9356 lowerVectorShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
9359 // Try to use byte rotation instructions.
9360 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
9361 if (Subtarget.hasSSSE3())
9362 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9363 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
9366 // If we have direct support for blends, we should lower by decomposing into
9367 // a permute. That will be faster than the domain cross.
9368 if (IsBlendSupported)
9369 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
9372 // Try to lower by permuting the inputs into an unpack instruction.
9373 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1,
9377 // We implement this with SHUFPS because it can blend from two vectors.
9378 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
9379 // up the inputs, bypassing domain shift penalties that we would incur if we
9380 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
9381 // relevant.
9382 return DAG.getBitcast(
9384 DAG.getVectorShuffle(MVT::v4f32, DL, DAG.getBitcast(MVT::v4f32, V1),
9385 DAG.getBitcast(MVT::v4f32, V2), Mask));
9388 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
9389 /// shuffle lowering, and the most complex part.
9391 /// The lowering strategy is to try to form pairs of input lanes which are
9392 /// targeted at the same half of the final vector, and then use a dword shuffle
9393 /// to place them onto the right half, and finally unpack the paired lanes into
9394 /// their final position.
9396 /// The exact breakdown of how to form these dword pairs and align them on the
9397 /// correct sides is really tricky. See the comments within the function for
9398 /// more of the details.
9400 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
9401 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
9402 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
9403 /// vector, form the analogous 128-bit 8-element Mask.
9404 static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
9405 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
9406 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
9407 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
9408 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
9410 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
9411 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
9412 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
9414 SmallVector<int, 4> LoInputs;
9415 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
9416 [](int M) { return M >= 0; });
9417 std::sort(LoInputs.begin(), LoInputs.end());
9418 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
9419 SmallVector<int, 4> HiInputs;
9420 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
9421 [](int M) { return M >= 0; });
9422 std::sort(HiInputs.begin(), HiInputs.end());
9423 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
9424 int NumLToL =
9425 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
9426 int NumHToL = LoInputs.size() - NumLToL;
9427 int NumLToH =
9428 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
9429 int NumHToH = HiInputs.size() - NumLToH;
9430 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
9431 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
9432 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
9433 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
9435 // If we are splatting two values from one half, one to each output half, then
9436 // we can shuffle that half so each is splatted to a dword, then splat those
9437 // to their respective halves.
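// For example, the v8i16 mask <1, 1, 1, 1, 3, 3, 3, 3> becomes
// PSHUFLW<1, 1, 3, 3> followed by PSHUFD<0, 0, 1, 1>.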
9438 auto SplatHalfs = [&](int LoInput, int HiInput, unsigned ShufWOp,
9440 int PSHUFHalfMask[] = {LoInput % 4, LoInput % 4, HiInput % 4, HiInput % 4};
9441 int PSHUFDMask[] = {DOffset + 0, DOffset + 0, DOffset + 1, DOffset + 1};
9442 V = DAG.getNode(ShufWOp, DL, VT, V,
9443 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
9444 V = DAG.getBitcast(PSHUFDVT, V);
9445 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
9446 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
9447 return DAG.getBitcast(VT, V);
9450 if (NumLToL == 1 && NumLToH == 1 && (NumHToL + NumHToH) == 0)
9451 return SplatHalfs(LToLInputs[0], LToHInputs[0], X86ISD::PSHUFLW, 0);
9452 if (NumHToL == 1 && NumHToH == 1 && (NumLToL + NumLToH) == 0)
9453 return SplatHalfs(HToLInputs[0], HToHInputs[0], X86ISD::PSHUFHW, 2);
9455 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
9456 // such inputs we can swap two of the dwords across the half mark and end up
9457 // with <=2 inputs to each half in each half. Once there, we can fall through
9458 // to the generic code below. For example:
9460 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9461 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
9463 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
9464 // and an existing 2-into-2 on the other half. In this case we may have to
9465 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
9466 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
9467 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
9468 // because any other situation (including a 3-into-1 or 1-into-3 in the other
9469 // half than the one we target for fixing) will be fixed when we re-enter this
9470 // path. We will also combine any sequence of PSHUFD instructions that results
9471 // into a single instruction. Here is an example of the tricky case:
9473 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9474 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
9476 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
9478 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
9479 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
9481 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
9482 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
9484 // The result is fine to be handled by the generic logic.
9485 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
9486 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
9487 int AOffset, int BOffset) {
9488 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
9489 "Must call this with A having 3 or 1 inputs from the A half.");
9490 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
9491 "Must call this with B having 1 or 3 inputs from the B half.");
9492 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9493 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9495 bool ThreeAInputs = AToAInputs.size() == 3;
9497 // Compute the index of dword with only one word among the three inputs in
9498 // a half by taking the sum of the half with three inputs and subtracting
9499 // the sum of the actual three inputs. The difference is the remaining
9500 // slot.
9501 int ADWord, BDWord;
9502 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
9503 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
9504 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
9505 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
9506 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
9507 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9508 int TripleNonInputIdx =
9509 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9510 TripleDWord = TripleNonInputIdx / 2;
9512 // We use xor with one to compute the adjacent DWord to whichever one the
9513 // single input word lives in.
9514 OneInputDWord = (OneInput / 2) ^ 1;
9516 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9517 // and BToA inputs. If there is also such a problem with the BToB and AToB
9518 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9519 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9520 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9521 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
9522 // Compute how many inputs will be flipped by swapping these DWords. We need
9524 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
9526 int NumFlippedAToBInputs =
9527 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9528 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9529 int NumFlippedBToBInputs =
9530 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9531 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9532 if ((NumFlippedAToBInputs == 1 &&
9533 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9534 (NumFlippedBToBInputs == 1 &&
9535 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9536 // We choose whether to fix the A half or B half based on whether that
9537 // half has zero flipped inputs. At zero, we may not be able to fix it
9538 // with that half. We also bias towards fixing the B half because that
9539 // will more commonly be the high half, and we have to bias one way.
9540 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9541 ArrayRef<int> Inputs) {
9542 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9543 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9544 PinnedIdx ^ 1) != Inputs.end();
9545 // Determine whether the free index is in the flipped dword or the
9546 // unflipped dword based on where the pinned index is. We use this bit
9547 // in an xor to conditionally select the adjacent dword.
9548 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9549 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9550 FixFreeIdx) != Inputs.end();
9551 if (IsFixIdxInput == IsFixFreeIdxInput)
9553 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9554 FixFreeIdx) != Inputs.end();
9555 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9556 "We need to be changing the number of flipped inputs!");
9557 int PSHUFHalfMask[] = {0, 1, 2, 3};
9558 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9559 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9561 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
9564 if (M >= 0 && M == FixIdx)
9566 else if (M >= 0 && M == FixFreeIdx)
9569 if (NumFlippedBToBInputs != 0) {
9570 int BPinnedIdx =
9571 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9572 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9574 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9575 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
9576 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9581 int PSHUFDMask[] = {0, 1, 2, 3};
9582 PSHUFDMask[ADWord] = BDWord;
9583 PSHUFDMask[BDWord] = ADWord;
9586 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
9587 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
9589 // Adjust the mask to match the new locations of A and B.
9591 if (M >= 0 && M/2 == ADWord)
9592 M = 2 * BDWord + M % 2;
9593 else if (M >= 0 && M/2 == BDWord)
9594 M = 2 * ADWord + M % 2;
9596 // Recurse back into this routine to re-compute state now that this isn't
9597 // a 3 and 1 problem.
9598 return lowerV8I16GeneralSingleInputVectorShuffle(DL, VT, V, Mask, Subtarget,
9601 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9602 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9603 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9604 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9606 // At this point there are at most two inputs to the low and high halves from
9607 // each half. That means the inputs can always be grouped into dwords and
9608 // those dwords can then be moved to the correct half with a dword shuffle.
9609 // We use at most one low and one high word shuffle to collect these paired
9610 // inputs into dwords, and finally a dword shuffle to place them.
9611 int PSHUFLMask[4] = {-1, -1, -1, -1};
9612 int PSHUFHMask[4] = {-1, -1, -1, -1};
9613 int PSHUFDMask[4] = {-1, -1, -1, -1};
9615 // First fix the masks for all the inputs that are staying in their
9616 // original halves. This will then dictate the targets of the cross-half
9618 auto fixInPlaceInputs =
9619 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9620 MutableArrayRef<int> SourceHalfMask,
9621 MutableArrayRef<int> HalfMask, int HalfOffset) {
9622 if (InPlaceInputs.empty())
9624 if (InPlaceInputs.size() == 1) {
9625 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9626 InPlaceInputs[0] - HalfOffset;
9627 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9630 if (IncomingInputs.empty()) {
9631 // Just fix all of the in place inputs.
9632 for (int Input : InPlaceInputs) {
9633 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9634 PSHUFDMask[Input / 2] = Input / 2;
9639 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9640 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9641 InPlaceInputs[0] - HalfOffset;
9642 // Put the second input next to the first so that they are packed into
9643 // a dword. We find the adjacent index by toggling the low bit.
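// For example, if InPlaceInputs[0] is word 2, AdjIndex is word 3 and the pair
// ends up occupying dword 1.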
9644 int AdjIndex = InPlaceInputs[0] ^ 1;
9645 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9646 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9647 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9649 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9650 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9652 // Now gather the cross-half inputs and place them into a free dword of
9653 // their target half.
9654 // FIXME: This operation could almost certainly be simplified dramatically to
9655 // look more like the 3-1 fixing operation.
9656 auto moveInputsToRightHalf = [&PSHUFDMask](
9657 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9658 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9659 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9661 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9662 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
9664 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9666 int LowWord = Word & ~1;
9667 int HighWord = Word | 1;
9668 return isWordClobbered(SourceHalfMask, LowWord) ||
9669 isWordClobbered(SourceHalfMask, HighWord);
9672 if (IncomingInputs.empty())
9675 if (ExistingInputs.empty()) {
9676 // Map any dwords with inputs from them into the right half.
9677 for (int Input : IncomingInputs) {
9678 // If the source half mask maps over the inputs, turn those into
9679 // swaps and use the swapped lane.
9680 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9681 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
9682 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9683 Input - SourceOffset;
9684 // We have to swap the uses in our half mask in one sweep.
9685 for (int &M : HalfMask)
9686 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9688 else if (M == Input)
9689 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9691 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9692 Input - SourceOffset &&
9693 "Previous placement doesn't match!");
9695 // Note that this correctly re-maps both when we do a swap and when
9696 // we observe the other side of the swap above. We rely on that to
9697 // avoid swapping the members of the input list directly.
9698 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9701 // Map the input's dword into the correct half.
9702 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
9703 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9705 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9707 "Previous placement doesn't match!");
9710 // And just directly shift any other-half mask elements to be same-half
9711 // as we will have mirrored the dword containing the element into the
9712 // same position within that half.
9713 for (int &M : HalfMask)
9714 if (M >= SourceOffset && M < SourceOffset + 4) {
9715 M = M - SourceOffset + DestOffset;
9716 assert(M >= 0 && "This should never wrap below zero!");
9721 // Ensure we have the input in a viable dword of its current half. This
9722 // is particularly tricky because the original position may be clobbered
9723 // by inputs being moved and *staying* in that half.
9724 if (IncomingInputs.size() == 1) {
9725 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9726 int InputFixed = std::find(std::begin(SourceHalfMask),
9727 std::end(SourceHalfMask), -1) -
9728 std::begin(SourceHalfMask) + SourceOffset;
9729 SourceHalfMask[InputFixed - SourceOffset] =
9730 IncomingInputs[0] - SourceOffset;
9731 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9733 IncomingInputs[0] = InputFixed;
9735 } else if (IncomingInputs.size() == 2) {
9736 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9737 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9738 // We have two non-adjacent or clobbered inputs we need to extract from
9739 // the source half. To do this, we need to map them into some adjacent
9740 // dword slot in the source mask.
9741 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9742 IncomingInputs[1] - SourceOffset};
9744 // If there is a free slot in the source half mask adjacent to one of
9745 // the inputs, place the other input in it. We use (Index XOR 1) to
9746 // compute an adjacent index.
9747 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9748 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
9749 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9750 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9751 InputsFixed[1] = InputsFixed[0] ^ 1;
9752 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9753 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
9754 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9755 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9756 InputsFixed[0] = InputsFixed[1] ^ 1;
9757 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
9758 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
9759 // The two inputs are in the same DWord but it is clobbered and the
9760 // adjacent DWord isn't used at all. Move both inputs to the free
9762 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9763 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9764 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9765 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9767 // The only way we hit this point is if there is no clobbering
9768 // (because there are no off-half inputs to this half) and there is no
9769 // free slot adjacent to one of the inputs. In this case, we have to
9770 // swap an input with a non-input.
9771 for (int i = 0; i < 4; ++i)
9772 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
9773 "We can't handle any clobbers here!");
9774 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9775 "Cannot have adjacent inputs here!");
9777 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9778 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9780 // We also have to update the final source mask in this case because
9781 // it may need to undo the above swap.
9782 for (int &M : FinalSourceHalfMask)
9783 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9784 M = InputsFixed[1] + SourceOffset;
9785 else if (M == InputsFixed[1] + SourceOffset)
9786 M = (InputsFixed[0] ^ 1) + SourceOffset;
9788 InputsFixed[1] = InputsFixed[0] ^ 1;
9791 // Point everything at the fixed inputs.
9792 for (int &M : HalfMask)
9793 if (M == IncomingInputs[0])
9794 M = InputsFixed[0] + SourceOffset;
9795 else if (M == IncomingInputs[1])
9796 M = InputsFixed[1] + SourceOffset;
9798 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9799 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9802 llvm_unreachable("Unhandled input size!");
9805 // Now hoist the DWord down to the right half.
9806 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
9807 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
9808 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9809 for (int &M : HalfMask)
9810 for (int Input : IncomingInputs)
9812 M = FreeDWord * 2 + Input % 2;
9814 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9815 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9816 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9817 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9819 // Now enact all the shuffles we've computed to move the inputs into their
9821 if (!isNoopShuffleMask(PSHUFLMask))
9822 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
9823 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
9824 if (!isNoopShuffleMask(PSHUFHMask))
9825 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
9826 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
9827 if (!isNoopShuffleMask(PSHUFDMask))
9830 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
9831 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
9833 // At this point, each half should contain all its inputs, and we can then
9834 // just shuffle them into their final position.
9835 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
9836 "Failed to lift all the high half inputs to the low mask!");
9837 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
9838 "Failed to lift all the low half inputs to the high mask!");
9840 // Do a half shuffle for the low mask.
9841 if (!isNoopShuffleMask(LoMask))
9842 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
9843 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
9845 // Do a half shuffle with the high mask after shifting its values down.
9846 for (int &M : HiMask)
9849 if (!isNoopShuffleMask(HiMask))
9850 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
9851 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
9856 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
9857 /// blend if only one input is used.
9858 static SDValue lowerVectorShuffleAsBlendOfPSHUFBs(
9859 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
9860 SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
9861 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9867 int Size = Mask.size();
9868 int Scale = 16 / Size;
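// For a v8i16 shuffle, Size is 8 and Scale is 2, so each word mask element m
// expands to the byte selectors 2*m and 2*m+1 within its source vector (or
// 0x80 when the lane is zeroable).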
9869 for (int i = 0; i < 16; ++i) {
9870 if (Mask[i / Scale] < 0) {
9871 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9873 const int ZeroMask = 0x80;
9874 int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
9876 int V2Idx = Mask[i / Scale] < Size
9878 : (Mask[i / Scale] - Size) * Scale + i % Scale;
9879 if (Zeroable[i / Scale])
9880 V1Idx = V2Idx = ZeroMask;
9881 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
9882 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
9883 V1InUse |= (ZeroMask != V1Idx);
9884 V2InUse |= (ZeroMask != V2Idx);
9889 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9890 DAG.getBitcast(MVT::v16i8, V1),
9891 DAG.getBuildVector(MVT::v16i8, DL, V1Mask));
9893 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9894 DAG.getBitcast(MVT::v16i8, V2),
9895 DAG.getBuildVector(MVT::v16i8, DL, V2Mask));
9897 // If we need shuffled inputs from both, blend the two.
9899 if (V1InUse && V2InUse)
9900 V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9902 V = V1InUse ? V1 : V2;
9904 // Cast the result back to the correct type.
9905 return DAG.getBitcast(VT, V);
9908 /// \brief Generic lowering of 8-lane i16 shuffles.
9910 /// This handles both single-input shuffles and combined shuffle/blends with
9911 /// two inputs. The single input shuffles are immediately delegated to
9912 /// a dedicated lowering routine.
9914 /// The blends are lowered in one of three fundamental ways. If there are few
9915 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9916 /// of the input is significantly cheaper when lowered as an interleaving of
9917 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9918 /// halves of the inputs separately (making them have relatively few inputs)
9919 /// and then concatenate them.
9920 static SDValue lowerV8I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
9921 SDValue V1, SDValue V2,
9922 const X86Subtarget &Subtarget,
9923 SelectionDAG &DAG) {
9924 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9925 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9926 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9928 // Whenever we can lower this as a zext, that instruction is strictly faster
9929 // than any alternative.
9930 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9931 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9934 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
9936 if (NumV2Inputs == 0) {
9937 // Check for being able to broadcast a single element.
9938 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
9939 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9942 // Try to use shift instructions.
9943 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
9947 // Use dedicated unpack instructions for masks that match their pattern.
9949 lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
9952 // Try to use byte rotation instructions.
9953 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i16, V1, V1,
9954 Mask, Subtarget, DAG))
9957 // Make a copy of the mask so it can be modified.
9958 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
9959 return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1,
9960 MutableMask, Subtarget,
9964 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
9965 "All single-input shuffles should be canonicalized to be V1-input "
9968 // Try to use shift instructions.
9969 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
9973 // See if we can use SSE4A Extraction / Insertion.
9974 if (Subtarget.hasSSE4A())
9975 if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask, DAG))
9978 // There are special ways we can lower some single-element blends.
9979 if (NumV2Inputs == 1)
9980 if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v8i16, V1, V2,
9981 Mask, Subtarget, DAG))
9984 // We have different paths for blend lowering, but they all must use the
9985 // *exact* same predicate.
9986 bool IsBlendSupported = Subtarget.hasSSE41();
9987 if (IsBlendSupported)
9988 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9992 if (SDValue Masked =
9993 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9996 // Use dedicated unpack instructions for masks that match their pattern.
9998 lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
10001 // Try to use byte rotation instructions.
10002 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10003 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
10006 if (SDValue BitBlend =
10007 lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
10010 // Try to lower by permuting the inputs into an unpack instruction.
10011 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1,
10015 // If we can't directly blend but can use PSHUFB, that will be better as it
10016 // can both shuffle and set up the inefficient blend.
10017 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
10018 bool V1InUse, V2InUse;
10019 return lowerVectorShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask, DAG,
10023 // We can always bit-blend if we have to so the fallback strategy is to
10024 // decompose into single-input permutes and blends.
10025 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
10029 /// \brief Check whether a compaction lowering can be done by dropping even
10030 /// elements and compute how many times even elements must be dropped.
10032 /// This handles shuffles which take every Nth element where N is a power of
10033 /// two. Example shuffle masks:
10035 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
10036 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
10037 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
10038 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
10039 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
10040 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
10042 /// Any of these lanes can of course be undef.
10044 /// This routine only supports N <= 3.
10045 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
10048 /// \returns N above, or the number of times even elements must be dropped if
10049 /// there is such a number. Otherwise returns zero.
10050 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
10051 bool IsSingleInput) {
10052 // The modulus for the shuffle vector entries is based on whether this is
10053 // a single input or not.
10054 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
10055 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
10056 "We should only be called with masks with a power-of-2 size!");
10058 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
10060 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
10061 // and 2^3 simultaneously. This is because we may have ambiguity with
10062 // partially undef inputs.
10063 bool ViableForN[3] = {true, true, true};
10065 for (int i = 0, e = Mask.size(); i < e; ++i) {
10066 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
10067 // want.
10068 if (Mask[i] < 0)
10069 continue;
10071 bool IsAnyViable = false;
10072 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
10073 if (ViableForN[j]) {
10074 uint64_t N = j + 1;
10076 // The shuffle mask must be equal to (i * 2^N) % M.
10077 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
10078 IsAnyViable = true;
10080 ViableForN[j] = false;
10082 // Early exit if we exhaust the possible powers of two.
10083 if (!IsAnyViable)
10084 break;
10085 }
10087 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
10088 if (ViableForN[j])
10089 return j + 1;
10091 // Return 0 as there is no viable power of two.
10092 return 0;
10093 }
10095 /// \brief Generic lowering of v16i8 shuffles.
10097 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
10098 /// detect any complexity reducing interleaving. If that doesn't help, it uses
10099 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
10100 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
10102 static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
10103 SDValue V1, SDValue V2,
10104 const X86Subtarget &Subtarget,
10105 SelectionDAG &DAG) {
10106 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
10107 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
10108 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10110 // Try to use shift instructions.
10111 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
10115 // Try to use byte rotation instructions.
10116 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10117 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
10120 // Try to use a zext lowering.
10121 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
10122 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
10125 // See if we can use SSE4A Extraction / Insertion.
10126 if (Subtarget.hasSSE4A())
10127 if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask, DAG))
10130 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
10132 // For single-input shuffles, there are some nicer lowering tricks we can use.
10133 if (NumV2Elements == 0) {
10134 // Check for being able to broadcast a single element.
10135 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
10136 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
10139 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
10140 // Notably, this handles splat and partial-splat shuffles more efficiently.
10141 // However, it only makes sense if the pre-duplication shuffle simplifies
10142 // things significantly. Currently, this means we need to be able to
10143 // express the pre-duplication shuffle as an i16 shuffle.
10145 // FIXME: We should check for other patterns which can be widened into an
10146 // i16 shuffle as well.
10147 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
10148 for (int i = 0; i < 16; i += 2)
10149 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
10154 auto tryToWidenViaDuplication = [&]() -> SDValue {
10155 if (!canWidenViaDuplication(Mask))
10157 SmallVector<int, 4> LoInputs;
10158 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
10159 [](int M) { return M >= 0 && M < 8; });
10160 std::sort(LoInputs.begin(), LoInputs.end());
10161 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
10163 SmallVector<int, 4> HiInputs;
10164 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
10165 [](int M) { return M >= 8; });
10166 std::sort(HiInputs.begin(), HiInputs.end());
10167 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
10170 bool TargetLo = LoInputs.size() >= HiInputs.size();
10171 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
10172 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
10174 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
10175 SmallDenseMap<int, int, 8> LaneMap;
10176 for (int I : InPlaceInputs) {
10177 PreDupI16Shuffle[I/2] = I/2;
10180 int j = TargetLo ? 0 : 4, je = j + 4;
10181 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
10182 // Check if j is already a shuffle of this input. This happens when
10183 // there are two adjacent bytes after we move the low one.
10184 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
10185 // If we haven't yet mapped the input, search for a slot into which
10187 while (j < je && PreDupI16Shuffle[j] >= 0)
10191 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
10194 // Map this input with the i16 shuffle.
10195 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
10198 // Update the lane map based on the mapping we ended up with.
10199 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
10201 V1 = DAG.getBitcast(
10203 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
10204 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
10206 // Unpack the bytes to form the i16s that will be shuffled into place.
10207 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
10208 MVT::v16i8, V1, V1);
10210 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10211 for (int i = 0; i < 16; ++i)
10212 if (Mask[i] >= 0) {
10213 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
10214 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
10215 if (PostDupI16Shuffle[i / 2] < 0)
10216 PostDupI16Shuffle[i / 2] = MappedMask;
10218 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
10219 "Conflicting entrties in the original shuffle!");
10221 return DAG.getBitcast(
10223 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
10224 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
10226 if (SDValue V = tryToWidenViaDuplication())
10230 if (SDValue Masked =
10231 lowerVectorShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask, DAG))
10234 // Use dedicated unpack instructions for masks that match their pattern.
10236 lowerVectorShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
10239 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
10240 // with PSHUFB. It is important to do this before we attempt to generate any
10241 // blends but after all of the single-input lowerings. If the single input
10242 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
10243 // want to preserve that and we can DAG combine any longer sequences into
10244 // a PSHUFB in the end. But once we start blending from multiple inputs,
10245 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
10246 // and there are *very* few patterns that would actually be faster than the
10247 // PSHUFB approach because of its ability to zero lanes.
10249 // FIXME: The only exceptions to the above are blends which are exact
10250 // interleavings with direct instructions supporting them. We currently don't
10251 // handle those well here.
10252 if (Subtarget.hasSSSE3()) {
10253 bool V1InUse = false;
10254 bool V2InUse = false;
10256 SDValue PSHUFB = lowerVectorShuffleAsBlendOfPSHUFBs(
10257 DL, MVT::v16i8, V1, V2, Mask, DAG, V1InUse, V2InUse);
10259 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
10260 // do so. This avoids using them to handle blends-with-zero which is
10261 // important as a single pshufb is significantly faster for that.
10262 if (V1InUse && V2InUse) {
10263 if (Subtarget.hasSSE41())
10264 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
10265 Mask, Subtarget, DAG))
10268 // We can use an unpack to do the blending rather than an or in some
10269 // cases. Even though the or may be (very minorly) more efficient, we
10270 // prefer this lowering because there are common cases where part of
10271 // the complexity of the shuffles goes away when we do the final blend as
10273 // FIXME: It might be worth trying to detect if the unpack-feeding
10274 // shuffles will both be pshufb, in which case we shouldn't bother with
10276 if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
10277 DL, MVT::v16i8, V1, V2, Mask, DAG))
10284 // There are special ways we can lower some single-element blends.
10285 if (NumV2Elements == 1)
10286 if (SDValue V = lowerVectorShuffleAsElementInsertion(DL, MVT::v16i8, V1, V2,
10287 Mask, Subtarget, DAG))
10290 if (SDValue BitBlend =
10291 lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
10294 // Check whether a compaction lowering can be done. This handles shuffles
10295 // which take every Nth element for some even N. See the helper function for
10298 // We special case these as they can be particularly efficiently handled with
10299 // the PACKUSWB instruction on x86 and they show up in common patterns of
10300 // rearranging bytes to truncate wide elements.
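// For example, with two inputs the mask <0, 2, 4, ..., 30> gives
// NumEvenDrops == 1: mask each input with 0x00FF per word and emit a single
// PACKUSWB.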
10301 bool IsSingleInput = V2.isUndef();
10302 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
10303 // NumEvenDrops is the power of two stride of the elements. Another way of
10304 // thinking about it is that we need to drop the even elements this many
10305 // times to get the original input.
10307 // First we need to zero all the dropped bytes.
10308 assert(NumEvenDrops <= 3 &&
10309 "No support for dropping even elements more than 3 times.");
10310 // We use the mask type to pick which bytes are preserved based on how many
10311 // elements are dropped.
10312 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
10313 SDValue ByteClearMask = DAG.getBitcast(
10314 MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
10315 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
10316 if (!IsSingleInput)
10317 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
10319 // Now pack things back together.
10320 V1 = DAG.getBitcast(MVT::v8i16, V1);
10321 V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
10322 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
10323 for (int i = 1; i < NumEvenDrops; ++i) {
10324 Result = DAG.getBitcast(MVT::v8i16, Result);
10325 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
10331 // Handle multi-input cases by blending single-input shuffles.
10332 if (NumV2Elements > 0)
10333 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
10336 // The fallback path for single-input shuffles widens this into two v8i16
10337 // vectors with unpacks, shuffles those, and then pulls them back together
10338 // with a pack.
10339 SDValue V = V1;
10341 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10342 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10343 for (int i = 0; i < 16; ++i)
10345 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
10347 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10349 SDValue VLoHalf, VHiHalf;
10350 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10351 // them out and avoid using UNPCK{L,H} to extract the elements of V as
10353 if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
10354 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10355 std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
10356 [](int M) { return M >= 0 && M % 2 == 1; })) {
10357 // Use a mask to drop the high bytes.
10358 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
10359 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
10360 DAG.getConstant(0x00FF, DL, MVT::v8i16));
10362 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
10363 VHiHalf = DAG.getUNDEF(MVT::v8i16);
10365 // Squash the masks to point directly into VLoHalf.
10366 for (int &M : LoBlendMask)
10369 for (int &M : HiBlendMask)
10373 // Otherwise just unpack the low half of V into VLoHalf and the high half into
10374 // VHiHalf so that we can blend them as i16s.
10375 VLoHalf = DAG.getBitcast(
10376 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10377 VHiHalf = DAG.getBitcast(
10378 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10381 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
10382 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
10384 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10387 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10389 /// This routine breaks down the specific type of 128-bit shuffle and
10390 /// dispatches to the lowering routines accordingly.
10391 static SDValue lower128BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
10392 MVT VT, SDValue V1, SDValue V2,
10393 const X86Subtarget &Subtarget,
10394 SelectionDAG &DAG) {
10395 switch (VT.SimpleTy) {
10396 case MVT::v2i64:
10397 return lowerV2I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
10398 case MVT::v2f64:
10399 return lowerV2F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
10400 case MVT::v4i32:
10401 return lowerV4I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
10402 case MVT::v4f32:
10403 return lowerV4F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
10404 case MVT::v8i16:
10405 return lowerV8I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
10406 case MVT::v16i8:
10407 return lowerV16I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
10409 default:
10410 llvm_unreachable("Unimplemented!");
10414 /// \brief Helper function to test whether a shuffle mask could be
10415 /// simplified by widening the elements being shuffled.
10417 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10418 /// leaves it in an unspecified state.
10420 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10421 /// shuffle masks. The latter have the special property of a '-2' representing
10422 /// a zeroed lane of a vector.
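/// For example, <0, 1, 6, 7> widens to <0, 3>, while <1, 2, 3, 0> cannot be
/// widened because its element pairs do not start on even indices.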
10423 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10424 SmallVectorImpl<int> &WidenedMask) {
10425 WidenedMask.assign(Mask.size() / 2, 0);
10426 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10427 // If both elements are undef, its trivial.
10428 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10429 WidenedMask[i/2] = SM_SentinelUndef;
10433 // Check for an undef mask and a mask value properly aligned to fit with
10434 // a pair of values. If we find such a case, use the non-undef mask's value.
10435 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10436 WidenedMask[i/2] = Mask[i + 1] / 2;
10439 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10440 WidenedMask[i/2] = Mask[i] / 2;
10444 // When zeroing, we need to spread the zeroing across both lanes to widen.
10445 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10446 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10447 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10448 WidenedMask[i/2] = SM_SentinelZero;
10454 // Finally check if the two mask values are adjacent and aligned with
10456 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10457 WidenedMask[i/2] = Mask[i] / 2;
10461 // Otherwise we can't safely widen the elements used in this shuffle.
10464 assert(WidenedMask.size() == Mask.size() / 2 &&
10465 "Incorrect size of mask after widening the elements!");
10470 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10472 /// This routine just extracts two subvectors, shuffles them independently, and
10473 /// then concatenates them back together. This should work effectively with all
10474 /// AVX vector shuffle types.
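/// For example, a v8i32 shuffle is lowered as two v4i32 half-shuffles whose
/// results are joined with a CONCAT_VECTORS node.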
10475 static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
10476 SDValue V2, ArrayRef<int> Mask,
10477 SelectionDAG &DAG) {
10478 assert(VT.getSizeInBits() >= 256 &&
10479 "Only for 256-bit or wider vector shuffles!");
10480 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10481 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10483 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10484 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10486 int NumElements = VT.getVectorNumElements();
10487 int SplitNumElements = NumElements / 2;
10488 MVT ScalarVT = VT.getVectorElementType();
10489 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10491 // Rather than splitting build-vectors, just build two narrower build
10492 // vectors. This helps shuffling with splats and zeros.
10493 auto SplitVector = [&](SDValue V) {
10494 V = peekThroughBitcasts(V);
10496 MVT OrigVT = V.getSimpleValueType();
10497 int OrigNumElements = OrigVT.getVectorNumElements();
10498 int OrigSplitNumElements = OrigNumElements / 2;
10499 MVT OrigScalarVT = OrigVT.getVectorElementType();
10500 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10504 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10506 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10507 DAG.getIntPtrConstant(0, DL));
10508 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10509 DAG.getIntPtrConstant(OrigSplitNumElements, DL));
10512 SmallVector<SDValue, 16> LoOps, HiOps;
10513 for (int i = 0; i < OrigSplitNumElements; ++i) {
10514 LoOps.push_back(BV->getOperand(i));
10515 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10517 LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
10518 HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
10520 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
10521 DAG.getBitcast(SplitVT, HiV));
10524 SDValue LoV1, HiV1, LoV2, HiV2;
10525 std::tie(LoV1, HiV1) = SplitVector(V1);
10526 std::tie(LoV2, HiV2) = SplitVector(V2);
10528 // Now create two 4-way blends of these half-width vectors.
10529 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10530 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10531 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
10532 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
10533 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
10534 for (int i = 0; i < SplitNumElements; ++i) {
10535 int M = HalfMask[i];
10536 if (M >= NumElements) {
10537 if (M >= NumElements + SplitNumElements)
10541 V2BlendMask[i] = M - NumElements;
10542 BlendMask[i] = SplitNumElements + i;
10543 } else if (M >= 0) {
10544 if (M >= SplitNumElements)
10548 V1BlendMask[i] = M;
10553 // Because the lowering happens after all combining takes place, we need to
10554 // manually combine these blend masks as much as possible so that we create
10555 // a minimal number of high-level vector shuffle nodes.
10557 // First try just blending the halves of V1 or V2.
10558 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10559 return DAG.getUNDEF(SplitVT);
10560 if (!UseLoV2 && !UseHiV2)
10561 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10562 if (!UseLoV1 && !UseHiV1)
10563 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10565 SDValue V1Blend, V2Blend;
10566 if (UseLoV1 && UseHiV1) {
10568 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10570 // We only use half of V1 so map the usage down into the final blend mask.
10571 V1Blend = UseLoV1 ? LoV1 : HiV1;
10572 for (int i = 0; i < SplitNumElements; ++i)
10573 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10574 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10576 if (UseLoV2 && UseHiV2) {
10578 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10580 // We only use half of V2 so map the usage down into the final blend mask.
10581 V2Blend = UseLoV2 ? LoV2 : HiV2;
10582 for (int i = 0; i < SplitNumElements; ++i)
10583 if (BlendMask[i] >= SplitNumElements)
10584 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10586 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10588 SDValue Lo = HalfBlend(LoMask);
10589 SDValue Hi = HalfBlend(HiMask);
10590 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10593 /// \brief Either split a vector in halves or decompose the shuffles and the
10596 /// This is provided as a good fallback for many lowerings of non-single-input
10597 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10598 /// between splitting the shuffle into 128-bit components and stitching those
10599 /// back together vs. extracting the single-input shuffles and blending those
10601 static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT,
10602 SDValue V1, SDValue V2,
10603 ArrayRef<int> Mask,
10604 SelectionDAG &DAG) {
10605 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
10606 "shuffles as it could then recurse on itself.");
10607 int Size = Mask.size();
10609 // If this can be modeled as a broadcast of two elements followed by a blend,
10610 // prefer that lowering. This is especially important because broadcasts can
10611 // often fold with memory operands.
10612 auto DoBothBroadcast = [&] {
10613 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10616 if (V2BroadcastIdx < 0)
10617 V2BroadcastIdx = M - Size;
10618 else if (M - Size != V2BroadcastIdx)
10620 } else if (M >= 0) {
10621 if (V1BroadcastIdx < 0)
10622 V1BroadcastIdx = M;
10623 else if (M != V1BroadcastIdx)
10628 if (DoBothBroadcast())
10629 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10632 // If the inputs all stem from a single 128-bit lane of each input, then we
10633 // split them rather than blending because the split will decompose to
10634 // unusually few instructions.
10635 int LaneCount = VT.getSizeInBits() / 128;
10636 int LaneSize = Size / LaneCount;
10637 SmallBitVector LaneInputs[2];
10638 LaneInputs[0].resize(LaneCount, false);
10639 LaneInputs[1].resize(LaneCount, false);
10640 for (int i = 0; i < Size; ++i)
10642 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10643 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10644 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10646 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10647 // that the decomposed single-input shuffles don't end up here.
10648 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10651 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10652 /// a permutation and blend of those lanes.
10654 /// This essentially blends the out-of-lane inputs to each lane into the lane
10655 /// from a permuted copy of the vector. This lowering strategy results in four
10656 /// instructions in the worst case for a single-input cross lane shuffle which
10657 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10658 /// of. Special cases for each particular shuffle pattern should be handled
10659 /// prior to trying this lowering.
10660 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT,
10661 SDValue V1, SDValue V2,
10662 ArrayRef<int> Mask,
10663 SelectionDAG &DAG) {
10664 // FIXME: This should probably be generalized for 512-bit vectors as well.
10665 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
10666 int Size = Mask.size();
10667 int LaneSize = Size / 2;
10669 // If there are only inputs from one 128-bit lane, splitting will in fact be
10670 // less expensive. The flags track whether the given lane contains an element
10671 // that crosses to another lane.
10672 bool LaneCrossing[2] = {false, false};
10673 for (int i = 0; i < Size; ++i)
10674 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10675 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10676 if (!LaneCrossing[0] || !LaneCrossing[1])
10677 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10679 assert(V2.isUndef() &&
10680 "This last part of this routine only works on single input shuffles");
10682 SmallVector<int, 32> FlippedBlendMask(Size);
10683 for (int i = 0; i < Size; ++i)
10684 FlippedBlendMask[i] =
10685 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10687 : Mask[i] % LaneSize +
10688 (i / LaneSize) * LaneSize + Size);
10690 // Flip the vector, and blend the results which should now be in-lane. The
10691 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10692 // 5 for the high source. The value 3 selects the high half of source 2 and
10693 // the value 2 selects the low half of source 2. We only use source 2 to
10694 // allow folding it into a memory operand.
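// For example, a single-input v4f64 shuffle <2, 1, 0, 3> flips the halves with
// VPERM2X128 and then blends against the original vector with the in-lane mask
// <4, 1, 6, 3>.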
10695 unsigned PERMMask = 3 | 2 << 4;
10696 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10697 V1, DAG.getConstant(PERMMask, DL, MVT::i8));
10698 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10701 /// \brief Handle lowering 2-lane 128-bit shuffles.
10702 static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
10703 SDValue V2, ArrayRef<int> Mask,
10704 const X86Subtarget &Subtarget,
10705 SelectionDAG &DAG) {
10706 // TODO: If minimizing size and one of the inputs is a zero vector and the
10707 // zero vector has only one use, we could use a VPERM2X128 to save the
10708 // instruction bytes needed to explicitly generate the zero vector.
10710 // Blends are faster and handle all the non-lane-crossing cases.
10711 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10715 bool IsV1Zero = ISD::isBuildVectorAllZeros(V1.getNode());
10716 bool IsV2Zero = ISD::isBuildVectorAllZeros(V2.getNode());
10718 // If either input operand is a zero vector, use VPERM2X128 because its mask
10719 // allows us to replace the zero input with an implicit zero.
10720 if (!IsV1Zero && !IsV2Zero) {
10721 // Check for patterns which can be matched with a single insert of a 128-bit
10722 // subvector.
10723 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
10724 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
10725 // With AVX2 we should use VPERMQ/VPERMPD to allow memory folding.
10726 if (Subtarget.hasAVX2() && V2.isUndef())
10729 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10730 VT.getVectorNumElements() / 2);
10731 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10732 DAG.getIntPtrConstant(0, DL));
10733 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10734 OnlyUsesV1 ? V1 : V2,
10735 DAG.getIntPtrConstant(0, DL));
10736 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10740 // Otherwise form a 128-bit permutation. After accounting for undefs,
10741 // convert the 64-bit shuffle mask selection values into 128-bit
10742 // selection bits by dividing the indexes by 2 and shifting into positions
10743 // defined by a vperm2*128 instruction's immediate control byte.
10745 // The immediate permute control byte looks like this:
10746 // [1:0] - select 128 bits from sources for low half of destination
10748 // [3] - zero low half of destination
10749 // [5:4] - select 128 bits from sources for high half of destination
10751 // [7] - zero high half of destination
10753 int MaskLO = Mask[0];
10754 if (MaskLO == SM_SentinelUndef)
10755 MaskLO = Mask[1] == SM_SentinelUndef ? 0 : Mask[1];
10757 int MaskHI = Mask[2];
10758 if (MaskHI == SM_SentinelUndef)
10759 MaskHI = Mask[3] == SM_SentinelUndef ? 0 : Mask[3];
10761 unsigned PermMask = MaskLO / 2 | (MaskHI / 2) << 4;
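// e.g. Mask <0, 1, 6, 7> gives MaskLO == 0 and MaskHI == 6, so PermMask ==
// 0x30: the low half of the result is the low half of V1 and the high half
// is the high half of V2.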
10763 // If either input is a zero vector, replace it with an undef input.
10764 // Shuffle mask values < 4 are selecting elements of V1.
10765 // Shuffle mask values >= 4 are selecting elements of V2.
10766 // Adjust each half of the permute mask by clearing the half that was
10767 // selecting the zero vector and setting the zero mask bit.
10768 if (IsV1Zero) {
10769 V1 = DAG.getUNDEF(VT);
10770 if (MaskLO < 4)
10771 PermMask = (PermMask & 0xf0) | 0x08;
10772 if (MaskHI < 4)
10773 PermMask = (PermMask & 0x0f) | 0x80;
10774 }
10775 if (IsV2Zero) {
10776 V2 = DAG.getUNDEF(VT);
10777 if (MaskLO >= 4)
10778 PermMask = (PermMask & 0xf0) | 0x08;
10779 if (MaskHI >= 4)
10780 PermMask = (PermMask & 0x0f) | 0x80;
10781 }
10783 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10784 DAG.getConstant(PermMask, DL, MVT::i8));
10787 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10788 /// shuffling each lane.
10790 /// This will only succeed when the result of fixing the 128-bit lanes results
10791 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10792 /// each 128-bit lane. This handles many cases where we can quickly blend away
10793 /// the lane crosses early and then use simpler shuffles within each lane.
10795 /// FIXME: It might be worthwhile at some point to support this without
10796 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10797 /// in x86 only floating point has interesting non-repeating shuffles, and even
10798 /// those are still *marginally* more expensive.
10799 static SDValue lowerVectorShuffleByMerging128BitLanes(
10800 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10801 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
10802 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
10804 int Size = Mask.size();
10805 int LaneSize = 128 / VT.getScalarSizeInBits();
10806 int NumLanes = Size / LaneSize;
10807 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10809 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10810 // check whether the in-128-bit lane shuffles share a repeating pattern.
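// e.g. the v8f32 mask <2, 3, 0, 1, 14, 15, 12, 13> takes its low lane
// entirely from lane 0 of V1 and its high lane from lane 1 of V2, with the
// in-lane pattern <2, 3, 0, 1> repeating in both lanes.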
10811 SmallVector<int, 4> Lanes((unsigned)NumLanes, -1);
10812 SmallVector<int, 4> InLaneMask((unsigned)LaneSize, -1);
10813 for (int i = 0; i < Size; ++i) {
10817 int j = i / LaneSize;
10819 if (Lanes[j] < 0) {
10820 // First entry we've seen for this lane.
10821 Lanes[j] = Mask[i] / LaneSize;
10822 } else if (Lanes[j] != Mask[i] / LaneSize) {
10823 // This doesn't match the lane selected previously!
10827 // Check that within each lane we have a consistent shuffle mask.
10828 int k = i % LaneSize;
10829 if (InLaneMask[k] < 0) {
10830 InLaneMask[k] = Mask[i] % LaneSize;
10831 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10832 // This doesn't fit a repeating in-lane mask.
10837 // First shuffle the lanes into place.
10838 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10839 VT.getSizeInBits() / 64);
10840 SmallVector<int, 8> LaneMask((unsigned)NumLanes * 2, -1);
10841 for (int i = 0; i < NumLanes; ++i)
10842 if (Lanes[i] >= 0) {
10843 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10844 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10847 V1 = DAG.getBitcast(LaneVT, V1);
10848 V2 = DAG.getBitcast(LaneVT, V2);
10849 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10851 // Cast it back to the type we actually want.
10852 LaneShuffle = DAG.getBitcast(VT, LaneShuffle);
10854 // Now do a simple shuffle that isn't lane crossing.
10855 SmallVector<int, 8> NewMask((unsigned)Size, -1);
10856 for (int i = 0; i < Size; ++i)
10858 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10859 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10860 "Must not introduce lane crosses at this point!");
10862 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10865 /// Lower shuffles where an entire half of a 256-bit vector is UNDEF.
10866 /// This allows for fast cases such as subvector extraction/insertion
10867 /// or shuffling smaller vector types which can lower more efficiently.
10868 static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT,
10869 SDValue V1, SDValue V2,
10870 ArrayRef<int> Mask,
10871 const X86Subtarget &Subtarget,
10872 SelectionDAG &DAG) {
10873 assert(VT.is256BitVector() && "Expected 256-bit vector");
10875 unsigned NumElts = VT.getVectorNumElements();
10876 unsigned HalfNumElts = NumElts / 2;
10877 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
10879 bool UndefLower = isUndefInRange(Mask, 0, HalfNumElts);
10880 bool UndefUpper = isUndefInRange(Mask, HalfNumElts, HalfNumElts);
10881 if (!UndefLower && !UndefUpper)
10884 // Upper half is undef and lower half is whole upper subvector.
10885 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
10886 if (UndefUpper &&
10887 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
10888 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
10889 DAG.getIntPtrConstant(HalfNumElts, DL));
10890 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
10891 DAG.getIntPtrConstant(0, DL));
10894 // Lower half is undef and upper half is whole lower subvector.
10895 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
10896 if (UndefLower &&
10897 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
10898 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
10899 DAG.getIntPtrConstant(0, DL));
10900 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
10901 DAG.getIntPtrConstant(HalfNumElts, DL));
10904 // If the shuffle only uses two of the four halves of the input operands,
10905 // then extract them and perform the 'half' shuffle at half width.
10906 // e.g. vector_shuffle <X, X, X, X, u, u, u, u> or <X, X, u, u>
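// e.g. for the v8f32 mask <u, u, u, u, 0, 8, 1, 9> only the low halves of V1
// and V2 are referenced; they can be extracted, shuffled as v4f32 with the
// half mask <0, 4, 1, 5>, and the result inserted into the upper half.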
10907 int HalfIdx1 = -1, HalfIdx2 = -1;
10908 SmallVector<int, 8> HalfMask(HalfNumElts);
10909 unsigned Offset = UndefLower ? HalfNumElts : 0;
10910 for (unsigned i = 0; i != HalfNumElts; ++i) {
10911 int M = Mask[i + Offset];
10917 // Determine which of the 4 half vectors this element is from.
10918 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
10919 int HalfIdx = M / HalfNumElts;
10921 // Determine the element index into its half vector source.
10922 int HalfElt = M % HalfNumElts;
10924 // We can shuffle with up to 2 half vectors, set the new 'half'
10925 // shuffle mask accordingly.
10926 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
10927 HalfMask[i] = HalfElt;
10928 HalfIdx1 = HalfIdx;
10931 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
10932 HalfMask[i] = HalfElt + HalfNumElts;
10933 HalfIdx2 = HalfIdx;
10937 // Too many half vectors referenced.
10940 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
10942 // Only shuffle the halves of the inputs when useful.
10943 int NumLowerHalves =
10944 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
10945 int NumUpperHalves =
10946 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
10948 // uuuuXXXX - don't extract uppers just to insert again.
10949 if (UndefLower && NumUpperHalves != 0)
10952 // XXXXuuuu - don't extract both uppers, instead shuffle and then extract.
10953 if (UndefUpper && NumUpperHalves == 2)
10956 // AVX2 - XXXXuuuu - always extract lowers.
10957 if (Subtarget.hasAVX2() && !(UndefUpper && NumUpperHalves == 0)) {
10958 // AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
10959 if (VT == MVT::v4f64 || VT == MVT::v4i64)
10961 // AVX2 supports variable 32-bit element cross-lane shuffles.
10962 if (VT == MVT::v8f32 || VT == MVT::v8i32) {
10963 // XXXXuuuu - don't extract lowers and uppers.
10964 if (UndefUpper && NumLowerHalves != 0 && NumUpperHalves != 0)
10969 auto GetHalfVector = [&](int HalfIdx) {
10971 return DAG.getUNDEF(HalfVT);
10972 SDValue V = (HalfIdx < 2 ? V1 : V2);
10973 HalfIdx = (HalfIdx % 2) * HalfNumElts;
10974 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
10975 DAG.getIntPtrConstant(HalfIdx, DL));
10978 SDValue Half1 = GetHalfVector(HalfIdx1);
10979 SDValue Half2 = GetHalfVector(HalfIdx2);
10980 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
10981 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
10982 DAG.getIntPtrConstant(Offset, DL));
10985 /// \brief Test whether the specified input (0 or 1) is in-place blended by the given mask.
10988 /// This returns true if the elements from a particular input are already in the
10989 /// slot required by the given mask and require no permutation.
10990 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10991 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10992 int Size = Mask.size();
10993 for (int i = 0; i < Size; ++i)
10994 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
11000 /// Handle case where shuffle sources are coming from the same 128-bit lane and
11001 /// every lane can be represented as the same repeating mask - allowing us to
11002 /// shuffle the sources with the repeating shuffle and then permute the result
11003 /// to the destination lanes.
11004 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
11005 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11006 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11007 int NumElts = VT.getVectorNumElements();
11008 int NumLanes = VT.getSizeInBits() / 128;
11009 int NumLaneElts = NumElts / NumLanes;
11011 // On AVX2 we may be able to just shuffle the lowest elements and then
11012 // broadcast the result.
11013 if (Subtarget.hasAVX2()) {
11014 for (unsigned BroadcastSize : {16, 32, 64}) {
11015 if (BroadcastSize <= VT.getScalarSizeInBits())
11017 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
11019 // Attempt to match a repeating pattern every NumBroadcastElts,
11020 // accounting for UNDEFs but only referencing the lowest 128-bit
11021 // lane of the inputs.
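// e.g. the v8i32 mask <1, u, 1, u, 1, u, 1, u> repeats every two elements
// and only references the low 128-bit lane, so it can be lowered as an
// in-place shuffle of the low elements followed by a 64-bit broadcast.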
11022 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
11023 for (int i = 0; i != NumElts; i += NumBroadcastElts)
11024 for (int j = 0; j != NumBroadcastElts; ++j) {
11025 int M = Mask[i + j];
11028 int &R = RepeatMask[j];
11029 if (0 != ((M % NumElts) / NumLaneElts))
11031 if (0 <= R && R != M)
11038 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
11039 if (!FindRepeatingBroadcastMask(RepeatMask))
11042 // Shuffle the (lowest) repeated elements in place for broadcast.
11043 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
11045 // Shuffle the actual broadcast.
11046 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
11047 for (int i = 0; i != NumElts; i += NumBroadcastElts)
11048 for (int j = 0; j != NumBroadcastElts; ++j)
11049 BroadcastMask[i + j] = j;
11050 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
11055 // Bail if the shuffle mask doesn't cross 128-bit lanes.
11056 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
11059 // Bail if we already have a repeated lane shuffle mask.
11060 SmallVector<int, 8> RepeatedShuffleMask;
11061 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
11064 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
11065 // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
11066 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
11067 int NumSubLanes = NumLanes * SubLaneScale;
11068 int NumSubLaneElts = NumLaneElts / SubLaneScale;
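// e.g. for v8f32 on AVX2 this gives four 64-bit sub-lanes of two elements
// each, whose final placement can later be matched as a single VPERMPD.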
11070 // Check that all the sources are coming from the same lane and see if we can
11071 // form a repeating shuffle mask (local to each sub-lane). At the same time,
11072 // determine the source sub-lane for each destination sub-lane.
11073 int TopSrcSubLane = -1;
11074 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
11075 SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
11076 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
11077 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
11079 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
11080 // Extract the sub-lane mask, check that it all comes from the same lane
11081 // and normalize the mask entries to come from the first lane.
11083 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
11084 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
11085 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
11088 int Lane = (M % NumElts) / NumLaneElts;
11089 if ((0 <= SrcLane) && (SrcLane != Lane))
11092 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
11093 SubLaneMask[Elt] = LocalM;
11096 // Whole sub-lane is UNDEF.
11100 // Attempt to match against the candidate repeated sub-lane masks.
11101 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
11102 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
11103 for (int i = 0; i != NumSubLaneElts; ++i) {
11104 if (M1[i] < 0 || M2[i] < 0)
11106 if (M1[i] != M2[i])
11112 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
11113 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
11116 // Merge the sub-lane mask into the matching repeated sub-lane mask.
11117 for (int i = 0; i != NumSubLaneElts; ++i) {
11118 int M = SubLaneMask[i];
11121 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
11122 "Unexpected mask element");
11123 RepeatedSubLaneMask[i] = M;
11126 // Track the top most source sub-lane - by setting the remaining to UNDEF
11127 // we can greatly simplify shuffle matching.
11128 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
11129 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
11130 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
11134 // Bail if we failed to find a matching repeated sub-lane mask.
11135 if (Dst2SrcSubLanes[DstSubLane] < 0)
11138 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
11139 "Unexpected source lane");
11141 // Create a repeating shuffle mask for the entire vector.
11142 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
11143 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
11144 int Lane = SubLane / SubLaneScale;
11145 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
11146 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
11147 int M = RepeatedSubLaneMask[Elt];
11150 int Idx = (SubLane * NumSubLaneElts) + Elt;
11151 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
11154 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
11156 // Shuffle each source sub-lane to its destination.
11157 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
11158 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
11159 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
11160 if (SrcSubLane < 0)
11162 for (int j = 0; j != NumSubLaneElts; ++j)
11163 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
11166 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
11170 static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT,
11171 ArrayRef<int> Mask, SDValue V1,
11172 SDValue V2, SelectionDAG &DAG) {
11174 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ...
11175 // Mask for V4F64: 0/1, 4/5, 2/3, 6/7, ...
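// e.g. the v4f64 mask <1, 5, 2, 7> fits the SHUFPD pattern and yields
// Immediate == 0b1011 (bit i is Mask[i] % 2).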
11176 assert(VT.getScalarSizeInBits() == 64 && "Unexpected data type for VSHUFPD");
11177 int NumElts = VT.getVectorNumElements();
11178 bool ShufpdMask = true;
11179 bool CommutableMask = true;
11180 unsigned Immediate = 0;
11181 for (int i = 0; i < NumElts; ++i) {
11184 int Val = (i & 6) + NumElts * (i & 1);
11185 int CommutVal = (i & 0xe) + NumElts * ((i & 1)^1);
11186 if (Mask[i] < Val || Mask[i] > Val + 1)
11187 ShufpdMask = false;
11188 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
11189 CommutableMask = false;
11190 Immediate |= (Mask[i] % 2) << i;
11191 }
11192 if (ShufpdMask)
11193 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
11194 DAG.getConstant(Immediate, DL, MVT::i8));
11195 if (CommutableMask)
11196 return DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
11197 DAG.getConstant(Immediate, DL, MVT::i8));
11201 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
11203 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
11204 /// isn't available.
11205 static SDValue lowerV4F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11206 SDValue V1, SDValue V2,
11207 const X86Subtarget &Subtarget,
11208 SelectionDAG &DAG) {
11209 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
11210 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
11211 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
11213 SmallVector<int, 4> WidenedMask;
11214 if (canWidenShuffleElements(Mask, WidenedMask))
11215 if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask,
11219 if (V2.isUndef()) {
11220 // Check for being able to broadcast a single element.
11221 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
11222 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
11225 // Use low duplicate instructions for masks that match their pattern.
11226 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
11227 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
11229 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
11230 // Non-half-crossing single input shuffles can be lowered with an
11231 // interleaved permutation.
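// e.g. the mask <1, 0, 3, 2> gives VPERMILPMask == 0b0101, swapping the two
// doubles within each 128-bit lane.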
11232 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
11233 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
11234 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
11235 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
11238 // With AVX2 we have direct support for this permutation.
11239 if (Subtarget.hasAVX2())
11240 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
11241 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
11243 // Try to create an in-lane repeating shuffle mask and then shuffle the
11244 // results into the target lanes.
11245 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
11246 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
11249 // Otherwise, fall back.
11250 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
11254 // Use dedicated unpack instructions for masks that match their pattern.
11256 lowerVectorShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
11259 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
11263 // Check if the blend happens to exactly fit that of SHUFPD.
11265 lowerVectorShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
11268 // Try to create an in-lane repeating shuffle mask and then shuffle the
11269 // results into the target lanes.
11270 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
11271 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
11274 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11275 // shuffle. However, if we have AVX2 and either input is already in place,
11276 // we will be able to shuffle the other input even across lanes in a single
11277 // instruction, so skip this pattern.
11278 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
11279 isShuffleMaskInputInPlace(1, Mask))))
11280 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11281 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
11284 // If we have AVX2 then we always want to lower with a blend because at v4 we
11285 // can fully permute the elements.
11286 if (Subtarget.hasAVX2())
11287 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
11290 // Otherwise fall back on generic lowering.
11291 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
11294 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
11296 /// This routine is only called when we have AVX2 and thus a reasonable
11297 /// instruction set for v4i64 shuffling.
11298 static SDValue lowerV4I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11299 SDValue V1, SDValue V2,
11300 const X86Subtarget &Subtarget,
11301 SelectionDAG &DAG) {
11302 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
11303 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
11304 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
11305 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
11307 SmallVector<int, 4> WidenedMask;
11308 if (canWidenShuffleElements(Mask, WidenedMask))
11309 if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask,
11313 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
11317 // Check for being able to broadcast a single element.
11318 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1, V2,
11319 Mask, Subtarget, DAG))
11322 if (V2.isUndef()) {
11323 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
11324 // can use lower latency instructions that will operate on both lanes.
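// e.g. the v4i64 mask <1, 0, 3, 2> repeats as <1, 0> per 128-bit lane and
// scales to the v8i32 PSHUFD mask <2, 3, 0, 1>.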
11325 SmallVector<int, 2> RepeatedMask;
11326 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
11327 SmallVector<int, 4> PSHUFDMask;
11328 scaleShuffleMask(2, RepeatedMask, PSHUFDMask);
11329 return DAG.getBitcast(
11331 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
11332 DAG.getBitcast(MVT::v8i32, V1),
11333 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11336 // AVX2 provides a direct instruction for permuting a single input across lanes.
11338 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
11339 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
11342 // Try to use shift instructions.
11343 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
11347 // Use dedicated unpack instructions for masks that match their pattern.
11349 lowerVectorShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
11352 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11353 // shuffle. However, if we have AVX2 and either input is already in place,
11354 // we will be able to shuffle the other input even across lanes in a single
11355 // instruction, so skip this pattern.
11356 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
11357 isShuffleMaskInputInPlace(1, Mask))))
11358 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11359 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
11362 // Otherwise fall back on generic blend lowering.
11363 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
11367 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
11369 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
11370 /// isn't available.
11371 static SDValue lowerV8F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11372 SDValue V1, SDValue V2,
11373 const X86Subtarget &Subtarget,
11374 SelectionDAG &DAG) {
11375 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
11376 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
11377 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11379 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
11383 // Check for being able to broadcast a single element.
11384 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1, V2,
11385 Mask, Subtarget, DAG))
11388 // If the shuffle mask is repeated in each 128-bit lane, we have many more
11389 // options to efficiently lower the shuffle.
11390 SmallVector<int, 4> RepeatedMask;
11391 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
11392 assert(RepeatedMask.size() == 4 &&
11393 "Repeated masks must be half the mask width!");
11395 // Use even/odd duplicate instructions for masks that match their pattern.
11396 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
11397 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
11398 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
11399 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
11402 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
11403 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
11405 // Use dedicated unpack instructions for masks that match their pattern.
11407 lowerVectorShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
11410 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
11411 // have already handled any direct blends.
11412 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
11415 // Try to create an in-lane repeating shuffle mask and then shuffle the
11416 // results into the target lanes.
11417 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
11418 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
11421 // If we have a single input shuffle with different shuffle patterns in the
11422 // two 128-bit lanes, use a variable VPERMILPS mask.
11423 if (V2.isUndef()) {
11424 SDValue VPermMask[8];
11425 for (int i = 0; i < 8; ++i)
11426 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
11427 : DAG.getConstant(Mask[i], DL, MVT::i32);
11428 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
11429 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
11430 DAG.getBuildVector(MVT::v8i32, DL, VPermMask));
11432 if (Subtarget.hasAVX2())
11433 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
11434 DAG.getBuildVector(MVT::v8i32, DL, VPermMask), V1);
11436 // Otherwise, fall back.
11437 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
11441 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
11443 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11444 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
11447 // If we have AVX2 then we always want to lower with a blend because at v8 we
11448 // can fully permute the elements.
11449 if (Subtarget.hasAVX2())
11450 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
11453 // Otherwise fall back on generic lowering.
11454 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
11457 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
11459 /// This routine is only called when we have AVX2 and thus a reasonable
11460 /// instruction set for v8i32 shuffling.
11461 static SDValue lowerV8I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11462 SDValue V1, SDValue V2,
11463 const X86Subtarget &Subtarget,
11464 SelectionDAG &DAG) {
11465 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
11466 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
11467 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11468 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
11470 // Whenever we can lower this as a zext, that instruction is strictly faster
11471 // than any alternative. It also allows us to fold memory operands into the
11472 // shuffle in many cases.
11473 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
11474 Mask, Subtarget, DAG))
11477 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
11481 // Check for being able to broadcast a single element.
11482 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1, V2,
11483 Mask, Subtarget, DAG))
11486 // If the shuffle mask is repeated in each 128-bit lane we can use more
11487 // efficient instructions that mirror the shuffles across the two 128-bit lanes.
11489 SmallVector<int, 4> RepeatedMask;
11490 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
11491 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
11493 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
11494 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
11496 // Use dedicated unpack instructions for masks that match their pattern.
11498 lowerVectorShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
11502 // Try to use shift instructions.
11503 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
11507 // Try to use byte rotation instructions.
11508 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11509 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
11512 // Try to create an in-lane repeating shuffle mask and then shuffle the
11513 // results into the target lanes.
11514 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
11515 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
11518 // If the shuffle patterns aren't repeated but it is a single input, directly
11519 // generate a cross-lane VPERMD instruction.
11520 if (V2.isUndef()) {
11521 SDValue VPermMask[8];
11522 for (int i = 0; i < 8; ++i)
11523 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
11524 : DAG.getConstant(Mask[i], DL, MVT::i32);
11525 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32,
11526 DAG.getBuildVector(MVT::v8i32, DL, VPermMask), V1);
11529 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
11531 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11532 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
11535 // Otherwise fall back on generic blend lowering.
11536 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
11540 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
11542 /// This routine is only called when we have AVX2 and thus a reasonable
11543 /// instruction set for v16i16 shuffling.
11544 static SDValue lowerV16I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11545 SDValue V1, SDValue V2,
11546 const X86Subtarget &Subtarget,
11547 SelectionDAG &DAG) {
11548 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
11549 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
11550 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11551 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
11553 // Whenever we can lower this as a zext, that instruction is strictly faster
11554 // than any alternative. It also allows us to fold memory operands into the
11555 // shuffle in many cases.
11556 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
11557 Mask, Subtarget, DAG))
11560 // Check for being able to broadcast a single element.
11561 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1, V2,
11562 Mask, Subtarget, DAG))
11565 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
11569 // Use dedicated unpack instructions for masks that match their pattern.
11571 lowerVectorShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
11574 // Try to use shift instructions.
11575 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
11579 // Try to use byte rotation instructions.
11580 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11581 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11584 // Try to create an in-lane repeating shuffle mask and then shuffle the
11585 // results into the target lanes.
11586 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
11587 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11590 if (V2.isUndef()) {
11591 // There are no generalized cross-lane shuffle operations available on i16 element types.
11593 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
11594 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
11597 SmallVector<int, 8> RepeatedMask;
11598 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
11599 // As this is a single-input shuffle, the repeated mask should be
11600 // a strictly valid v8i16 mask that we can pass through to the v8i16
11601 // lowering to handle even the v16 case.
11602 return lowerV8I16GeneralSingleInputVectorShuffle(
11603 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
11607 if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1,
11608 V2, Subtarget, DAG))
11611 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
11613 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11614 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11617 // Otherwise fall back on generic lowering.
11618 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
11621 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
11623 /// This routine is only called when we have AVX2 and thus a reasonable
11624 /// instruction set for v32i8 shuffling.
11625 static SDValue lowerV32I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11626 SDValue V1, SDValue V2,
11627 const X86Subtarget &Subtarget,
11628 SelectionDAG &DAG) {
11629 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11630 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11631 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11632 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
11634 // Whenever we can lower this as a zext, that instruction is strictly faster
11635 // than any alternative. It also allows us to fold memory operands into the
11636 // shuffle in many cases.
11637 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
11638 Mask, Subtarget, DAG))
11641 // Check for being able to broadcast a single element.
11642 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1, V2,
11643 Mask, Subtarget, DAG))
11646 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
11650 // Use dedicated unpack instructions for masks that match their pattern.
11652 lowerVectorShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
11655 // Try to use shift instructions.
11656 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
11660 // Try to use byte rotation instructions.
11661 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11662 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11665 // Try to create an in-lane repeating shuffle mask and then shuffle the
11666 // results into the target lanes.
11667 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
11668 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11671 // There are no generalized cross-lane shuffle operations available on i8 element types.
11673 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11674 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask,
11677 if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1,
11678 V2, Subtarget, DAG))
11681 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
11683 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11684 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11687 // Otherwise fall back on generic lowering.
11688 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11691 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11693 /// This routine either breaks down the specific type of a 256-bit x86 vector
11694 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11695 /// together based on the available instructions.
11696 static SDValue lower256BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11697 MVT VT, SDValue V1, SDValue V2,
11698 const X86Subtarget &Subtarget,
11699 SelectionDAG &DAG) {
11700 // If we have a single input to the zero element, insert that into V1 if we
11701 // can do so cheaply.
11702 int NumElts = VT.getVectorNumElements();
11703 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
11705 if (NumV2Elements == 1 && Mask[0] >= NumElts)
11706 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
11707 DL, VT, V1, V2, Mask, Subtarget, DAG))
11710 // Handle special cases where the lower or upper half is UNDEF.
11712 lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
11715 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
11716 // can check for those subtargets here and avoid much of the subtarget
11717 // querying in the per-vector-type lowering routines. With AVX1 we have
11718 // essentially *zero* ability to manipulate a 256-bit vector with integer
11719 // types. Since we'll use floating point types there eventually, just
11720 // immediately cast everything to a float and operate entirely in that domain.
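// e.g. on AVX1 a v8i32 shuffle is bitcast to v8f32, shuffled in the float
// domain and bitcast back, while v16i16/v32i8 shuffles are split into
// 128-bit halves unless a bit mask or bit blend suffices.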
11721 if (VT.isInteger() && !Subtarget.hasAVX2()) {
11722 int ElementBits = VT.getScalarSizeInBits();
11723 if (ElementBits < 32) {
11724 // No floating point type available; if we can't use the bit operations
11725 // for masking/blending then decompose into 128-bit vectors.
11726 if (SDValue V = lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, DAG))
11728 if (SDValue V = lowerVectorShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
11730 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11733 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11734 VT.getVectorNumElements());
11735 V1 = DAG.getBitcast(FpVT, V1);
11736 V2 = DAG.getBitcast(FpVT, V2);
11737 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
11740 switch (VT.SimpleTy) {
11742 return lowerV4F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
11744 return lowerV4I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
11746 return lowerV8F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
11748 return lowerV8I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
11750 return lowerV16I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
11752 return lowerV32I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
11755 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11759 /// \brief Try to lower a vector shuffle as a shuffle of 128-bit subvectors.
11760 static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT,
11761 ArrayRef<int> Mask, SDValue V1,
11762 SDValue V2, SelectionDAG &DAG) {
11763 assert(VT.getScalarSizeInBits() == 64 &&
11764 "Unexpected element type size for 128bit shuffle.");
11766 // Handling 256-bit vectors here would require VLX; lowerV2X128VectorShuffle()
11767 // is most probably the better solution for them.
11768 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
11770 SmallVector<int, 4> WidenedMask;
11771 if (!canWidenShuffleElements(Mask, WidenedMask))
11774 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
11775 // Ensure elements came from the same Op.
11776 int MaxOp1Index = VT.getVectorNumElements()/2 - 1;
11777 for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
11778 if (WidenedMask[i] == SM_SentinelZero)
11780 if (WidenedMask[i] == SM_SentinelUndef)
11783 SDValue Op = WidenedMask[i] > MaxOp1Index ? V2 : V1;
11784 unsigned OpIndex = (i < Size/2) ? 0 : 1;
11785 if (Ops[OpIndex].isUndef())
11787 else if (Ops[OpIndex] != Op)
11791 // Form a 128-bit permutation.
11792 // Convert the 64-bit shuffle mask selection values into 128-bit selection
11793 // bits defined by a vshuf64x2 instruction's immediate control byte.
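// e.g. the widened mask <0, 2, 5, 7> selects 128-bit chunks 0 and 2 of V1
// for the low half and chunks 1 and 3 of V2 for the high half, giving an
// immediate of 0xD8.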
11794 unsigned PermMask = 0, Imm = 0;
11795 unsigned ControlBitsNum = WidenedMask.size() / 2;
11797 for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
11798 // Use first element in place of undef mask.
11799 Imm = (WidenedMask[i] == SM_SentinelUndef) ? 0 : WidenedMask[i];
11800 PermMask |= (Imm % WidenedMask.size()) << (i * ControlBitsNum);
11803 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
11804 DAG.getConstant(PermMask, DL, MVT::i8));
11807 static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT,
11808 ArrayRef<int> Mask, SDValue V1,
11809 SDValue V2, SelectionDAG &DAG) {
11811 assert(VT.getScalarSizeInBits() >= 16 && "Unexpected data type for PERMV");
11813 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
11814 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
11816 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
11818 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
11820 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
11823 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11824 static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11825 SDValue V1, SDValue V2,
11826 const X86Subtarget &Subtarget,
11827 SelectionDAG &DAG) {
11828 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11829 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11830 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11832 if (V2.isUndef()) {
11833 // Use low duplicate instructions for masks that match their pattern.
11834 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
11835 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
11837 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
11838 // Non-half-crossing single input shuffles can be lowered with an
11839 // interleaved permutation.
11840 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
11841 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
11842 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
11843 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
11844 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
11845 DAG.getConstant(VPERMILPMask, DL, MVT::i8));
11848 SmallVector<int, 4> RepeatedMask;
11849 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
11850 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
11851 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
11854 if (SDValue Shuf128 =
11855 lowerV4X128VectorShuffle(DL, MVT::v8f64, Mask, V1, V2, DAG))
11858 if (SDValue Unpck =
11859 lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
11862 // Check if the blend happens to exactly fit that of SHUFPD.
11864 lowerVectorShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG))
11867 return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
11870 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11871 static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11872 SDValue V1, SDValue V2,
11873 const X86Subtarget &Subtarget,
11874 SelectionDAG &DAG) {
11875 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11876 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11877 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11879 // If the shuffle mask is repeated in each 128-bit lane, we have many more
11880 // options to efficiently lower the shuffle.
11881 SmallVector<int, 4> RepeatedMask;
11882 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
11883 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
11885 // Use even/odd duplicate instructions for masks that match their pattern.
11886 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
11887 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
11888 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
11889 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
11892 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
11893 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
11895 // Use dedicated unpack instructions for masks that match their pattern.
11896 if (SDValue Unpck =
11897 lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
11900 // Otherwise, fall back to a SHUFPS sequence.
11901 return lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
11904 return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
11907 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11908 static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11909 SDValue V1, SDValue V2,
11910 const X86Subtarget &Subtarget,
11911 SelectionDAG &DAG) {
11912 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11913 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11914 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11916 if (SDValue Shuf128 =
11917 lowerV4X128VectorShuffle(DL, MVT::v8i64, Mask, V1, V2, DAG))
11920 if (V2.isUndef()) {
11921 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
11922 // can use lower latency instructions that will operate on all four 128-bit lanes.
11924 SmallVector<int, 2> Repeated128Mask;
11925 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
11926 SmallVector<int, 4> PSHUFDMask;
11927 scaleShuffleMask(2, Repeated128Mask, PSHUFDMask);
11928 return DAG.getBitcast(
11930 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
11931 DAG.getBitcast(MVT::v16i32, V1),
11932 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11935 SmallVector<int, 4> Repeated256Mask;
11936 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
11937 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
11938 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
11941 // Try to use shift instructions.
11942 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
11946 if (SDValue Unpck =
11947 lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
11950 return lowerVectorShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
11953 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11954 static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11955 SDValue V1, SDValue V2,
11956 const X86Subtarget &Subtarget,
11957 SelectionDAG &DAG) {
11958 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11959 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11960 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11962 // If the shuffle mask is repeated in each 128-bit lane we can use more
11963 // efficient instructions that mirror the shuffles across the four 128-bit lanes.
11965 SmallVector<int, 4> RepeatedMask;
11966 if (is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask)) {
11967 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
11969 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
11970 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
11972 // Use dedicated unpack instructions for masks that match their pattern.
11974 lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
11978 // Try to use shift instructions.
11979 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
11983 // Try to use byte rotation instructions.
11984 if (Subtarget.hasBWI())
11985 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11986 DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
11989 return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
11992 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11993 static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
11994 SDValue V1, SDValue V2,
11995 const X86Subtarget &Subtarget,
11996 SelectionDAG &DAG) {
11997 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11998 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11999 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
12000 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
12002 // Use dedicated unpack instructions for masks that match their pattern.
12004 lowerVectorShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
12007 // Try to use shift instructions.
12008 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
12012 // Try to use byte rotation instructions.
12013 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
12014 DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG))
12017 if (V2.isUndef()) {
12018 SmallVector<int, 8> RepeatedMask;
12019 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
12020 // As this is a single-input shuffle, the repeated mask should be
12021 // a strictly valid v8i16 mask that we can pass through to the v8i16
12022 // lowering to handle even the v32 case.
12023 return lowerV8I16GeneralSingleInputVectorShuffle(
12024 DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
12028 return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
12031 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
12032 static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12033 SDValue V1, SDValue V2,
12034 const X86Subtarget &Subtarget,
12035 SelectionDAG &DAG) {
12036 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
12037 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
12038 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
12039 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
12041 // Use dedicated unpack instructions for masks that match their pattern.
12043 lowerVectorShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
12046 // Try to use shift instructions.
12047 if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
12051 // Try to use byte rotation instructions.
12052 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
12053 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
12056 if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1,
12057 V2, Subtarget, DAG))
12060 // FIXME: Implement direct support for this type!
12061 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
12064 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
12066 /// This routine either breaks down the specific type of a 512-bit x86 vector
12067 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
12068 /// together based on the available instructions.
12069 static SDValue lower512BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12070 MVT VT, SDValue V1, SDValue V2,
12071 const X86Subtarget &Subtarget,
12072 SelectionDAG &DAG) {
12073 assert(Subtarget.hasAVX512() &&
12074 "Cannot lower 512-bit vectors w/o basic ISA!");
12076 // Check for being able to broadcast a single element.
12077 if (SDValue Broadcast =
12078 lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG))
12081 // Dispatch to each element type for lowering. If we don't have support for
12082 // specific element type shuffles at 512 bits, immediately split them and
12083 // lower them. Each lowering routine of a given type is allowed to assume that
12084 // the requisite ISA extensions for that element type are available.
12085 switch (VT.SimpleTy) {
12087 return lowerV8F64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
12089 return lowerV16F32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
12091 return lowerV8I64VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
12093 return lowerV16I32VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
12095 return lowerV32I16VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
12097 return lowerV64I8VectorShuffle(DL, Mask, V1, V2, Subtarget, DAG);
12100 llvm_unreachable("Not a valid 512-bit x86 vector type!");
12104 // Lower vXi1 vector shuffles.
12105 // There is no dedicated instruction on AVX-512 that shuffles the masks.
12106 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
12107 // vector, shuffle it, and then truncate it back.
12108 static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
12109 MVT VT, SDValue V1, SDValue V2,
12110 const X86Subtarget &Subtarget,
12111 SelectionDAG &DAG) {
12112 assert(Subtarget.hasAVX512() &&
12113 "Cannot lower 512-bit vectors w/o basic ISA!");
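// e.g. a v16i1 shuffle is performed by sign-extending both operands to
// v16i32, shuffling there, and truncating the result back to v16i1.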
12115 switch (VT.SimpleTy) {
12117 llvm_unreachable("Expected a vector of i1 elements");
12119 ExtVT = MVT::v2i64;
12122 ExtVT = MVT::v4i32;
12125 ExtVT = MVT::v8i64; // Take 512-bit type, more shuffles on KNL
12128 ExtVT = MVT::v16i32;
12131 ExtVT = MVT::v32i16;
12134 ExtVT = MVT::v64i8;
12138 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12139 V1 = getZeroVector(ExtVT, Subtarget, DAG, DL);
12140 else if (ISD::isBuildVectorAllOnes(V1.getNode()))
12141 V1 = getOnesVector(ExtVT, Subtarget, DAG, DL);
12143 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
12146 V2 = DAG.getUNDEF(ExtVT);
12147 else if (ISD::isBuildVectorAllZeros(V2.getNode()))
12148 V2 = getZeroVector(ExtVT, Subtarget, DAG, DL);
12149 else if (ISD::isBuildVectorAllOnes(V2.getNode()))
12150 V2 = getOnesVector(ExtVT, Subtarget, DAG, DL);
12152 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
12153 return DAG.getNode(ISD::TRUNCATE, DL, VT,
12154 DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask));
12156 /// \brief Top-level lowering for x86 vector shuffles.
12158 /// This handles decomposition, canonicalization, and lowering of all x86
12159 /// vector shuffles. Most of the specific lowering strategies are encapsulated
12160 /// above in helper routines. The canonicalization attempts to widen shuffles
12161 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
12162 /// s.t. only one of the two inputs needs to be tested, etc.
12163 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
12164 SelectionDAG &DAG) {
12165 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12166 ArrayRef<int> Mask = SVOp->getMask();
12167 SDValue V1 = Op.getOperand(0);
12168 SDValue V2 = Op.getOperand(1);
12169 MVT VT = Op.getSimpleValueType();
12170 int NumElements = VT.getVectorNumElements();
12172 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
12174 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
12175 "Can't lower MMX shuffles");
12177 bool V1IsUndef = V1.isUndef();
12178 bool V2IsUndef = V2.isUndef();
12179 if (V1IsUndef && V2IsUndef)
12180 return DAG.getUNDEF(VT);
12182 // When we create a shuffle node we put the UNDEF node in the second operand,
12183 // but in some cases the first operand may be transformed to UNDEF.
12184 // In this case we should just commute the node.
12186 return DAG.getCommutedVectorShuffle(*SVOp);
12188 // Check for non-undef masks pointing at an undef vector and make the masks
12189 // undef as well. This makes it easier to match the shuffle based solely on the mask.
12193 if (M >= NumElements) {
12194 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
12195 for (int &M : NewMask)
12196 if (M >= NumElements)
12198 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
12201 // We actually see shuffles that are entirely re-arrangements of a set of
12202 // zero inputs. This mostly happens while decomposing complex shuffles into
12203 // simple ones. Directly lower these as a buildvector of zeros.
12204 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
12205 if (Zeroable.all())
12206 return getZeroVector(VT, Subtarget, DAG, DL);
12208 // Try to collapse shuffles into using a vector type with fewer elements but
12209 // wider element types. We cap this to not form integers or floating point
12210 // elements wider than 64 bits, but it might be interesting to form i128
12211 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
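// e.g. the v4i32 shuffle <0, 1, 4, 5> widens to the v2i64 shuffle <0, 2>.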
12212 SmallVector<int, 16> WidenedMask;
12213 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
12214 canWidenShuffleElements(Mask, WidenedMask)) {
12215 MVT NewEltVT = VT.isFloatingPoint()
12216 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
12217 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
12218 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
12219 // Make sure that the new vector type is legal. For example, v2f64 isn't legal on SSE1.
12221 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
12222 V1 = DAG.getBitcast(NewVT, V1);
12223 V2 = DAG.getBitcast(NewVT, V2);
12224 return DAG.getBitcast(
12225 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
12229 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
12232 ++NumUndefElements;
12233 else if (M < NumElements)
12238 // Commute the shuffle as needed such that more elements come from V1 than
12239 // V2. This allows us to match the shuffle pattern strictly on how many
12240 // elements come from V1 without handling the symmetric cases.
12241 if (NumV2Elements > NumV1Elements)
12242 return DAG.getCommutedVectorShuffle(*SVOp);
12244 assert(NumV1Elements > 0 && "No V1 indices");
12245 assert((NumV2Elements > 0 || V2IsUndef) && "V2 not undef, but not used");
12247 // When the number of V1 and V2 elements are the same, try to minimize the
12248 // number of uses of V2 in the low half of the vector. When that is tied,
12249 // ensure that the sum of indices for V1 is equal to or lower than the sum
12250 // of indices for V2. When those are equal, try to ensure that the number of odd
12251 // indices for V1 is lower than the number of odd indices for V2.
12252 if (NumV1Elements == NumV2Elements) {
12253 int LowV1Elements = 0, LowV2Elements = 0;
12254 for (int M : Mask.slice(0, NumElements / 2))
12255 if (M >= NumElements)
12256 ++LowV2Elements;
12257 else if (M >= 0)
12258 ++LowV1Elements;
12259 if (LowV2Elements > LowV1Elements)
12260 return DAG.getCommutedVectorShuffle(*SVOp);
12261 if (LowV2Elements == LowV1Elements) {
12262 int SumV1Indices = 0, SumV2Indices = 0;
12263 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12264 if (Mask[i] >= NumElements)
12265 SumV2Indices += i;
12266 else if (Mask[i] >= 0)
12267 SumV1Indices += i;
12268 if (SumV2Indices < SumV1Indices)
12269 return DAG.getCommutedVectorShuffle(*SVOp);
12270 if (SumV2Indices == SumV1Indices) {
12271 int NumV1OddIndices = 0, NumV2OddIndices = 0;
12272 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12273 if (Mask[i] >= NumElements)
12274 NumV2OddIndices += i % 2;
12275 else if (Mask[i] >= 0)
12276 NumV1OddIndices += i % 2;
12277 if (NumV2OddIndices < NumV1OddIndices)
12278 return DAG.getCommutedVectorShuffle(*SVOp);
12279 }
12280 }
12281 }
12283 // For each vector width, delegate to a specialized lowering routine.
12284 if (VT.is128BitVector())
12285 return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
12287 if (VT.is256BitVector())
12288 return lower256BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
12290 if (VT.is512BitVector())
12291 return lower512BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
12293 if (Is1BitVector)
12294 return lower1BitVectorShuffle(DL, Mask, VT, V1, V2, Subtarget, DAG);
12296 llvm_unreachable("Unimplemented!");
12297 }
12299 /// \brief Try to lower a VSELECT instruction to a vector shuffle.
12300 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
12301 const X86Subtarget &Subtarget,
12302 SelectionDAG &DAG) {
12303 SDValue Cond = Op.getOperand(0);
12304 SDValue LHS = Op.getOperand(1);
12305 SDValue RHS = Op.getOperand(2);
12306 SDLoc dl(Op);
12307 MVT VT = Op.getSimpleValueType();
12309 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
12310 return SDValue();
12311 auto *CondBV = cast<BuildVectorSDNode>(Cond);
12313 // Only non-legal VSELECTs reach this lowering, convert those into generic
12314 // shuffles and re-use the shuffle lowering path for blends.
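// For example, a v4i32 vselect with the constant condition <-1,0,-1,0>
// becomes the two-input shuffle mask <0,5,2,7>: true lanes pick from LHS,
// false lanes pick the same lane of RHS.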
12315 SmallVector<int, 32> Mask;
12316 for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
12317 SDValue CondElt = CondBV->getOperand(i);
12318 Mask.push_back(
12319 isa<ConstantSDNode>(CondElt) ? i + (isNullConstant(CondElt) ? Size : 0)
12320 : -1);
12321 }
12322 return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
12323 }
12325 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
12326 // A vselect where all conditions and data are constants can be optimized into
12327 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
12328 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
12329 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
12330 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
12331 return SDValue();
12333 // Try to lower this to a blend-style vector shuffle. This can handle all
12334 // constant condition cases.
12335 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
12336 return BlendOp;
12338 // Variable blends are only legal from SSE4.1 onward.
12339 if (!Subtarget.hasSSE41())
12340 return SDValue();
12342 // Only some types will be legal on some subtargets. If we can emit a legal
12343 // VSELECT-matching blend, return Op, but if we need to expand, return
12344 // a null value.
12345 switch (Op.getSimpleValueType().SimpleTy) {
12346 default:
12347 // Most of the vector types have blends past SSE4.1.
12348 return Op;
12350 case MVT::v32i8: {
12351 // The byte blends for AVX vectors were introduced only in AVX2.
12352 if (Subtarget.hasAVX2())
12353 return Op;
12355 return SDValue();
12356 }
12357 case MVT::v8i16:
12358 case MVT::v16i16:
12359 // AVX-512 BWI and VLX features support VSELECT with i16 elements.
12360 if (Subtarget.hasBWI() && Subtarget.hasVLX())
12361 return Op;
12363 // FIXME: We should custom lower this by fixing the condition and using i8
12364 // blends.
12365 return SDValue();
12366 }
12367 }
12369 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
12370 MVT VT = Op.getSimpleValueType();
12371 SDLoc dl(Op);
12373 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
12374 return SDValue();
12376 if (VT.getSizeInBits() == 8) {
12377 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
12378 Op.getOperand(0), Op.getOperand(1));
12379 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12380 DAG.getValueType(VT));
12381 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12384 if (VT.getSizeInBits() == 16) {
12385 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
12386 if (isNullConstant(Op.getOperand(1)))
12387 return DAG.getNode(
12388 ISD::TRUNCATE, dl, MVT::i16,
12389 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12390 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
12391 Op.getOperand(1)));
12392 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
12393 Op.getOperand(0), Op.getOperand(1));
12394 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12395 DAG.getValueType(VT));
12396 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12399 if (VT == MVT::f32) {
12400 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
12401 // the result back to FR32 register. It's only worth matching if the
12402 // result has a single use which is a store or a bitcast to i32. And in
12403 // the case of a store, it's not worth it if the index is a constant 0,
12404 // because a MOVSSmr can be used instead, which is smaller and faster.
12405 if (!Op.hasOneUse())
12406 return SDValue();
12407 SDNode *User = *Op.getNode()->use_begin();
12408 if ((User->getOpcode() != ISD::STORE ||
12409 isNullConstant(Op.getOperand(1))) &&
12410 (User->getOpcode() != ISD::BITCAST ||
12411 User->getValueType(0) != MVT::i32))
12412 return SDValue();
12413 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12414 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
12415 Op.getOperand(1));
12416 return DAG.getBitcast(MVT::f32, Extract);
12417 }
12419 if (VT == MVT::i32 || VT == MVT::i64) {
12420 // ExtractPS/pextrq works with constant index.
12421 if (isa<ConstantSDNode>(Op.getOperand(1)))
12422 return Op;
12423 }
12425 return SDValue();
12426 }
12427 /// Extract one bit from mask vector, like v16i1 or v8i1.
12428 /// AVX-512 feature.
12430 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
12431 SDValue Vec = Op.getOperand(0);
12432 SDLoc dl(Vec);
12433 MVT VecVT = Vec.getSimpleValueType();
12434 SDValue Idx = Op.getOperand(1);
12435 MVT EltVT = Op.getSimpleValueType();
12437 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
12438 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
12439 "Unexpected vector type in ExtractBitFromMaskVector");
12441 // variable index can't be handled in mask registers,
12442 // extend vector to VR512
12443 if (!isa<ConstantSDNode>(Idx)) {
12444 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
12445 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
12446 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
12447 ExtVT.getVectorElementType(), Ext, Idx);
12448 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
12451 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12452 if (!Subtarget.hasDQI() && (VecVT.getVectorNumElements() <= 8)) {
12453 // Use kshiftlw/rw instruction.
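// For example, extracting bit 3 of a (widened) v16i1 mask: shift left by 12
// to move bit 3 into bit 15, then logical shift right by 15 to leave it in
// bit 0.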
12454 VecVT = MVT::v16i1;
12455 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT,
12456 DAG.getUNDEF(VecVT),
12457 Vec,
12458 DAG.getIntPtrConstant(0, dl));
12459 }
12460 unsigned MaxSift = VecVT.getVectorNumElements() - 1;
12461 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
12462 DAG.getConstant(MaxSift - IdxVal, dl, MVT::i8));
12463 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
12464 DAG.getConstant(MaxSift, dl, MVT::i8));
12465 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
12466 DAG.getIntPtrConstant(0, dl));
12470 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
12471 SelectionDAG &DAG) const {
12472 SDLoc dl(Op);
12473 SDValue Vec = Op.getOperand(0);
12474 MVT VecVT = Vec.getSimpleValueType();
12475 SDValue Idx = Op.getOperand(1);
12477 if (Op.getSimpleValueType() == MVT::i1)
12478 return ExtractBitFromMaskVector(Op, DAG);
12480 if (!isa<ConstantSDNode>(Idx)) {
12481 if (VecVT.is512BitVector() ||
12482 (VecVT.is256BitVector() && Subtarget.hasInt256() &&
12483 VecVT.getVectorElementType().getSizeInBits() == 32)) {
12485 MVT MaskEltVT =
12486 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
12487 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
12488 MaskEltVT.getSizeInBits());
12490 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
12491 auto PtrVT = getPointerTy(DAG.getDataLayout());
12492 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
12493 getZeroVector(MaskVT, Subtarget, DAG, dl), Idx,
12494 DAG.getConstant(0, dl, PtrVT));
12495 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
12496 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Perm,
12497 DAG.getConstant(0, dl, PtrVT));
12498 }
12499 return SDValue();
12500 }
12502 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12504 // If this is a 256-bit vector result, first extract the 128-bit vector and
12505 // then extract the element from the 128-bit vector.
12506 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
12507 // Get the 128-bit vector.
12508 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
12509 MVT EltVT = VecVT.getVectorElementType();
12511 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
12512 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
12514 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
12515 // this can be done with a mask.
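// For example, extracting element 11 of a v16i32: each 128-bit chunk holds
// 4 elements, so chunk 2 is extracted above and 11 & 3 == 3 selects the
// element within it.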
12516 IdxVal &= ElemsPerChunk - 1;
12517 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
12518 DAG.getConstant(IdxVal, dl, MVT::i32));
12521 assert(VecVT.is128BitVector() && "Unexpected vector length");
12523 if (Subtarget.hasSSE41())
12524 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
12525 return Res;
12527 MVT VT = Op.getSimpleValueType();
12528 // TODO: handle v16i8.
12529 if (VT.getSizeInBits() == 16) {
12531 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12532 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12533 DAG.getBitcast(MVT::v4i32, Vec), Idx));
12535 // Transform it so it matches pextrw which produces a 32-bit result.
12536 MVT EltVT = MVT::i32;
12537 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, Vec, Idx);
12538 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
12539 DAG.getValueType(VT));
12540 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12543 if (VT.getSizeInBits() == 32) {
12547 // SHUFPS the element to the lowest double word, then movss.
12548 int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
12549 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
12550 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
12551 DAG.getIntPtrConstant(0, dl));
12554 if (VT.getSizeInBits() == 64) {
12555 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
12556 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
12557 // to match extract_elt for f64.
12561 // UNPCKHPD the element to the lowest double word, then movsd.
12562 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
12563 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
12564 int Mask[2] = { 1, -1 };
12565 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
12566 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
12567 DAG.getIntPtrConstant(0, dl));
12568 }
12570 return SDValue();
12571 }
12573 /// Insert one bit into a mask vector, like v16i1 or v8i1.
12574 /// AVX-512 feature.
12576 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
12577 SDLoc dl(Op);
12578 SDValue Vec = Op.getOperand(0);
12579 SDValue Elt = Op.getOperand(1);
12580 SDValue Idx = Op.getOperand(2);
12581 MVT VecVT = Vec.getSimpleValueType();
12583 if (!isa<ConstantSDNode>(Idx)) {
12584 // Non-constant index. Extend source and destination,
12585 // insert element and then truncate the result.
12586 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
12587 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
12588 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
12589 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
12590 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
12591 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
12594 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12595 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
12596 if (IdxVal)
12597 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
12598 DAG.getConstant(IdxVal, dl, MVT::i8));
12599 if (Vec.isUndef())
12600 return EltInVec;
12601 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
12602 }
12604 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
12605 SelectionDAG &DAG) const {
12606 MVT VT = Op.getSimpleValueType();
12607 MVT EltVT = VT.getVectorElementType();
12608 unsigned NumElts = VT.getVectorNumElements();
12610 if (EltVT == MVT::i1)
12611 return InsertBitToMaskVector(Op, DAG);
12613 SDLoc dl(Op);
12614 SDValue N0 = Op.getOperand(0);
12615 SDValue N1 = Op.getOperand(1);
12616 SDValue N2 = Op.getOperand(2);
12617 if (!isa<ConstantSDNode>(N2))
12618 return SDValue();
12619 auto *N2C = cast<ConstantSDNode>(N2);
12620 unsigned IdxVal = N2C->getZExtValue();
12622 // If we are clearing out an element, we do this more efficiently with a
12623 // blend shuffle than a costly integer insertion.
12624 // TODO: would other rematerializable values (e.g. allbits) benefit as well?
12625 // TODO: pre-SSE41 targets will tend to use bit masking - this could still
12626 // be beneficial if we are inserting several zeros and can combine the masks.
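// For example, zeroing element 2 of a v4i32 uses the blend mask <0,1,6,3>,
// taking lane 2 from the zero vector and every other lane from N0.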
12627 if (X86::isZeroNode(N1) && Subtarget.hasSSE41() && NumElts <= 8) {
12628 SmallVector<int, 8> ClearMask;
12629 for (unsigned i = 0; i != NumElts; ++i)
12630 ClearMask.push_back(i == IdxVal ? i + NumElts : i);
12631 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, dl);
12632 return DAG.getVectorShuffle(VT, dl, N0, ZeroVector, ClearMask);
12635 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
12636 // into that, and then insert the subvector back into the result.
12637 if (VT.is256BitVector() || VT.is512BitVector()) {
12638 // With a 256-bit vector, we can insert into the zero element efficiently
12639 // using a blend if we have AVX or AVX2 and the right data type.
12640 if (VT.is256BitVector() && IdxVal == 0) {
12641 // TODO: It is worthwhile to cast integer to floating point and back
12642 // and incur a domain crossing penalty if that's what we'll end up
12643 // doing anyway after extracting to a 128-bit vector.
12644 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
12645 (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
12646 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
12647 N2 = DAG.getIntPtrConstant(1, dl);
12648 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
12652 // Get the desired 128-bit vector chunk.
12653 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
12655 // Insert the element into the desired chunk.
12656 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
12657 assert(isPowerOf2_32(NumEltsIn128));
12658 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
12659 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
12661 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
12662 DAG.getConstant(IdxIn128, dl, MVT::i32));
12664 // Insert the changed part back into the bigger vector
12665 return insert128BitVector(N0, V, IdxVal, DAG, dl);
12667 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
12669 if (Subtarget.hasSSE41()) {
12670 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
12671 unsigned Opc;
12672 if (VT == MVT::v8i16) {
12673 Opc = X86ISD::PINSRW;
12674 } else {
12675 assert(VT == MVT::v16i8);
12676 Opc = X86ISD::PINSRB;
12677 }
12679 // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
12680 // argument.
12681 if (N1.getValueType() != MVT::i32)
12682 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
12683 if (N2.getValueType() != MVT::i32)
12684 N2 = DAG.getIntPtrConstant(IdxVal, dl);
12685 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
12688 if (EltVT == MVT::f32) {
12689 // Bits [7:6] of the constant are the source select. This will always be
12690 // zero here. The DAG Combiner may combine an extract_elt index into
12691 // these bits. For example (insert (extract, 3), 2) could be matched by
12692 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
12693 // Bits [5:4] of the constant are the destination select. This is the
12694 // value of the incoming immediate.
12695 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
12696 // combine either bitwise AND or insert of float 0.0 to set these bits.
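// For example, inserting into element 2 yields an immediate of 0x20
// (IdxVal << 4), i.e. '2' in the destination-select bits [5:4].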
12698 bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
12699 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
12700 // If this is an insertion of 32-bits into the low 32-bits of
12701 // a vector, we prefer to generate a blend with immediate rather
12702 // than an insertps. Blends are simpler operations in hardware and so
12703 // will always have equal or better performance than insertps.
12704 // But if optimizing for size and there's a load folding opportunity,
12705 // generate insertps because blendps does not have a 32-bit memory
12707 N2 = DAG.getIntPtrConstant(1, dl);
12708 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
12709 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2);
12711 N2 = DAG.getIntPtrConstant(IdxVal << 4, dl);
12712 // Create this as a scalar to vector..
12713 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
12714 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
12717 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
12718 // PINSR* works with constant index.
12719 return Op;
12720 }
12721 }
12723 if (EltVT == MVT::i8)
12724 return SDValue();
12726 if (EltVT.getSizeInBits() == 16) {
12727 // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
12728 // as its second argument.
12729 if (N1.getValueType() != MVT::i32)
12730 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
12731 if (N2.getValueType() != MVT::i32)
12732 N2 = DAG.getIntPtrConstant(IdxVal, dl);
12733 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
12734 }
12735 return SDValue();
12736 }
12738 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
12739 SDLoc dl(Op);
12740 MVT OpVT = Op.getSimpleValueType();
12742 // If this is a 256-bit vector result, first insert into a 128-bit
12743 // vector and then insert into the 256-bit vector.
12744 if (!OpVT.is128BitVector()) {
12745 // Insert into a 128-bit vector.
12746 unsigned SizeFactor = OpVT.getSizeInBits()/128;
12747 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
12748 OpVT.getVectorNumElements() / SizeFactor);
12750 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
12752 // Insert the 128-bit vector.
12753 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
12756 if (OpVT == MVT::v1i64 &&
12757 Op.getOperand(0).getValueType() == MVT::i64)
12758 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
12760 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
12761 assert(OpVT.is128BitVector() && "Expected an SSE type!");
12762 return DAG.getBitcast(
12763 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
12766 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
12767 // a simple subregister reference or explicit instructions to grab
12768 // upper bits of a vector.
12769 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
12770 SelectionDAG &DAG) {
12771 SDLoc dl(Op);
12772 SDValue In = Op.getOperand(0);
12773 SDValue Idx = Op.getOperand(1);
12774 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12775 MVT ResVT = Op.getSimpleValueType();
12776 MVT InVT = In.getSimpleValueType();
12778 if (Subtarget.hasFp256()) {
12779 if (ResVT.is128BitVector() &&
12780 (InVT.is256BitVector() || InVT.is512BitVector()) &&
12781 isa<ConstantSDNode>(Idx)) {
12782 return extract128BitVector(In, IdxVal, DAG, dl);
12783 }
12784 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
12785 isa<ConstantSDNode>(Idx)) {
12786 return extract256BitVector(In, IdxVal, DAG, dl);
12787 }
12788 }
12790 return SDValue();
12791 }
12792 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
12793 // simple superregister reference or explicit instructions to insert
12794 // the upper bits of a vector.
12795 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
12796 SelectionDAG &DAG) {
12797 if (!Subtarget.hasAVX())
12798 return SDValue();
12800 SDLoc dl(Op);
12801 SDValue Vec = Op.getOperand(0);
12802 SDValue SubVec = Op.getOperand(1);
12803 SDValue Idx = Op.getOperand(2);
12805 if (!isa<ConstantSDNode>(Idx))
12806 return SDValue();
12808 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12809 MVT OpVT = Op.getSimpleValueType();
12810 MVT SubVecVT = SubVec.getSimpleValueType();
12812 // Fold two 16-byte subvector loads into one 32-byte load:
12813 // (insert_subvector (insert_subvector undef, (load addr), 0),
12814 // (load addr + 16), Elts/2)
12816 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
12817 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
12818 OpVT.is256BitVector() && SubVecVT.is128BitVector()) {
12819 auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2));
12820 if (Idx2 && Idx2->getZExtValue() == 0) {
12821 // If needed, look through bitcasts to get to the load.
12822 SDValue SubVec2 = peekThroughBitcasts(Vec.getOperand(1));
12823 if (auto *FirstLd = dyn_cast<LoadSDNode>(SubVec2)) {
12824 bool Fast;
12825 unsigned Alignment = FirstLd->getAlignment();
12826 unsigned AS = FirstLd->getAddressSpace();
12827 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
12828 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
12829 OpVT, AS, Alignment, &Fast) && Fast) {
12830 SDValue Ops[] = { SubVec2, SubVec };
12831 if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false))
12832 return Ld;
12833 }
12834 }
12835 }
12836 }
12838 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
12839 SubVecVT.is128BitVector())
12840 return insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
12842 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
12843 return insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
12845 if (OpVT.getVectorElementType() == MVT::i1)
12846 return insert1BitVector(Op, DAG, Subtarget);
12848 return SDValue();
12849 }
12851 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
12852 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
12853 // one of the above mentioned nodes. It has to be wrapped because otherwise
12854 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
12855 // be used to form addressing mode. These wrapped nodes will be selected
12858 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
12859 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
12861 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
12862 // global base reg.
12863 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
12864 unsigned WrapperKind = X86ISD::Wrapper;
12865 CodeModel::Model M = DAG.getTarget().getCodeModel();
12867 if (Subtarget.isPICStyleRIPRel() &&
12868 (M == CodeModel::Small || M == CodeModel::Kernel))
12869 WrapperKind = X86ISD::WrapperRIP;
12871 auto PtrVT = getPointerTy(DAG.getDataLayout());
12872 SDValue Result = DAG.getTargetConstantPool(
12873 CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
12874 SDLoc DL(CP);
12875 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
12876 // With PIC, the address is actually $g + Offset.
12877 if (OpFlag) {
12878 Result =
12879 DAG.getNode(ISD::ADD, DL, PtrVT,
12880 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
12881 }
12883 return Result;
12884 }
12886 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
12887 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
12889 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
12890 // global base reg.
12891 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
12892 unsigned WrapperKind = X86ISD::Wrapper;
12893 CodeModel::Model M = DAG.getTarget().getCodeModel();
12895 if (Subtarget.isPICStyleRIPRel() &&
12896 (M == CodeModel::Small || M == CodeModel::Kernel))
12897 WrapperKind = X86ISD::WrapperRIP;
12899 auto PtrVT = getPointerTy(DAG.getDataLayout());
12900 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
12902 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
12904 // With PIC, the address is actually $g + Offset.
12907 DAG.getNode(ISD::ADD, DL, PtrVT,
12908 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
12914 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
12915 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
12917 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
12918 // global base reg.
12919 const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
12920 unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
12921 unsigned WrapperKind = X86ISD::Wrapper;
12922 CodeModel::Model M = DAG.getTarget().getCodeModel();
12924 if (Subtarget.isPICStyleRIPRel() &&
12925 (M == CodeModel::Small || M == CodeModel::Kernel))
12926 WrapperKind = X86ISD::WrapperRIP;
12928 auto PtrVT = getPointerTy(DAG.getDataLayout());
12929 SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
12932 Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
12934 // With PIC, the address is actually $g + Offset.
12935 if (isPositionIndependent() && !Subtarget.is64Bit()) {
12937 DAG.getNode(ISD::ADD, DL, PtrVT,
12938 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
12941 // For symbols that require a load from a stub to get the address, emit the
12943 if (isGlobalStubReference(OpFlag))
12944 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
12945 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
12951 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
12952 // Create the TargetBlockAddressAddress node.
12953 unsigned char OpFlags =
12954 Subtarget.classifyBlockAddressReference();
12955 CodeModel::Model M = DAG.getTarget().getCodeModel();
12956 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
12957 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
12958 SDLoc dl(Op);
12959 auto PtrVT = getPointerTy(DAG.getDataLayout());
12960 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
12962 if (Subtarget.isPICStyleRIPRel() &&
12963 (M == CodeModel::Small || M == CodeModel::Kernel))
12964 Result = DAG.getNode(X86ISD::WrapperRIP, dl, PtrVT, Result);
12966 Result = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, Result);
12968 // With PIC, the address is actually $g + Offset.
12969 if (isGlobalRelativeToPICBase(OpFlags)) {
12970 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
12971 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
12977 SDValue X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
12978 const SDLoc &dl, int64_t Offset,
12979 SelectionDAG &DAG) const {
12980 // Create the TargetGlobalAddress node, folding in the constant
12981 // offset if it is legal.
12982 unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
12983 CodeModel::Model M = DAG.getTarget().getCodeModel();
12984 auto PtrVT = getPointerTy(DAG.getDataLayout());
12985 SDValue Result;
12986 if (OpFlags == X86II::MO_NO_FLAG &&
12987 X86::isOffsetSuitableForCodeModel(Offset, M)) {
12988 // A direct static reference to a global.
12989 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
12990 Offset = 0;
12991 } else {
12992 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, OpFlags);
12993 }
12995 if (Subtarget.isPICStyleRIPRel() &&
12996 (M == CodeModel::Small || M == CodeModel::Kernel))
12997 Result = DAG.getNode(X86ISD::WrapperRIP, dl, PtrVT, Result);
12999 Result = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, Result);
13001 // With PIC, the address is actually $g + Offset.
13002 if (isGlobalRelativeToPICBase(OpFlags)) {
13003 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
13004 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
13007 // For globals that require a load from a stub to get the address, emit the
13009 if (isGlobalStubReference(OpFlags))
13010 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
13011 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
13013 // If there was a non-zero offset that we didn't fold, create an explicit
13014 // addition for it.
13016 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
13017 DAG.getConstant(Offset, dl, PtrVT));
13023 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13024 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13025 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13026 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13030 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13031 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13032 unsigned char OperandFlags, bool LocalDynamic = false) {
13033 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13034 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13035 SDLoc dl(GA);
13036 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13037 GA->getValueType(0),
13038 GA->getOffset(),
13039 OperandFlags);
13041 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13042 : X86ISD::TLSADDR;
13044 if (InFlag) {
13045 SDValue Ops[] = { Chain, TGA, *InFlag };
13046 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13047 } else {
13048 SDValue Ops[] = { Chain, TGA };
13049 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13050 }
13052 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
13053 MFI->setAdjustsStack(true);
13054 MFI->setHasCalls(true);
13056 SDValue Flag = Chain.getValue(1);
13057 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13060 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13061 static SDValue
13062 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13063 const EVT PtrVT) {
13064 SDValue InFlag;
13065 SDLoc dl(GA); // ? function entry point might be better
13066 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13067 DAG.getNode(X86ISD::GlobalBaseReg,
13068 SDLoc(), PtrVT), InFlag);
13069 InFlag = Chain.getValue(1);
13071 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13074 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13076 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13078 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13079 X86::RAX, X86II::MO_TLSGD);
13082 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13083 SelectionDAG &DAG,
13084 const EVT PtrVT,
13085 bool is64Bit) {
13086 SDLoc dl(GA);
13088 // Get the start address of the TLS block for this module.
13089 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13090 .getInfo<X86MachineFunctionInfo>();
13091 MFI->incNumLocalDynamicTLSAccesses();
13093 SDValue Base;
13094 if (is64Bit) {
13095 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13096 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13097 } else {
13098 SDValue InFlag;
13099 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13100 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13101 InFlag = Chain.getValue(1);
13102 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13103 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13106 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13110 unsigned char OperandFlags = X86II::MO_DTPOFF;
13111 unsigned WrapperKind = X86ISD::Wrapper;
13112 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13113 GA->getValueType(0),
13114 GA->getOffset(), OperandFlags);
13115 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13117 // Add x@dtpoff with the base.
13118 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13121 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13122 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13123 const EVT PtrVT, TLSModel::Model model,
13124 bool is64Bit, bool isPIC) {
13125 SDLoc dl(GA);
13127 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13128 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13129 is64Bit ? 257 : 256));
13131 SDValue ThreadPointer =
13132 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
13133 MachinePointerInfo(Ptr));
13135 unsigned char OperandFlags = 0;
13136 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13138 unsigned WrapperKind = X86ISD::Wrapper;
13139 if (model == TLSModel::LocalExec) {
13140 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13141 } else if (model == TLSModel::InitialExec) {
13143 OperandFlags = X86II::MO_GOTTPOFF;
13144 WrapperKind = X86ISD::WrapperRIP;
13146 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13149 llvm_unreachable("Unexpected model");
13152 // emit "addl x@ntpoff,%eax" (local exec)
13153 // or "addl x@indntpoff,%eax" (initial exec)
13154 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13156 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13157 GA->getOffset(), OperandFlags);
13158 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13160 if (model == TLSModel::InitialExec) {
13161 if (isPIC && !is64Bit) {
13162 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13163 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13167 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13168 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
13171 // The address of the thread local variable is the add of the thread
13172 // pointer with the offset of the variable.
13173 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13177 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13179 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13181 if (DAG.getTarget().Options.EmulatedTLS)
13182 return LowerToTLSEmulatedModel(GA, DAG);
13184 const GlobalValue *GV = GA->getGlobal();
13185 auto PtrVT = getPointerTy(DAG.getDataLayout());
13186 bool PositionIndependent = isPositionIndependent();
13188 if (Subtarget.isTargetELF()) {
13189 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13191 case TLSModel::GeneralDynamic:
13192 if (Subtarget.is64Bit())
13193 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
13194 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
13195 case TLSModel::LocalDynamic:
13196 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
13197 Subtarget.is64Bit());
13198 case TLSModel::InitialExec:
13199 case TLSModel::LocalExec:
13200 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
13201 PositionIndependent);
13203 llvm_unreachable("Unknown TLS model.");
13206 if (Subtarget.isTargetDarwin()) {
13207 // Darwin only has one model of TLS. Lower to that.
13208 unsigned char OpFlag = 0;
13209 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
13210 X86ISD::WrapperRIP : X86ISD::Wrapper;
13212 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13213 // global base reg.
13214 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
13215 if (PIC32)
13216 OpFlag = X86II::MO_TLVP_PIC_BASE;
13217 else
13218 OpFlag = X86II::MO_TLVP;
13219 SDLoc DL(Op);
13220 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13221 GA->getValueType(0),
13222 GA->getOffset(), OpFlag);
13223 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
13225 // With PIC32, the address is actually $g + Offset.
13227 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
13228 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13231 // Lowering the machine isd will make sure everything is in the right
13233 SDValue Chain = DAG.getEntryNode();
13234 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13235 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, DL, true), DL);
13236 SDValue Args[] = { Chain, Offset };
13237 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13238 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
13239 DAG.getIntPtrConstant(0, DL, true),
13240 Chain.getValue(1), DL);
13242 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
13243 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13244 MFI->setAdjustsStack(true);
13246 // And our return value (tls address) is in the standard call return value
13248 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
13249 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
13252 if (Subtarget.isTargetKnownWindowsMSVC() ||
13253 Subtarget.isTargetWindowsItanium() ||
13254 Subtarget.isTargetWindowsGNU()) {
13255 // Just use the implicit TLS architecture
13256 // Need to generate something similar to:
13257 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13259 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13260 // mov rcx, qword [rdx+rcx*8]
13261 // mov eax, .tls$:tlsvar
13262 // [rax+rcx] contains the address
13263 // Windows 64bit: gs:0x58
13264 // Windows 32bit: fs:__tls_array
13266 SDLoc dl(GA);
13267 SDValue Chain = DAG.getEntryNode();
13269 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13270 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13271 // use its literal value of 0x2C.
13272 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
13273 ? Type::getInt8PtrTy(*DAG.getContext(),
13275 : Type::getInt32PtrTy(*DAG.getContext(),
13278 SDValue TlsArray = Subtarget.is64Bit()
13279 ? DAG.getIntPtrConstant(0x58, dl)
13280 : (Subtarget.isTargetWindowsGNU()
13281 ? DAG.getIntPtrConstant(0x2C, dl)
13282 : DAG.getExternalSymbol("_tls_array", PtrVT));
13284 SDValue ThreadPointer =
13285 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
13288 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
13289 res = ThreadPointer;
13291 // Load the _tls_index variable
13292 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
13293 if (Subtarget.is64Bit())
13294 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
13295 MachinePointerInfo(), MVT::i32);
13297 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
13299 auto &DL = DAG.getDataLayout();
13301 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, PtrVT);
13302 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
13304 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
13307 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
13309 // Get the offset of start of .tls section
13310 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13311 GA->getValueType(0),
13312 GA->getOffset(), X86II::MO_SECREL);
13313 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
13315 // The address of the thread local variable is the add of the thread
13316 // pointer with the offset of the variable.
13317 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
13320 llvm_unreachable("TLS not implemented for this target.");
13323 /// Lower SRA_PARTS and friends, which return two i32 values
13324 /// and take a 2 x i32 value to shift plus a shift amount.
13325 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
13326 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
13327 MVT VT = Op.getSimpleValueType();
13328 unsigned VTBits = VT.getSizeInBits();
13329 SDLoc dl(Op);
13330 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
13331 SDValue ShOpLo = Op.getOperand(0);
13332 SDValue ShOpHi = Op.getOperand(1);
13333 SDValue ShAmt = Op.getOperand(2);
13334 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
13335 // generic ISD nodes haven't. Insert an AND to be safe, it's optimized away
13337 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13338 DAG.getConstant(VTBits - 1, dl, MVT::i8));
13339 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
13340 DAG.getConstant(VTBits - 1, dl, MVT::i8))
13341 : DAG.getConstant(0, dl, VT);
13343 SDValue Tmp2, Tmp3;
13344 if (Op.getOpcode() == ISD::SHL_PARTS) {
13345 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
13346 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
13347 } else {
13348 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
13349 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
13352 // If the shift amount is larger than or equal to the width of a part we can't
13353 // rely on the results of shld/shrd. Insert a test and select the appropriate
13354 // values for large shift amounts.
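// For example, an i64 SHL_PARTS by 40 on a 32-bit target: the masked amount
// is 8, so the CMOVs below route (lo << 8) into the high part and 0 into the
// low part instead of trusting the shld result.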
13355 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13356 DAG.getConstant(VTBits, dl, MVT::i8));
13357 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13358 AndNode, DAG.getConstant(0, dl, MVT::i8));
13360 SDValue Hi, Lo;
13361 SDValue CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
13362 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
13363 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
13365 if (Op.getOpcode() == ISD::SHL_PARTS) {
13366 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13367 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13369 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13370 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13373 SDValue Ops[2] = { Lo, Hi };
13374 return DAG.getMergeValues(Ops, dl);
13377 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
13378 SelectionDAG &DAG) const {
13379 SDValue Src = Op.getOperand(0);
13380 MVT SrcVT = Src.getSimpleValueType();
13381 MVT VT = Op.getSimpleValueType();
13382 SDLoc dl(Op);
13384 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13385 if (SrcVT.isVector()) {
13386 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
13387 return DAG.getNode(X86ISD::CVTDQ2PD, dl, VT,
13388 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
13389 DAG.getUNDEF(SrcVT)));
13391 if (SrcVT.getVectorElementType() == MVT::i1) {
13392 if (SrcVT == MVT::v2i1 && TLI.isTypeLegal(SrcVT))
13393 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13394 DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v2i64, Src));
13395 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
13396 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13397 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT, Src));
13402 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
13403 "Unknown SINT_TO_FP to lower!");
13405 // These are really Legal; return the operand so the caller accepts it as
13406 // Legal.
13407 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
13408 return Op;
13409 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
13410 Subtarget.is64Bit()) {
13411 return Op;
13412 }
13414 SDValue ValueToStore = Op.getOperand(0);
13415 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
13416 !Subtarget.is64Bit())
13417 // Bitcasting to f64 here allows us to do a single 64-bit store from
13418 // an SSE register, avoiding the store forwarding penalty that would come
13419 // with two 32-bit stores.
13420 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
13422 unsigned Size = SrcVT.getSizeInBits()/8;
13423 MachineFunction &MF = DAG.getMachineFunction();
13424 auto PtrVT = getPointerTy(MF.getDataLayout());
13425 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
13426 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
13427 SDValue Chain = DAG.getStore(
13428 DAG.getEntryNode(), dl, ValueToStore, StackSlot,
13429 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
13430 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
13433 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
13434 SDValue StackSlot,
13435 SelectionDAG &DAG) const {
13436 // Build the FILD
13437 SDLoc DL(Op);
13438 SDVTList Tys;
13439 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
13440 if (useSSE)
13441 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
13442 else
13443 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
13445 unsigned ByteSize = SrcVT.getSizeInBits()/8;
13447 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
13448 MachineMemOperand *MMO;
13450 int SSFI = FI->getIndex();
13451 MMO = DAG.getMachineFunction().getMachineMemOperand(
13452 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
13453 MachineMemOperand::MOLoad, ByteSize, ByteSize);
13455 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
13456 StackSlot = StackSlot.getOperand(1);
13458 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
13459 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
13460 X86ISD::FILD, DL,
13461 Tys, Ops, SrcVT, MMO);
13464 Chain = Result.getValue(1);
13465 SDValue InFlag = Result.getValue(2);
13467 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
13468 // shouldn't be necessary except that RFP cannot be live across
13469 // multiple blocks. When stackifier is fixed, they can be uncoupled.
13470 MachineFunction &MF = DAG.getMachineFunction();
13471 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
13472 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
13473 auto PtrVT = getPointerTy(MF.getDataLayout());
13474 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
13475 Tys = DAG.getVTList(MVT::Other);
13476 SDValue Ops[] = {
13477 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
13478 };
13479 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
13480 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
13481 MachineMemOperand::MOStore, SSFISize, SSFISize);
13483 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
13484 Ops, Op.getValueType(), MMO);
13485 Result = DAG.getLoad(
13486 Op.getValueType(), DL, Chain, StackSlot,
13487 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
13493 /// 64-bit unsigned integer to double expansion.
13494 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
13495 SelectionDAG &DAG) const {
13496 // This algorithm is not obvious. Here is what we're trying to output:
13497 /*
13498 movq %rax, %xmm0
13499 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
13500 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
13501 #ifdef __SSE3__
13502 haddpd %xmm0, %xmm0
13503 #else
13504 pshufd $0x4e, %xmm0, %xmm1
13505 addpd %xmm1, %xmm0
13506 #endif
13507 */
13509 SDLoc dl(Op);
13510 LLVMContext *Context = DAG.getContext();
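// The magic constants below implement the classic subtraction trick:
// splicing 0x43300000 / 0x45300000 into the high words turns the two halves
// into the doubles (2^52 + lo32) and (2^84 + hi32 * 2^32); subtracting c1
// strips the biases and the final add recombines lo32 + hi32 * 2^32 exactly.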
13512 // Build some magic constants.
13513 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
13514 Constant *C0 = ConstantDataVector::get(*Context, CV0);
13515 auto PtrVT = getPointerTy(DAG.getDataLayout());
13516 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
13518 SmallVector<Constant*,2> CV1;
13519 CV1.push_back(
13520 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13521 APInt(64, 0x4330000000000000ULL))));
13522 CV1.push_back(
13523 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13524 APInt(64, 0x4530000000000000ULL))));
13525 Constant *C1 = ConstantVector::get(CV1);
13526 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
13528 // Load the 64-bit value into an XMM register.
13529 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
13532 DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
13533 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
13534 /* Alignment = */ 16);
13536 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
13539 DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
13540 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
13541 /* Alignment = */ 16);
13542 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
13543 // TODO: Are there any fast-math-flags to propagate here?
13544 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
13546 SDValue Result;
13547 if (Subtarget.hasSSE3()) {
13548 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
13549 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
13551 SDValue S2F = DAG.getBitcast(MVT::v4i32, Sub);
13552 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
13554 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
13555 DAG.getBitcast(MVT::v2f64, Shuffle), Sub);
13558 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
13559 DAG.getIntPtrConstant(0, dl));
13562 /// 32-bit unsigned integer to float expansion.
13563 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
13564 SelectionDAG &DAG) const {
13565 SDLoc dl(Op);
13566 // FP constant to bias correct the final result.
13567 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
13570 // Load the 32-bit value into an XMM register.
13571 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
13574 // Zero out the upper parts of the register.
13575 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
13577 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13578 DAG.getBitcast(MVT::v2f64, Load),
13579 DAG.getIntPtrConstant(0, dl));
13581 // Or the load with the bias.
13582 SDValue Or = DAG.getNode(
13583 ISD::OR, dl, MVT::v2i64,
13584 DAG.getBitcast(MVT::v2i64,
13585 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
13586 DAG.getBitcast(MVT::v2i64,
13587 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
13589 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13590 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
13592 // Subtract the bias.
13593 // TODO: Are there any fast-math-flags to propagate here?
13594 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
13596 // Handle final rounding.
13597 MVT DestVT = Op.getSimpleValueType();
13599 if (DestVT.bitsLT(MVT::f64))
13600 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
13601 DAG.getIntPtrConstant(0, dl));
13602 if (DestVT.bitsGT(MVT::f64))
13603 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
13605 // Handle final rounding.
13606 return Sub;
13607 }
13609 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
13610 const X86Subtarget &Subtarget) {
13611 // The algorithm is the following:
13612 // #ifdef __SSE4_1__
13613 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
13614 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
13615 // (uint4) 0x53000000, 0xaa);
13617 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
13618 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
13620 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
13621 // return (float4) lo + fhi;
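// (Reinterpreted as floats, lo is 2^23 + (v & 0xffff) and hi is
// 2^39 + (v >> 16) * 2^16, so subtracting the 0x1.0p39f + 0x1.0p23f bias
// from hi and adding lo reconstructs the full unsigned value.)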
13623 // We shouldn't use it when unsafe-fp-math is enabled though: we might later
13624 // reassociate the two FADDs, and if we do that, the algorithm fails
13625 // spectacularly (PR24512).
13626 // FIXME: If we ever have some kind of Machine FMF, this should be marked
13627 // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
13628 // there's also the MachineCombiner reassociations happening on Machine IR.
13629 if (DAG.getTarget().Options.UnsafeFPMath)
13630 return SDValue();
13632 SDLoc DL(Op);
13633 SDValue V = Op->getOperand(0);
13634 MVT VecIntVT = V.getSimpleValueType();
13635 bool Is128 = VecIntVT == MVT::v4i32;
13636 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
13637 // If we convert to something else than the supported type, e.g., to v4f64,
13639 if (VecFloatVT != Op->getSimpleValueType(0))
13642 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
13643 "Unsupported custom type");
13645 // In the #ifdef/#else code, we have in common:
13646 // - The vector of constants:
13652 // Create the splat vector for 0x4b000000.
13653 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
13654 // Create the splat vector for 0x53000000.
13655 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
13657 // Create the right shift.
13658 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
13659 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
13661 SDValue Low, High;
13662 if (Subtarget.hasSSE41()) {
13663 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
13664 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
13665 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
13666 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
13667 // Low will be bitcasted right away, so do not bother bitcasting back to its
13669 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
13670 VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
13671 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
13672 // (uint4) 0x53000000, 0xaa);
13673 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
13674 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
13675 // High will be bitcasted right away, so do not bother bitcasting back to
13676 // its original type.
13677 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
13678 VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
13680 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
13681 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
13682 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
13683 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
13685 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
13686 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
13689 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
13690 SDValue VecCstFAdd = DAG.getConstantFP(
13691 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), DL, VecFloatVT);
13693 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
13694 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
13695 // TODO: Are there any fast-math-flags to propagate here?
13697 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
13698 // return (float4) lo + fhi;
13699 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
13700 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
13703 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
13704 SelectionDAG &DAG) const {
13705 SDValue N0 = Op.getOperand(0);
13706 MVT SVT = N0.getSimpleValueType();
13707 SDLoc dl(Op);
13709 if (SVT.getVectorElementType() == MVT::i1) {
13710 if (SVT == MVT::v2i1)
13711 return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
13712 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, N0));
13713 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
13714 return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
13715 DAG.getNode(ISD::ZERO_EXTEND, dl, IntegerVT, N0));
13718 switch (SVT.SimpleTy) {
13720 llvm_unreachable("Custom UINT_TO_FP is not supported!");
13725 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
13726 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13727 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
13731 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
13734 assert(Subtarget.hasAVX512());
13735 return DAG.getNode(ISD::UINT_TO_FP, dl, Op.getValueType(),
13736 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, N0));
13740 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
13741 SelectionDAG &DAG) const {
13742 SDValue N0 = Op.getOperand(0);
13743 SDLoc dl(Op);
13744 auto PtrVT = getPointerTy(DAG.getDataLayout());
13746 if (Op.getSimpleValueType().isVector())
13747 return lowerUINT_TO_FP_vec(Op, DAG);
13749 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
13750 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
13751 // the optimization here.
13752 if (DAG.SignBitIsZero(N0))
13753 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
13755 MVT SrcVT = N0.getSimpleValueType();
13756 MVT DstVT = Op.getSimpleValueType();
13758 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
13759 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
13760 // Conversions from unsigned i32 to f32/f64 are legal,
13761 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
13762 return Op;
13763 }
13765 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
13766 return LowerUINT_TO_FP_i64(Op, DAG);
13767 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
13768 return LowerUINT_TO_FP_i32(Op, DAG);
13769 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
13770 return SDValue();
13772 // Make a 64-bit buffer, and use it to build an FILD.
13773 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
13774 if (SrcVT == MVT::i32) {
13775 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
13776 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
13777 StackSlot, MachinePointerInfo());
13778 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
13779 OffsetSlot, MachinePointerInfo());
13780 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
13781 return Fild;
13782 }
13784 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
13785 SDValue ValueToStore = Op.getOperand(0);
13786 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
13787 // Bitcasting to f64 here allows us to do a single 64-bit store from
13788 // an SSE register, avoiding the store forwarding penalty that would come
13789 // with two 32-bit stores.
13790 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
13791 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
13792 MachinePointerInfo());
13793 // For i64 source, we need to add the appropriate power of 2 if the input
13794 // was negative. This is the same as the optimization in
13795 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
13796 // we must be careful to do the computation in x87 extended precision, not
13797 // in SSE. (The generic code can't know it's OK to do this, or how to.)
13798 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
13799 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
13800 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
13801 MachineMemOperand::MOLoad, 8, 8);
13803 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
13804 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
13805 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
13808 APInt FF(32, 0x5F800000ULL);
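// 0x5F800000 is the IEEE-754 single-precision encoding of 2^64. FILD treats
// the stored i64 as signed, so when the original unsigned value had its top
// bit set the loaded result is off by exactly 2^64; the fudge factor selected
// below adds that 2^64 back in (or adds 0.0 when the sign bit was clear).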
13810 // Check whether the sign bit is set.
13811 SDValue SignSet = DAG.getSetCC(
13812 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
13813 Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
13815 // Build a 64-bit pair (0, FF) in the constant pool, with FF in the low bits.
13816 SDValue FudgePtr = DAG.getConstantPool(
13817 ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);
13819 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
13820 SDValue Zero = DAG.getIntPtrConstant(0, dl);
13821 SDValue Four = DAG.getIntPtrConstant(4, dl);
13822 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
13824 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
13826 // Load the value out, extending it from f32 to f80.
13827 // FIXME: Avoid the extend by constructing the right constant pool?
13828 SDValue Fudge = DAG.getExtLoad(
13829 ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
13830 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
13831 /* Alignment = */ 4);
13832 // Extend everything to 80 bits to force it to be done on x87.
13833 // TODO: Are there any fast-math-flags to propagate here?
13834 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
13835 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
13836 DAG.getIntPtrConstant(0, dl));
13839 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
13840 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
13841 // just return an <SDValue(), SDValue()> pair.
13842 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
13843 // to i16, i32 or i64, and we lower it to a legal sequence.
13844 // If lowered to the final integer result we return a <result, SDValue()> pair.
13845 // Otherwise we lower it to a sequence ending with a FIST, return a
13846 // <FIST, StackSlot> pair, and the caller is responsible for loading
13847 // the final integer result from StackSlot.
13848 std::pair<SDValue,SDValue>
13849 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
13850 bool IsSigned, bool IsReplace) const {
13853 EVT DstTy = Op.getValueType();
13854 EVT TheVT = Op.getOperand(0).getValueType();
13855 auto PtrVT = getPointerTy(DAG.getDataLayout());
13857 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
13858 // f16 must be promoted before using the lowering in this routine.
13859 // fp128 does not use this lowering.
13860 return std::make_pair(SDValue(), SDValue());
13863 // If using FIST to compute an unsigned i64, we'll need some fixup
13864 // to handle values above the maximum signed i64. A FIST is always
13865 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
13866 bool UnsignedFixup = !IsSigned &&
13867 DstTy == MVT::i64 &&
13868 (!Subtarget.is64Bit() ||
13869 !isScalarFPTypeInSSEReg(TheVT));
13871 if (!IsSigned && DstTy != MVT::i64 && !Subtarget.hasAVX512()) {
13872 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
13873 // The low 32 bits of the fist result will have the correct uint32 result.
13874 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
13878 assert(DstTy.getSimpleVT() <= MVT::i64 &&
13879 DstTy.getSimpleVT() >= MVT::i16 &&
13880 "Unknown FP_TO_INT to lower!");
13882 // These are really Legal.
13883 if (DstTy == MVT::i32 &&
13884 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
13885 return std::make_pair(SDValue(), SDValue());
13886 if (Subtarget.is64Bit() &&
13887 DstTy == MVT::i64 &&
13888 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
13889 return std::make_pair(SDValue(), SDValue());
13891 // We lower FP->int64 into FISTP64 followed by a load from a temporary stack slot.
13893 MachineFunction &MF = DAG.getMachineFunction();
13894 unsigned MemSize = DstTy.getSizeInBits()/8;
13895 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
13896 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
13899 switch (DstTy.getSimpleVT().SimpleTy) {
13900 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
13901 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
13902 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
13903 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
13906 SDValue Chain = DAG.getEntryNode();
13907 SDValue Value = Op.getOperand(0);
13908 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
13910 if (UnsignedFixup) {
13912 // Conversion to unsigned i64 is implemented with a select,
13913 // depending on whether the source value fits in the range
13914 // of a signed i64. Let Thresh be the FP equivalent of
13915 // 0x8000000000000000ULL.
13917 // Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
13918 // FistSrc = (Value < Thresh) ? Value : (Value - Thresh);
13919 // Fist-to-mem64 FistSrc
13920 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
13921 // to XOR'ing the high 32 bits with Adjust.
13923 // Being a power of 2, Thresh is exactly representable in all FP formats.
13924 // For X87 we'd like to use the smallest FP type for this constant, but
13925 // for DAG type consistency we have to match the FP operand type.
13927 APFloat Thresh(APFloat::IEEEsingle, APInt(32, 0x5f000000));
13928 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
13929 bool LosesInfo = false;
13930 if (TheVT == MVT::f64)
13931 // The rounding mode is irrelevant as the conversion should be exact.
13932 Status = Thresh.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
13934 else if (TheVT == MVT::f80)
13935 Status = Thresh.convert(APFloat::x87DoubleExtended,
13936 APFloat::rmNearestTiesToEven, &LosesInfo);
13938 assert(Status == APFloat::opOK && !LosesInfo &&
13939 "FP conversion should have been exact");
13941 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
13943 SDValue Cmp = DAG.getSetCC(DL,
13944 getSetCCResultType(DAG.getDataLayout(),
13945 *DAG.getContext(), TheVT),
13946 Value, ThreshVal, ISD::SETLT);
13947 Adjust = DAG.getSelect(DL, MVT::i32, Cmp,
13948 DAG.getConstant(0, DL, MVT::i32),
13949 DAG.getConstant(0x80000000, DL, MVT::i32));
13950 SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
13951 Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
13952 *DAG.getContext(), TheVT),
13953 Value, ThreshVal, ISD::SETLT);
13954 Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
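// Illustrative example: for an input of 2^63 + 5, Adjust is 0x80000000 and
// FistSrc becomes 5. The FIST then produces 0x0000000000000005, and XOR'ing
// the high 32 bits with Adjust restores 0x8000000000000005 == 2^63 + 5.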
13957 // FIXME: This causes a redundant load/store if the SSE-class value is already
13958 // in memory, such as if it is on the call stack.
13959 if (isScalarFPTypeInSSEReg(TheVT)) {
13960 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
13961 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
13962 MachinePointerInfo::getFixedStack(MF, SSFI));
13963 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
13965 Chain, StackSlot, DAG.getValueType(TheVT)
13968 MachineMemOperand *MMO =
13969 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
13970 MachineMemOperand::MOLoad, MemSize, MemSize);
13971 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
13972 Chain = Value.getValue(1);
13973 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
13974 StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
13977 MachineMemOperand *MMO =
13978 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
13979 MachineMemOperand::MOStore, MemSize, MemSize);
13981 if (UnsignedFixup) {
13983 // Insert the FIST, load its result as two i32's,
13984 // and XOR the high i32 with Adjust.
13986 SDValue FistOps[] = { Chain, Value, StackSlot };
13987 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
13988 FistOps, DstTy, MMO);
13991 DAG.getLoad(MVT::i32, DL, FIST, StackSlot, MachinePointerInfo());
13992 SDValue HighAddr = DAG.getMemBasePlusOffset(StackSlot, 4, DL);
13995 DAG.getLoad(MVT::i32, DL, FIST, HighAddr, MachinePointerInfo());
13996 High32 = DAG.getNode(ISD::XOR, DL, MVT::i32, High32, Adjust);
13998 if (Subtarget.is64Bit()) {
13999 // Join High32 and Low32 into a 64-bit result.
14000 // (High32 << 32) | Low32
14001 Low32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Low32);
14002 High32 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, High32);
14003 High32 = DAG.getNode(ISD::SHL, DL, MVT::i64, High32,
14004 DAG.getConstant(32, DL, MVT::i8));
14005 SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i64, High32, Low32);
14006 return std::make_pair(Result, SDValue());
14009 SDValue ResultOps[] = { Low32, High32 };
14011 SDValue pair = IsReplace
14012 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResultOps)
14013 : DAG.getMergeValues(ResultOps, DL);
14014 return std::make_pair(pair, SDValue());
14016 // Build the FP_TO_INT*_IN_MEM
14017 SDValue Ops[] = { Chain, Value, StackSlot };
14018 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14020 return std::make_pair(FIST, StackSlot);
14024 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14025 const X86Subtarget &Subtarget) {
14026 MVT VT = Op->getSimpleValueType(0);
14027 SDValue In = Op->getOperand(0);
14028 MVT InVT = In.getSimpleValueType();
14031 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
14032 return DAG.getNode(ISD::ZERO_EXTEND, dl, VT, In);
14034 // Optimize vectors in AVX mode:
14037 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14038 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14039 // Concat upper and lower parts.
14042 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14043 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14044 // Concat upper and lower parts.
14047 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14048 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14049 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14052 if (Subtarget.hasInt256())
14053 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14055 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14056 SDValue Undef = DAG.getUNDEF(InVT);
14057 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14058 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14059 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14061 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14062 VT.getVectorNumElements()/2);
14064 OpLo = DAG.getBitcast(HVT, OpLo);
14065 OpHi = DAG.getBitcast(HVT, OpHi);
14067 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14070 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14071 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14072 MVT VT = Op->getSimpleValueType(0);
14073 SDValue In = Op->getOperand(0);
14074 MVT InVT = In.getSimpleValueType();
14076 unsigned int NumElts = VT.getVectorNumElements();
14077 if (NumElts != 8 && NumElts != 16 && !Subtarget.hasBWI())
14080 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14081 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14083 assert(InVT.getVectorElementType() == MVT::i1);
14085 // Extend VT to a 512-bit vector type if it is a 128- or 256-bit vector and VLX is not supported.
14087 if (!VT.is512BitVector() && !Subtarget.hasVLX())
14088 ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
14091 DAG.getConstant(APInt(ExtVT.getScalarSizeInBits(), 1), DL, ExtVT);
14093 DAG.getConstant(APInt::getNullValue(ExtVT.getScalarSizeInBits()), DL, ExtVT);
14095 SDValue SelectedVal = DAG.getNode(ISD::VSELECT, DL, ExtVT, In, One, Zero);
14097 return SelectedVal;
14098 return DAG.getNode(X86ISD::VTRUNC, DL, VT, SelectedVal);
14101 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
14102 SelectionDAG &DAG) {
14103 if (Subtarget.hasFp256())
14104 if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
14110 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
14111 SelectionDAG &DAG) {
14113 MVT VT = Op.getSimpleValueType();
14114 SDValue In = Op.getOperand(0);
14115 MVT SVT = In.getSimpleValueType();
14117 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14118 return LowerZERO_EXTEND_AVX512(Op, Subtarget, DAG);
14120 if (Subtarget.hasFp256())
14121 if (SDValue Res = LowerAVXExtend(Op, DAG, Subtarget))
14124 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14125 VT.getVectorNumElements() != SVT.getVectorNumElements());
14129 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
14130 const X86Subtarget &Subtarget) {
14133 MVT VT = Op.getSimpleValueType();
14134 SDValue In = Op.getOperand(0);
14135 MVT InVT = In.getSimpleValueType();
14137 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
14139 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
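// Truncation to i1 keeps only the low bit of each element. Shifting that bit
// into the element's sign-bit position lets VPMOVB2M/VPMOVW2M (or TESTM)
// build the mask directly from the per-element sign bits.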
14140 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
14141 if (InVT.getScalarSizeInBits() <= 16) {
14142 if (Subtarget.hasBWI()) {
14143 // Legal; this will be selected to VPMOVB2M / VPMOVW2M.
14144 // Shifting packed bytes is not supported natively, so bitcast to words.
14145 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
14146 SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, ExtVT,
14147 DAG.getBitcast(ExtVT, In),
14148 DAG.getConstant(ShiftInx, DL, ExtVT));
14149 ShiftNode = DAG.getBitcast(InVT, ShiftNode);
14150 return DAG.getNode(X86ISD::CVT2MASK, DL, VT, ShiftNode);
14152 // Otherwise use TESTD/Q, extending the vector to packed dwords/qwords.
14153 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
14154 "Unexpected vector type.");
14155 unsigned NumElts = InVT.getVectorNumElements();
14156 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(512/NumElts), NumElts);
14157 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14159 ShiftInx = InVT.getScalarSizeInBits() - 1;
14162 SDValue ShiftNode = DAG.getNode(ISD::SHL, DL, InVT, In,
14163 DAG.getConstant(ShiftInx, DL, InVT));
14164 return DAG.getNode(X86ISD::TESTM, DL, VT, ShiftNode, ShiftNode);
14167 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14169 MVT VT = Op.getSimpleValueType();
14170 SDValue In = Op.getOperand(0);
14171 MVT InVT = In.getSimpleValueType();
14173 if (VT == MVT::i1) {
14174 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14175 "Invalid scalar TRUNCATE operation");
14176 if (InVT.getSizeInBits() >= 32)
14178 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14179 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14181 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14182 "Invalid TRUNCATE operation");
14184 if (VT.getVectorElementType() == MVT::i1)
14185 return LowerTruncateVecI1(Op, DAG, Subtarget);
14187 // vpmovqb/w/d, vpmovdb/w, vpmovwb
14188 if (Subtarget.hasAVX512()) {
14189 // Word-to-byte truncation is legal only with BWI.
14190 if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) // v16i16 -> v16i8
14191 return DAG.getNode(X86ISD::VTRUNC, DL, VT,
14192 DAG.getNode(X86ISD::VSEXT, DL, MVT::v16i32, In));
14193 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14195 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14196 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14197 if (Subtarget.hasInt256()) {
14198 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14199 In = DAG.getBitcast(MVT::v8i32, In);
14200 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14202 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14203 DAG.getIntPtrConstant(0, DL));
14206 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14207 DAG.getIntPtrConstant(0, DL));
14208 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14209 DAG.getIntPtrConstant(2, DL));
14210 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
14211 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
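// Pick the even dwords, i.e. the low 32 bits of each original 64-bit element,
// from both halves.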
14212 static const int ShufMask[] = {0, 2, 4, 6};
14213 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14216 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14217 // On AVX2, v8i32 -> v8i16 becomes a PSHUFB.
14218 if (Subtarget.hasInt256()) {
14219 In = DAG.getBitcast(MVT::v32i8, In);
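// Build a PSHUFB mask that, within each 128-bit lane, gathers the two low
// bytes of every dword (the truncated i16 values) into the lane's low 8
// bytes; 0x80 entries write zero into the remaining bytes. The v4i64 shuffle
// below then compacts the two lanes into the low 128 bits.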
14221 SmallVector<SDValue,32> pshufbMask;
14222 for (unsigned i = 0; i < 2; ++i) {
14223 pshufbMask.push_back(DAG.getConstant(0x0, DL, MVT::i8));
14224 pshufbMask.push_back(DAG.getConstant(0x1, DL, MVT::i8));
14225 pshufbMask.push_back(DAG.getConstant(0x4, DL, MVT::i8));
14226 pshufbMask.push_back(DAG.getConstant(0x5, DL, MVT::i8));
14227 pshufbMask.push_back(DAG.getConstant(0x8, DL, MVT::i8));
14228 pshufbMask.push_back(DAG.getConstant(0x9, DL, MVT::i8));
14229 pshufbMask.push_back(DAG.getConstant(0xc, DL, MVT::i8));
14230 pshufbMask.push_back(DAG.getConstant(0xd, DL, MVT::i8));
14231 for (unsigned j = 0; j < 8; ++j)
14232 pshufbMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
14234 SDValue BV = DAG.getBuildVector(MVT::v32i8, DL, pshufbMask);
14235 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14236 In = DAG.getBitcast(MVT::v4i64, In);
14238 static const int ShufMask[] = {0, 2, -1, -1};
14239 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14241 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14242 DAG.getIntPtrConstant(0, DL));
14243 return DAG.getBitcast(VT, In);
14246 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14247 DAG.getIntPtrConstant(0, DL));
14249 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14250 DAG.getIntPtrConstant(4, DL));
14252 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
14253 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
14255 // The PSHUFB mask:
14256 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14257 -1, -1, -1, -1, -1, -1, -1, -1};
14259 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14260 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14261 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14263 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
14264 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
14266 // The MOVLHPS Mask:
14267 static const int ShufMask2[] = {0, 1, 4, 5};
14268 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14269 return DAG.getBitcast(MVT::v8i16, res);
14272 // Handle truncation of V256 to V128 using shuffles.
14273 if (!VT.is128BitVector() || !InVT.is256BitVector())
14276 assert(Subtarget.hasFp256() && "256-bit vector without AVX!");
14278 unsigned NumElems = VT.getVectorNumElements();
14279 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14281 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14282 // Prepare truncation shuffle mask
14283 for (unsigned i = 0; i != NumElems; ++i)
14284 MaskVec[i] = i * 2;
14285 SDValue V = DAG.getVectorShuffle(NVT, DL, DAG.getBitcast(NVT, In),
14286 DAG.getUNDEF(NVT), MaskVec);
14287 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14288 DAG.getIntPtrConstant(0, DL));
14291 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14292 SelectionDAG &DAG) const {
14293 assert(!Op.getSimpleValueType().isVector());
14295 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14296 /*IsSigned=*/ true, /*IsReplace=*/ false);
14297 SDValue FIST = Vals.first, StackSlot = Vals.second;
14298 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14299 if (!FIST.getNode())
14302 if (StackSlot.getNode())
14303 // Load the result.
14304 return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot,
14305 MachinePointerInfo());
14307 // The node is the result.
14311 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14312 SelectionDAG &DAG) const {
14313 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14314 /*IsSigned=*/ false, /*IsReplace=*/ false);
14315 SDValue FIST = Vals.first, StackSlot = Vals.second;
14316 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14317 if (!FIST.getNode())
14320 if (StackSlot.getNode())
14321 // Load the result.
14322 return DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot,
14323 MachinePointerInfo());
14325 // The node is the result.
14329 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14331 MVT VT = Op.getSimpleValueType();
14332 SDValue In = Op.getOperand(0);
14333 MVT SVT = In.getSimpleValueType();
14335 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14337 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14338 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14339 In, DAG.getUNDEF(SVT)));
14342 /// The only differences between FABS and FNEG are the mask and the logic op.
14343 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14344 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14345 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14346 "Wrong opcode for lowering FABS or FNEG.");
14348 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14350 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14351 // into an FNABS. We'll lower the FABS after that if it is still in use.
14353 for (SDNode *User : Op->uses())
14354 if (User->getOpcode() == ISD::FNEG)
14358 MVT VT = Op.getSimpleValueType();
14360 bool IsF128 = (VT == MVT::f128);
14362 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14363 // decide if we should generate a 16-byte constant mask when we only need 4 or
14364 // 8 bytes for the scalar case.
14370 if (VT.isVector()) {
14372 EltVT = VT.getVectorElementType();
14373 NumElts = VT.getVectorNumElements();
14374 } else if (IsF128) {
14375 // SSE instructions are used for optimized f128 logical operations.
14376 LogicVT = MVT::f128;
14380 // There are no scalar bitwise logical SSE/AVX instructions, so we
14381 // generate a 16-byte vector constant and logic op even for the scalar case.
14382 // Using a 16-byte mask allows folding the load of the mask with
14383 // the logic op, so it can save (~4 bytes) on code size.
14384 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
14386 NumElts = (VT == MVT::f64) ? 2 : 4;
14389 unsigned EltBits = EltVT.getSizeInBits();
14390 LLVMContext *Context = DAG.getContext();
14391 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
14393 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14394 Constant *C = ConstantInt::get(*Context, MaskElt);
14395 C = ConstantVector::getSplat(NumElts, C);
14396 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14397 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
14398 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14399 SDValue Mask = DAG.getLoad(
14400 LogicVT, dl, DAG.getEntryNode(), CPIdx,
14401 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Alignment);
14403 SDValue Op0 = Op.getOperand(0);
14404 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14406 IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14407 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14409 if (VT.isVector() || IsF128)
14410 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
14412 // For the scalar case extend to a 128-bit vector, perform the logic op,
14413 // and extract the scalar result back out.
14414 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
14415 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
14416 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
14417 DAG.getIntPtrConstant(0, dl));
14420 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14421 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14422 LLVMContext *Context = DAG.getContext();
14423 SDValue Op0 = Op.getOperand(0);
14424 SDValue Op1 = Op.getOperand(1);
14426 MVT VT = Op.getSimpleValueType();
14427 MVT SrcVT = Op1.getSimpleValueType();
14428 bool IsF128 = (VT == MVT::f128);
14430 // If second operand is smaller, extend it first.
14431 if (SrcVT.bitsLT(VT)) {
14432 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14435 // And if it is bigger, shrink it first.
14436 if (SrcVT.bitsGT(VT)) {
14437 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1, dl));
14441 // At this point the operands and the result should have the same
14442 // type, and that won't be f80 since that is not custom lowered.
14443 assert((VT == MVT::f64 || VT == MVT::f32 || IsF128) &&
14444 "Unexpected type in LowerFCOPYSIGN");
14446 const fltSemantics &Sem =
14447 VT == MVT::f64 ? APFloat::IEEEdouble :
14448 (IsF128 ? APFloat::IEEEquad : APFloat::IEEEsingle);
14449 const unsigned SizeInBits = VT.getSizeInBits();
14451 SmallVector<Constant *, 4> CV(
14452 VT == MVT::f64 ? 2 : (IsF128 ? 1 : 4),
14453 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
14455 // First, clear all bits but the sign bit from the second operand (sign).
14456 CV[0] = ConstantFP::get(*Context,
14457 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14458 Constant *C = ConstantVector::get(CV);
14459 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
14460 SDValue CPIdx = DAG.getConstantPool(C, PtrVT, 16);
14462 // Perform all logic operations as 16-byte vectors because there are no
14463 // scalar FP logic instructions in SSE. This allows load folding of the
14464 // constants into the logic instructions.
14465 MVT LogicVT = (VT == MVT::f64) ? MVT::v2f64 : (IsF128 ? MVT::f128 : MVT::v4f32);
14467 DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
14468 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
14469 /* Alignment = */ 16);
14471 Op1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Op1);
14472 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Op1, Mask1);
14474 // Next, clear the sign bit from the first operand (magnitude).
14475 // If it's a constant, we can clear it here.
14476 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
14477 APFloat APF = Op0CN->getValueAPF();
14478 // If the magnitude is a positive zero, the sign bit alone is enough.
14479 if (APF.isPosZero())
14480 return IsF128 ? SignBit :
14481 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcVT, SignBit,
14482 DAG.getIntPtrConstant(0, dl));
14484 CV[0] = ConstantFP::get(*Context, APF);
14486 CV[0] = ConstantFP::get(
14488 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
14490 C = ConstantVector::get(CV);
14491 CPIdx = DAG.getConstantPool(C, PtrVT, 16);
14493 DAG.getLoad(LogicVT, dl, DAG.getEntryNode(), CPIdx,
14494 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
14495 /* Alignment = */ 16);
14496 // If the magnitude operand wasn't a constant, we need to AND out the sign.
14497 if (!isa<ConstantFPSDNode>(Op0)) {
14499 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Op0);
14500 Val = DAG.getNode(X86ISD::FAND, dl, LogicVT, Op0, Val);
14502 // OR the magnitude value with the sign bit.
14503 Val = DAG.getNode(X86ISD::FOR, dl, LogicVT, Val, SignBit);
14504 return IsF128 ? Val :
14505 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcVT, Val,
14506 DAG.getIntPtrConstant(0, dl));
14509 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
14510 SDValue N0 = Op.getOperand(0);
14512 MVT VT = Op.getSimpleValueType();
14514 MVT OpVT = N0.getSimpleValueType();
14515 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
14516 "Unexpected type for FGETSIGN");
14518 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
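// MOVMSK packs the sign bit of every vector element into the low bits of a
// GPR; masking with 1 keeps only the sign of element 0, which holds the
// scalar input.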
14519 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
14520 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
14521 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
14522 Res = DAG.getZExtOrTrunc(Res, dl, VT);
14523 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
14527 // Check whether an OR'd tree is PTEST-able.
14528 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget &Subtarget,
14529 SelectionDAG &DAG) {
14530 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
14532 if (!Subtarget.hasSSE41())
14535 if (!Op->hasOneUse())
14538 SDNode *N = Op.getNode();
14541 SmallVector<SDValue, 8> Opnds;
14542 DenseMap<SDValue, unsigned> VecInMap;
14543 SmallVector<SDValue, 8> VecIns;
14544 EVT VT = MVT::Other;
14546 // Recognize a special case where a vector is cast into a wide integer to test all 0s.
14548 Opnds.push_back(N->getOperand(0));
14549 Opnds.push_back(N->getOperand(1));
14551 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
14552 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
14553 // BFS traverse all OR'd operands.
14554 if (I->getOpcode() == ISD::OR) {
14555 Opnds.push_back(I->getOperand(0));
14556 Opnds.push_back(I->getOperand(1));
14557 // Re-evaluate the number of nodes to be traversed.
14558 e += 2; // 2 more nodes (LHS and RHS) are pushed.
14562 // Quit if this is not an EXTRACT_VECTOR_ELT.
14563 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14566 // Quit if the index is not a constant.
14567 SDValue Idx = I->getOperand(1);
14568 if (!isa<ConstantSDNode>(Idx))
14571 SDValue ExtractedFromVec = I->getOperand(0);
14572 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
14573 if (M == VecInMap.end()) {
14574 VT = ExtractedFromVec.getValueType();
14575 // Quit if not 128/256-bit vector.
14576 if (!VT.is128BitVector() && !VT.is256BitVector())
14578 // Quit if not the same type.
14579 if (VecInMap.begin() != VecInMap.end() &&
14580 VT != VecInMap.begin()->first.getValueType())
14582 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
14583 VecIns.push_back(ExtractedFromVec);
14585 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
14588 assert((VT.is128BitVector() || VT.is256BitVector()) &&
14589 "Not extracted from 128-/256-bit vector.");
14591 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
14593 for (DenseMap<SDValue, unsigned>::const_iterator
14594 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
14595 // Quit if not all elements are used.
14596 if (I->second != FullMask)
14600 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
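// PTEST with identical operands sets ZF exactly when the operand is all
// zeros, so checking that every OR'd input is zero reduces to a single PTEST
// on the OR of the source vectors.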
14602 // Cast all vectors into TestVT for PTEST.
14603 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
14604 VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
14606 // If more than one full vector is evaluated, OR them together first before the PTEST.
14607 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
14608 // Each iteration will OR 2 nodes and append the result until there is only
14609 // 1 node left, i.e. the final OR'd value of all vectors.
14610 SDValue LHS = VecIns[Slot];
14611 SDValue RHS = VecIns[Slot + 1];
14612 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
14615 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
14616 VecIns.back(), VecIns.back());
14619 /// \brief return true if \c Op has a use that doesn't just read flags.
14620 static bool hasNonFlagsUse(SDValue Op) {
14621 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
14623 SDNode *User = *UI;
14624 unsigned UOpNo = UI.getOperandNo();
14625 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
14626 // Look past the truncate.
14627 UOpNo = User->use_begin().getOperandNo();
14628 User = *User->use_begin();
14631 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
14632 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
14638 // Emit KTEST instruction for bit vectors on AVX-512
14639 static SDValue EmitKTEST(SDValue Op, SelectionDAG &DAG,
14640 const X86Subtarget &Subtarget) {
14641 if (Op.getOpcode() == ISD::BITCAST) {
14642 auto hasKTEST = [&](MVT VT) {
14643 unsigned SizeInBits = VT.getSizeInBits();
14644 return (Subtarget.hasDQI() && (SizeInBits == 8 || SizeInBits == 16)) ||
14645 (Subtarget.hasBWI() && (SizeInBits == 32 || SizeInBits == 64));
14647 SDValue Op0 = Op.getOperand(0);
14648 MVT Op0VT = Op0.getValueType().getSimpleVT();
14649 if (Op0VT.isVector() && Op0VT.getVectorElementType() == MVT::i1 &&
14651 return DAG.getNode(X86ISD::KTEST, SDLoc(Op), Op0VT, Op0, Op0);
14656 /// Emit nodes that will be selected as "test Op0,Op0", or something equivalent.
14658 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
14659 SelectionDAG &DAG) const {
14660 if (Op.getValueType() == MVT::i1) {
14661 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
14662 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
14663 DAG.getConstant(0, dl, MVT::i8));
14665 // CF and OF aren't always set the way we want. Determine which
14666 // of these we need.
14667 bool NeedCF = false;
14668 bool NeedOF = false;
14671 case X86::COND_A: case X86::COND_AE:
14672 case X86::COND_B: case X86::COND_BE:
14675 case X86::COND_G: case X86::COND_GE:
14676 case X86::COND_L: case X86::COND_LE:
14677 case X86::COND_O: case X86::COND_NO: {
14678 // Check whether we really need to set the
14679 // overflow flag. If NoSignedWrap is present
14680 // on the operation, OF is not actually needed.
14681 switch (Op->getOpcode()) {
14686 const auto *BinNode = cast<BinaryWithFlagsSDNode>(Op.getNode());
14687 if (BinNode->Flags.hasNoSignedWrap())
14697 // See if we can use the EFLAGS value from the operand instead of
14698 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
14699 // we prove that the arithmetic won't overflow, we can't use OF or CF.
14700 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
14701 // Emit KTEST for bit vectors
14702 if (auto Node = EmitKTEST(Op, DAG, Subtarget))
14704 // Emit a CMP with 0, which is the TEST pattern.
14705 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14706 DAG.getConstant(0, dl, Op.getValueType()));
14708 unsigned Opcode = 0;
14709 unsigned NumOperands = 0;
14711 // Truncate operations may prevent the merge of the SETCC instruction
14712 // and the arithmetic instruction before it. Attempt to truncate the operands
14713 // of the arithmetic instruction and use a reduced bit-width instruction.
14714 bool NeedTruncation = false;
14715 SDValue ArithOp = Op;
14716 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
14717 SDValue Arith = Op->getOperand(0);
14718 // Both the trunc and the arithmetic op need to have one user each.
14719 if (Arith->hasOneUse())
14720 switch (Arith.getOpcode()) {
14727 NeedTruncation = true;
14733 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
14734 // which may be the result of a CAST. We use the variable 'Op', which is the
14735 // non-casted variable when we check for possible users.
14736 switch (ArithOp.getOpcode()) {
14738 // Due to an isel shortcoming, be conservative if this add is likely to be
14739 // selected as part of a load-modify-store instruction. When the root node
14740 // in a match is a store, isel doesn't know how to remap non-chain non-flag
14741 // uses of other nodes in the match, such as the ADD in this case. This
14742 // leads to the ADD being left around and reselected, with the result being
14743 // two adds in the output. Alas, even if none of our users are stores, that
14744 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
14745 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
14746 // climbing the DAG back to the root, and it doesn't seem to be worth the
14748 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14749 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14750 if (UI->getOpcode() != ISD::CopyToReg &&
14751 UI->getOpcode() != ISD::SETCC &&
14752 UI->getOpcode() != ISD::STORE)
14755 if (ConstantSDNode *C =
14756 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
14757 // An add of one will be selected as an INC.
14758 if (C->isOne() && !Subtarget.slowIncDec()) {
14759 Opcode = X86ISD::INC;
14764 // An add of negative one (subtract of one) will be selected as a DEC.
14765 if (C->isAllOnesValue() && !Subtarget.slowIncDec()) {
14766 Opcode = X86ISD::DEC;
14772 // Otherwise use a regular EFLAGS-setting add.
14773 Opcode = X86ISD::ADD;
14778 // If we have a constant logical shift that's only used in a comparison
14779 // against zero turn it into an equivalent AND. This allows turning it into
14780 // a TEST instruction later.
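// For example, with a 32-bit value X, (srl X, 3) == 0 is equivalent to
// (X & 0xFFFFFFF8) == 0, and (shl X, 3) == 0 to (X & 0x1FFFFFFF) == 0.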
14781 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
14782 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
14783 EVT VT = Op.getValueType();
14784 unsigned BitWidth = VT.getSizeInBits();
14785 unsigned ShAmt = Op->getConstantOperandVal(1);
14786 if (ShAmt >= BitWidth) // Avoid undefined shifts.
14788 APInt Mask = ArithOp.getOpcode() == ISD::SRL
14789 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
14790 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
14791 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
14793 Op = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
14794 DAG.getConstant(Mask, dl, VT));
14799 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
14800 // because a TEST instruction will be better.
14801 if (!hasNonFlagsUse(Op)) {
14802 SDValue Op0 = ArithOp->getOperand(0);
14803 SDValue Op1 = ArithOp->getOperand(1);
14804 EVT VT = ArithOp.getValueType();
14805 bool isAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
14806 bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
14808 // But if we can combine this into an ANDN operation, then create an AND
14809 // now and allow it to be pattern matched into an ANDN.
14810 if (!Subtarget.hasBMI() || !isAndn || !isLegalAndnType)
14817 // Due to the ISEL shortcoming noted above, be conservative if this op is
14818 // likely to be selected as part of a load-modify-store instruction.
14819 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14820 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14821 if (UI->getOpcode() == ISD::STORE)
14824 // Otherwise use a regular EFLAGS-setting instruction.
14825 switch (ArithOp.getOpcode()) {
14826 default: llvm_unreachable("unexpected operator!");
14827 case ISD::SUB: Opcode = X86ISD::SUB; break;
14828 case ISD::XOR: Opcode = X86ISD::XOR; break;
14829 case ISD::AND: Opcode = X86ISD::AND; break;
14831 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
14832 if (SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG))
14835 Opcode = X86ISD::OR;
14849 return SDValue(Op.getNode(), 1);
14855 // If we found that truncation is beneficial, perform the truncation and update 'Op'.
14857 if (NeedTruncation) {
14858 EVT VT = Op.getValueType();
14859 SDValue WideVal = Op->getOperand(0);
14860 EVT WideVT = WideVal.getValueType();
14861 unsigned ConvertedOp = 0;
14862 // Use a target machine opcode to prevent further DAGCombine
14863 // optimizations that may separate the arithmetic operations
14864 // from the setcc node.
14865 switch (WideVal.getOpcode()) {
14867 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
14868 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
14869 case ISD::AND: ConvertedOp = X86ISD::AND; break;
14870 case ISD::OR: ConvertedOp = X86ISD::OR; break;
14871 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
14875 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14876 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
14877 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
14878 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
14879 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
14885 // Emit KTEST for bit vectors
14886 if (auto Node = EmitKTEST(Op, DAG, Subtarget))
14889 // Emit a CMP with 0, which is the TEST pattern.
14890 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14891 DAG.getConstant(0, dl, Op.getValueType()));
14893 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
14894 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
14896 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
14897 DAG.ReplaceAllUsesWith(Op, New);
14898 return SDValue(New.getNode(), 1);
14901 /// Emit nodes that will be selected as "cmp Op0,Op1", or something equivalent.
14903 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
14904 const SDLoc &dl, SelectionDAG &DAG) const {
14905 if (isNullConstant(Op1))
14906 return EmitTest(Op0, X86CC, dl, DAG);
14908 assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
14909 "Unexpected comparison operation for MVT::i1 operands");
14911 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
14912 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
14913 // Only promote the compare up to i32 if it is a 16-bit operation
14914 // with an immediate. 16-bit immediates are to be avoided.
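// (A 16-bit immediate requires an operand-size prefix, which together with
// the immediate forms a length-changing prefix that many CPUs pre-decode
// slowly.)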
14915 if ((Op0.getValueType() == MVT::i16 &&
14916 (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
14917 !DAG.getMachineFunction().getFunction()->optForMinSize() &&
14918 !Subtarget.isAtom()) {
14919 unsigned ExtendOp =
14920 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
14921 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
14922 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
14924 // Use SUB instead of CMP to enable CSE between SUB and CMP.
14925 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
14926 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
14928 return SDValue(Sub.getNode(), 1);
14930 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
14933 /// Convert a comparison if required by the subtarget.
14934 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
14935 SelectionDAG &DAG) const {
14936 // If the subtarget does not support the FUCOMI instruction, floating-point
14937 // comparisons have to be converted.
14938 if (Subtarget.hasCMov() ||
14939 Cmp.getOpcode() != X86ISD::CMP ||
14940 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
14941 !Cmp.getOperand(1).getValueType().isFloatingPoint())
14944 // The instruction selector will select an FUCOM instruction instead of
14945 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
14946 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
14947 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
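// FNSTSW places the x87 condition codes C0/C2/C3 in bits 8, 10 and 14 of the
// status word; after the shift right by 8, SAHF copies them into CF, PF and
// ZF, matching what FUCOMI would have produced directly.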
14949 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
14950 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
14951 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
14952 DAG.getConstant(8, dl, MVT::i8));
14953 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
14955 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
14956 assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
14957 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
14960 /// The minimum architected relative accuracy is 2^-12. We need one
14961 /// Newton-Raphson step to have a good float result (24 bits of precision).
14962 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
14963 DAGCombinerInfo &DCI,
14964 unsigned &RefinementSteps,
14965 bool &UseOneConstNR) const {
14966 EVT VT = Op.getValueType();
14967 const char *RecipOp;
14969 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
14970 // TODO: Add support for AVX512 (v16f32).
14971 // It is likely not profitable to do this for f64 because a double-precision
14972 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
14973 // instructions: convert to single, rsqrtss, convert back to double, refine
14974 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
14975 // along with FMA, this could be a throughput win.
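// Each refinement step applies the standard Newton-Raphson iteration for
// 1/sqrt(a):  x1 = x0 * (1.5 - 0.5 * a * x0 * x0).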
14976 if (VT == MVT::f32 && Subtarget.hasSSE1())
14978 else if ((VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
14979 (VT == MVT::v8f32 && Subtarget.hasAVX()))
14980 RecipOp = "vec-sqrtf";
14984 TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
14985 if (!Recips.isEnabled(RecipOp))
14988 RefinementSteps = Recips.getRefinementSteps(RecipOp);
14989 UseOneConstNR = false;
14990 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
14993 /// The minimum architected relative accuracy is 2^-12. We need one
14994 /// Newton-Raphson step to have a good float result (24 bits of precision).
14995 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
14996 DAGCombinerInfo &DCI,
14997 unsigned &RefinementSteps) const {
14998 EVT VT = Op.getValueType();
14999 const char *RecipOp;
15001 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15002 // TODO: Add support for AVX512 (v16f32).
15003 // It is likely not profitable to do this for f64 because a double-precision
15004 // reciprocal estimate with refinement on x86 prior to FMA requires
15005 // 15 instructions: convert to single, rcpss, convert back to double, refine
15006 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15007 // along with FMA, this could be a throughput win.
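// Each refinement step applies the standard Newton-Raphson iteration for
// 1/a:  x1 = x0 * (2.0 - a * x0).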
15008 if (VT == MVT::f32 && Subtarget.hasSSE1())
15010 else if ((VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
15011 (VT == MVT::v8f32 && Subtarget.hasAVX()))
15012 RecipOp = "vec-divf";
15016 TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
15017 if (!Recips.isEnabled(RecipOp))
15020 RefinementSteps = Recips.getRefinementSteps(RecipOp);
15021 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15024 /// If we have at least two divisions that use the same divisor, convert to
15025 /// multiplication by a reciprocal. This may need to be adjusted for a given
15026 /// CPU if a division's cost is not at least twice the cost of a multiplication.
15027 /// This is because we still need one division to calculate the reciprocal and
15028 /// then we need two multiplies by that reciprocal as replacements for the
15029 /// original divisions.
15030 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
15034 /// Result of 'and' is compared against zero. Change to a BT node if possible.
15035 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15036 const SDLoc &dl, SelectionDAG &DAG) const {
15037 SDValue Op0 = And.getOperand(0);
15038 SDValue Op1 = And.getOperand(1);
15039 if (Op0.getOpcode() == ISD::TRUNCATE)
15040 Op0 = Op0.getOperand(0);
15041 if (Op1.getOpcode() == ISD::TRUNCATE)
15042 Op1 = Op1.getOperand(0);
15045 if (Op1.getOpcode() == ISD::SHL)
15046 std::swap(Op0, Op1);
15047 if (Op0.getOpcode() == ISD::SHL) {
15048 if (isOneConstant(Op0.getOperand(0))) {
15049 // If we looked past a truncate, check that it's only truncating away
15051 unsigned BitWidth = Op0.getValueSizeInBits();
15052 unsigned AndBitWidth = And.getValueSizeInBits();
15053 if (BitWidth > AndBitWidth) {
15055 DAG.computeKnownBits(Op0, Zeros, Ones);
15056 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15060 RHS = Op0.getOperand(1);
15062 } else if (Op1.getOpcode() == ISD::Constant) {
15063 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15064 uint64_t AndRHSVal = AndRHS->getZExtValue();
15065 SDValue AndLHS = Op0;
15067 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15068 LHS = AndLHS.getOperand(0);
15069 RHS = AndLHS.getOperand(1);
15072 // Use BT if the immediate can't be encoded in a TEST instruction.
15073 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15075 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl, LHS.getValueType());
15079 if (LHS.getNode()) {
15080 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15081 // instruction. Since the shift amount is in-range-or-undefined, we know
15082 // that doing a bittest on the i32 value is ok. We extend to i32 because
15083 // the encoding for the i16 version is larger than the i32 version.
15084 // Also promote i16 to i32 for performance / code size reason.
15085 if (LHS.getValueType() == MVT::i8 ||
15086 LHS.getValueType() == MVT::i16)
15087 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15089 // If the operand types disagree, extend the shift amount to match. Since
15090 // BT ignores high bits (like shifts) we can use anyextend.
15091 if (LHS.getValueType() != RHS.getValueType())
15092 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15094 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15095 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15096 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15097 DAG.getConstant(Cond, dl, MVT::i8), BT);
15103 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask CMPs.
15105 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15110 // SSE Condition code mapping:
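//   0 - EQ,  1 - LT,  2 - LE,  3 - UNORD,
//   4 - NEQ, 5 - NLT, 6 - NLE, 7 - ORD
// (A value of 8, used for SETUEQ/SETONE below, means that two comparisons
// plus a logic op are needed.)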
15119 switch (SetCCOpcode) {
15120 default: llvm_unreachable("Unexpected SETCC condition");
15122 case ISD::SETEQ: SSECC = 0; break;
15124 case ISD::SETGT: Swap = true; // Fallthrough
15126 case ISD::SETOLT: SSECC = 1; break;
15128 case ISD::SETGE: Swap = true; // Fallthrough
15130 case ISD::SETOLE: SSECC = 2; break;
15131 case ISD::SETUO: SSECC = 3; break;
15133 case ISD::SETNE: SSECC = 4; break;
15134 case ISD::SETULE: Swap = true; // Fallthrough
15135 case ISD::SETUGE: SSECC = 5; break;
15136 case ISD::SETULT: Swap = true; // Fallthrough
15137 case ISD::SETUGT: SSECC = 6; break;
15138 case ISD::SETO: SSECC = 7; break;
15140 case ISD::SETONE: SSECC = 8; break;
15143 std::swap(Op0, Op1);
15148 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
15149 /// concatenate the result back.
15150 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15151 MVT VT = Op.getSimpleValueType();
15153 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15154 "Unsupported value type for operation");
15156 unsigned NumElems = VT.getVectorNumElements();
15158 SDValue CC = Op.getOperand(2);
15160 // Extract the LHS vectors
15161 SDValue LHS = Op.getOperand(0);
15162 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
15163 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
15165 // Extract the RHS vectors
15166 SDValue RHS = Op.getOperand(1);
15167 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
15168 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
15170 // Issue the operation on the smaller types and concatenate the result back
15171 MVT EltVT = VT.getVectorElementType();
15172 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15173 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15174 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15175 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15178 static SDValue LowerBoolVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
15179 SDValue Op0 = Op.getOperand(0);
15180 SDValue Op1 = Op.getOperand(1);
15181 SDValue CC = Op.getOperand(2);
15182 MVT VT = Op.getSimpleValueType();
15185 assert(Op0.getSimpleValueType().getVectorElementType() == MVT::i1 &&
15186 "Unexpected type for boolean compare operation");
15187 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15188 SDValue NotOp0 = DAG.getNode(ISD::XOR, dl, VT, Op0,
15189 DAG.getConstant(-1, dl, VT));
15190 SDValue NotOp1 = DAG.getNode(ISD::XOR, dl, VT, Op1,
15191 DAG.getConstant(-1, dl, VT));
15192 switch (SetCCOpcode) {
15193 default: llvm_unreachable("Unexpected SETCC condition");
15195 // (x == y) -> ~(x ^ y)
15196 return DAG.getNode(ISD::XOR, dl, VT,
15197 DAG.getNode(ISD::XOR, dl, VT, Op0, Op1),
15198 DAG.getConstant(-1, dl, VT));
15200 // (x != y) -> (x ^ y)
15201 return DAG.getNode(ISD::XOR, dl, VT, Op0, Op1);
15204 // (x > y) -> (x & ~y)
15205 return DAG.getNode(ISD::AND, dl, VT, Op0, NotOp1);
15208 // (x < y) -> (~x & y)
15209 return DAG.getNode(ISD::AND, dl, VT, NotOp0, Op1);
15212 // (x <= y) -> (~x | y)
15213 return DAG.getNode(ISD::OR, dl, VT, NotOp0, Op1);
15216 // (x >= y) -> (x | ~y)
15217 return DAG.getNode(ISD::OR, dl, VT, Op0, NotOp1);
15221 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
15223 SDValue Op0 = Op.getOperand(0);
15224 SDValue Op1 = Op.getOperand(1);
15225 SDValue CC = Op.getOperand(2);
15226 MVT VT = Op.getSimpleValueType();
15229 assert(VT.getVectorElementType() == MVT::i1 &&
15230 "Cannot set masked compare for this operation");
15232 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15234 bool Unsigned = false;
15237 switch (SetCCOpcode) {
15238 default: llvm_unreachable("Unexpected SETCC condition");
15239 case ISD::SETNE: SSECC = 4; break;
15240 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15241 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15242 case ISD::SETLT: Swap = true; //fall-through
15243 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15244 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15245 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15246 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15247 case ISD::SETULE: Unsigned = true; //fall-through
15248 case ISD::SETLE: SSECC = 2; break;
15252 std::swap(Op0, Op1);
15254 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15255 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15256 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15257 DAG.getConstant(SSECC, dl, MVT::i8));
15260 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15261 /// operand \p Op1. If non-trivial (for example because it's not constant)
15262 /// return an empty value.
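/// The rewrite relies on (x <u C) being equivalent to (x <=u C-1) whenever
/// C != 0, which is why a zero element makes the transform bail out below.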
15263 static SDValue ChangeVSETULTtoVSETULE(const SDLoc &dl, SDValue Op1,
15264 SelectionDAG &DAG) {
15265 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15269 MVT VT = Op1.getSimpleValueType();
15270 MVT EVT = VT.getVectorElementType();
15271 unsigned n = VT.getVectorNumElements();
15272 SmallVector<SDValue, 8> ULTOp1;
15274 for (unsigned i = 0; i < n; ++i) {
15275 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15276 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EVT)
15279 // Avoid underflow.
15280 APInt Val = Elt->getAPIntValue();
15284 ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT));
15287 return DAG.getBuildVector(VT, dl, ULTOp1);
15290 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
15291 SelectionDAG &DAG) {
15292 SDValue Op0 = Op.getOperand(0);
15293 SDValue Op1 = Op.getOperand(1);
15294 SDValue CC = Op.getOperand(2);
15295 MVT VT = Op.getSimpleValueType();
15296 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15297 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15302 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15303 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15307 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15308 assert(VT.getVectorNumElements() <= 16);
15309 Opc = X86ISD::CMPM;
15311 Opc = X86ISD::CMPP;
15312 // The SSE/AVX packed FP comparison nodes are defined with a
15313 // floating-point vector result that matches the operand type. This allows
15314 // them to work with an SSE1 target (integer vector types are not legal).
15315 VT = Op0.getSimpleValueType();
15318 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
15319 // emit two comparisons and a logic op to tie them together.
15320 // TODO: This can be avoided if Intel (and only Intel as of 2016) AVX is
15323 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15325 // LLVM predicate is SETUEQ or SETONE.
15327 unsigned CombineOpc;
15328 if (SetCCOpcode == ISD::SETUEQ) {
15331 CombineOpc = Opc == X86ISD::CMPP ? static_cast<unsigned>(X86ISD::FOR) :
15332 static_cast<unsigned>(ISD::OR);
15334 assert(SetCCOpcode == ISD::SETONE);
15337 CombineOpc = Opc == X86ISD::CMPP ? static_cast<unsigned>(X86ISD::FAND) :
15338 static_cast<unsigned>(ISD::AND);
15341 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15342 DAG.getConstant(CC0, dl, MVT::i8));
15343 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15344 DAG.getConstant(CC1, dl, MVT::i8));
15345 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15347 // Handle all other FP comparisons here.
15348 Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
15349 DAG.getConstant(SSECC, dl, MVT::i8));
15352 // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
15353 // result type of SETCC. The bitcast is expected to be optimized away
15354 // during combining/isel.
15355 if (Opc == X86ISD::CMPP)
15356 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
15361 MVT VTOp0 = Op0.getSimpleValueType();
15362 assert(VTOp0 == Op1.getSimpleValueType() &&
15363 "Expected operands with same type!");
15364 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
15365 "Invalid number of packed elements for source and destination!");
15367 if (VT.is128BitVector() && VTOp0.is256BitVector()) {
15368 // On non-AVX512 targets, a vector of MVT::i1 is promoted by the type
15369 // legalizer to a wider vector type. In the case of 'vsetcc' nodes, the
15370 // legalizer first checks whether the first input operand of the setcc has
15371 // a legal type. If so, then it promotes the return type to that same type.
15372 // Otherwise, the return type is promoted to the 'next legal type' which,
15373 // for a vector of MVT::i1 is always a 128-bit integer vector type.
15375 // We reach this code only if the following two conditions are met:
15376 // 1. Both return type and operand type have been promoted to wider types
15377 // by the type legalizer.
15378 // 2. The original operand type has been promoted to a 256-bit vector.
15380 // Note that condition 2. only applies for AVX targets.
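// As an illustrative instance: on AVX1, 'v4i1 = setcc v4i64, v4i64' can reach
// this point with the result promoted to v4i32 while the operands stayed
// v4i64; we rebuild the setcc at v4i64 and truncate the result back to the
// 128-bit type our users expect.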
15381 SDValue NewOp = DAG.getSetCC(dl, VTOp0, Op0, Op1, SetCCOpcode);
15382 return DAG.getZExtOrTrunc(NewOp, dl, VT);
15385 // The non-AVX512 code below works under the assumption that source and
15386 // destination types are the same.
15387 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
15388 "Value types for source and destination must be the same!");
15390 // Break 256-bit integer vector compare into smaller ones.
15391 if (VT.is256BitVector() && !Subtarget.hasInt256())
15392 return Lower256IntVSETCC(Op, DAG);
15394 // Operands are boolean (vectors of i1)
15395 MVT OpVT = Op1.getSimpleValueType();
15396 if (OpVT.getVectorElementType() == MVT::i1)
15397 return LowerBoolVSETCC_AVX512(Op, DAG);
15399 // The result is boolean, but operands are int/float
15400 if (VT.getVectorElementType() == MVT::i1) {
15401 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15402 // but there is no compare instruction for i8 and i16 elements on KNL.
15403 // In that case, use an SSE compare instead.
15404 bool UseAVX512Inst =
15405 (OpVT.is512BitVector() ||
15406 OpVT.getVectorElementType().getSizeInBits() >= 32 ||
15407 (Subtarget.hasBWI() && Subtarget.hasVLX()));
15410 return LowerIntVSETCC_AVX512(Op, DAG);
15412 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15413 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15416 // Lower using XOP integer comparisons.
15417 if ((VT == MVT::v16i8 || VT == MVT::v8i16 ||
15418 VT == MVT::v4i32 || VT == MVT::v2i64) && Subtarget.hasXOP()) {
15419 // Translate compare code to XOP PCOM compare mode.
15420 unsigned CmpMode = 0;
15421 switch (SetCCOpcode) {
15422 default: llvm_unreachable("Unexpected SETCC condition");
15424 case ISD::SETLT: CmpMode = 0x00; break;
15426 case ISD::SETLE: CmpMode = 0x01; break;
15428 case ISD::SETGT: CmpMode = 0x02; break;
15430 case ISD::SETGE: CmpMode = 0x03; break;
15431 case ISD::SETEQ: CmpMode = 0x04; break;
15432 case ISD::SETNE: CmpMode = 0x05; break;
15435 // Are we comparing unsigned or signed integers?
15436 unsigned Opc = ISD::isUnsignedIntSetCC(SetCCOpcode)
15437 ? X86ISD::VPCOMU : X86ISD::VPCOM;
15439 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15440 DAG.getConstant(CmpMode, dl, MVT::i8));
15443 // We are handling one of the integer comparisons here. Since SSE only has
15444 // GT and EQ comparisons for integers, swapping operands and/or using multiple
15445 // operations may be required for some comparisons.
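// For example, the mappings selected below are roughly:
//   x <=s y  -->  NOT (PCMPGT x, y)                      (invert)
//   x <s  y  -->  PCMPGT y, x                            (swap)
//   x >u  y  -->  PCMPGT (x ^ SignBit), (y ^ SignBit)    (flip signs)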
15447 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15448 bool Subus = false;
15450 switch (SetCCOpcode) {
15451 default: llvm_unreachable("Unexpected SETCC condition");
15452 case ISD::SETNE: Invert = true;
15453 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15454 case ISD::SETLT: Swap = true;
15455 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15456 case ISD::SETGE: Swap = true;
15457 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15458 Invert = true; break;
15459 case ISD::SETULT: Swap = true;
15460 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15461 FlipSigns = true; break;
15462 case ISD::SETUGE: Swap = true;
15463 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15464 FlipSigns = true; Invert = true; break;
15467 // Special case: Use min/max operations for SETULE/SETUGE
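// A sketch of what the MinMax path produces:
//   x <=u y  -->  PCMPEQ (UMIN x, y), x
//   x >=u y  -->  PCMPEQ (UMAX x, y), x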
15468 MVT VET = VT.getVectorElementType();
15470 (Subtarget.hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15471 || (Subtarget.hasSSE2() && (VET == MVT::i8));
15474 switch (SetCCOpcode) {
15476 case ISD::SETULE: Opc = ISD::UMIN; MinMax = true; break;
15477 case ISD::SETUGE: Opc = ISD::UMAX; MinMax = true; break;
15480 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15483 bool hasSubus = Subtarget.hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15484 if (!MinMax && hasSubus) {
15485 // As another special case, use PSUBUS[BW] when it's profitable, e.g. emit
15487 // t = psubus Op0, Op1
15488 // pcmpeq t, <0..0>
15489 switch (SetCCOpcode) {
15491 case ISD::SETULT: {
15492 // If the comparison is against a constant we can turn this into a
15493 // setule. With psubus, setule does not require a swap. This is
15494 // beneficial because the constant in the register is no longer
15495 // clobbered as the destination, so it can be hoisted out of a loop.
15496 // Only do this pre-AVX since vpcmp* is no longer destructive.
15497 if (Subtarget.hasAVX())
15499 if (SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG)) {
15501 Subus = true; Invert = false; Swap = false;
15505 // Psubus is better than flip-sign because it requires no inversion.
15506 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15507 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15511 Opc = X86ISD::SUBUS;
15517 std::swap(Op0, Op1);
15519 // Check that the operation in question is available (most are plain SSE2,
15520 // but PCMPGTQ and PCMPEQQ have different requirements).
15521 if (VT == MVT::v2i64) {
15522 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
15523 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
15525 // First cast everything to the right type.
15526 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
15527 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
15529 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15530 // bits of the inputs before performing those operations. The lower
15531 // compare is always unsigned.
15534 SB = DAG.getConstant(0x80000000U, dl, MVT::v4i32);
15536 SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32);
15537 SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32);
15538 SB = DAG.getBuildVector(MVT::v4i32, dl, {Sign, Zero, Sign, Zero});
15540 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15541 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15543 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15544 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15545 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15547 // Create masks for only the low parts/high parts of the 64 bit integers.
15548 static const int MaskHi[] = { 1, 1, 3, 3 };
15549 static const int MaskLo[] = { 0, 0, 2, 2 };
15550 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15551 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15552 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15554 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15555 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15558 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15560 return DAG.getBitcast(VT, Result);
15563 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
15564 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15565 // pcmpeqd + pshufd + pand.
15566 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
15568 // First cast everything to the right type.
15569 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
15570 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
15573 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15575 // Make sure the lower and upper halves are both all-ones.
15576 static const int Mask[] = { 1, 0, 3, 2 };
15577 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15578 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15581 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15583 return DAG.getBitcast(VT, Result);
15587 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15588 // bits of the inputs before performing those operations.
15590 MVT EltVT = VT.getVectorElementType();
15591 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), dl,
15593 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15594 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15597 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15599 // If the logical-not of the result is required, perform that now.
15601 Result = DAG.getNOT(dl, Result, VT);
15604 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15607 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15608 getZeroVector(VT, Subtarget, DAG, dl));
15613 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15615 MVT VT = Op.getSimpleValueType();
15617 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15619 assert(((!Subtarget.hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15620 && "SetCC type must be 8-bit or 1-bit integer");
15621 SDValue Op0 = Op.getOperand(0);
15622 SDValue Op1 = Op.getOperand(1);
15624 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15626 // Optimize to BT if possible.
15627 // Lower (X & (1 << N)) == 0 to BT(X, N).
15628 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15629 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
15630 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15631 isNullConstant(Op1) &&
15632 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15633 if (SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG)) {
15634 if (VT == MVT::i1) {
15635 NewSetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, NewSetCC,
15636 DAG.getValueType(MVT::i1));
15637 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15643 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
15644 // these.
15645 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
15646 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15648 // If the input is a setcc, then reuse the input setcc or use a new one with
15649 // the inverted condition.
15650 if (Op0.getOpcode() == X86ISD::SETCC) {
15651 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
15652 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
15656 CCode = X86::GetOppositeBranchCondition(CCode);
15657 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15658 DAG.getConstant(CCode, dl, MVT::i8),
15659 Op0.getOperand(1));
15660 if (VT == MVT::i1) {
15661 SetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, SetCC,
15662 DAG.getValueType(MVT::i1));
15663 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15668 if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15669 if (isOneConstant(Op1)) {
15670 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
15671 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, dl, MVT::i1), NewCC);
15673 if (!isNullConstant(Op1)) {
15674 SDValue Xor = DAG.getNode(ISD::XOR, dl, MVT::i1, Op0, Op1);
15675 return DAG.getSetCC(dl, VT, Xor, DAG.getConstant(0, dl, MVT::i1), CC);
15679 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
15680 unsigned X86CC = TranslateX86CC(CC, dl, isFP, Op0, Op1, DAG);
15681 if (X86CC == X86::COND_INVALID)
15684 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
15685 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
15686 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15687 DAG.getConstant(X86CC, dl, MVT::i8), EFLAGS);
15688 if (VT == MVT::i1) {
15689 SetCC = DAG.getNode(ISD::AssertZext, dl, MVT::i8, SetCC,
15690 DAG.getValueType(MVT::i1));
15691 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15696 SDValue X86TargetLowering::LowerSETCCE(SDValue Op, SelectionDAG &DAG) const {
15697 SDValue LHS = Op.getOperand(0);
15698 SDValue RHS = Op.getOperand(1);
15699 SDValue Carry = Op.getOperand(2);
15700 SDValue Cond = Op.getOperand(3);
15703 assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only.");
15704 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
15706 assert(Carry.getOpcode() != ISD::CARRY_FALSE);
15707 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
15708 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry);
15709 SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
15710 DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
15711 if (Op.getSimpleValueType() == MVT::i1) {
15712 SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
15713 DAG.getValueType(MVT::i1));
15714 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
15719 /// Return true if opcode is a X86 logical comparison.
15720 static bool isX86LogicalCmp(SDValue Op) {
15721 unsigned Opc = Op.getNode()->getOpcode();
15722 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
15723 Opc == X86ISD::SAHF)
15725 if (Op.getResNo() == 1 &&
15726 (Opc == X86ISD::ADD ||
15727 Opc == X86ISD::SUB ||
15728 Opc == X86ISD::ADC ||
15729 Opc == X86ISD::SBB ||
15730 Opc == X86ISD::SMUL ||
15731 Opc == X86ISD::UMUL ||
15732 Opc == X86ISD::INC ||
15733 Opc == X86ISD::DEC ||
15734 Opc == X86ISD::OR ||
15735 Opc == X86ISD::XOR ||
15736 Opc == X86ISD::AND))
15739 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
15745 /// Returns the "condition" node, which may be wrapped in a "truncate",
15746 /// e.g. (i1 (trunc (i8 X86ISD::SETCC))).
15747 static SDValue getCondAfterTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
15748 if (V.getOpcode() != ISD::TRUNCATE)
15751 SDValue VOp0 = V.getOperand(0);
15752 if (VOp0.getOpcode() == ISD::AssertZext &&
15753 V.getValueSizeInBits() ==
15754 cast<VTSDNode>(VOp0.getOperand(1))->getVT().getSizeInBits())
15755 return VOp0.getOperand(0);
15757 unsigned InBits = VOp0.getValueSizeInBits();
15758 unsigned Bits = V.getValueSizeInBits();
15759 if (DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits)))
15760 return V.getOperand(0);
15764 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
15765 bool addTest = true;
15766 SDValue Cond = Op.getOperand(0);
15767 SDValue Op1 = Op.getOperand(1);
15768 SDValue Op2 = Op.getOperand(2);
15770 MVT VT = Op1.getSimpleValueType();
15773 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
15774 // are available or VBLENDV if AVX is available.
15775 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
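// Roughly, for 'select (setcc a, b, cc), t, f' the scalar-SSE path below
// computes (Cmp & t) | (~Cmp & f), i.e. a cmpss/cmpsd followed by
// andps/andnps/orps; the operand names here are illustrative only.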
15776 if (Cond.getOpcode() == ISD::SETCC &&
15777 ((Subtarget.hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
15778 (Subtarget.hasSSE1() && VT == MVT::f32)) &&
15779 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
15780 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
15781 int SSECC = translateX86FSETCC(
15782 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
15785 if (Subtarget.hasAVX512()) {
15786 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
15787 DAG.getConstant(SSECC, DL, MVT::i8));
15788 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
15791 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
15792 DAG.getConstant(SSECC, DL, MVT::i8));
15794 // If we have AVX, we can use a variable vector select (VBLENDV) instead
15795 // of 3 logic instructions for size savings and potentially speed.
15796 // Unfortunately, there is no scalar form of VBLENDV.
15798 // If either operand is a constant, don't try this. We can expect to
15799 // optimize away at least one of the logic instructions later in that
15800 // case, so that sequence would be faster than a variable blend.
15802 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
15803 // uses XMM0 as the selection register. That may need just as many
15804 // instructions as the AND/ANDN/OR sequence due to register moves, so
15805 // don't bother.
15807 if (Subtarget.hasAVX() &&
15808 !isa<ConstantFPSDNode>(Op1) && !isa<ConstantFPSDNode>(Op2)) {
15810 // Convert to vectors, do a VSELECT, and convert back to scalar.
15811 // All of the conversions should be optimized away.
15813 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
15814 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
15815 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
15816 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
15818 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
15819 VCmp = DAG.getBitcast(VCmpVT, VCmp);
15821 SDValue VSel = DAG.getNode(ISD::VSELECT, DL, VecVT, VCmp, VOp1, VOp2);
15823 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
15824 VSel, DAG.getIntPtrConstant(0, DL));
15826 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
15827 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
15828 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
15832 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
15834 if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
15835 Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
15836 else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
15837 Op1Scalar = Op1.getOperand(0);
15839 if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
15840 Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
15841 else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
15842 Op2Scalar = Op2.getOperand(0);
15843 if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
15844 SDValue newSelect = DAG.getNode(ISD::SELECT, DL,
15845 Op1Scalar.getValueType(),
15846 Cond, Op1Scalar, Op2Scalar);
15847 if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
15848 return DAG.getBitcast(VT, newSelect);
15849 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
15850 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
15851 DAG.getIntPtrConstant(0, DL));
15855 if (VT == MVT::v4i1 || VT == MVT::v2i1) {
15856 SDValue zeroConst = DAG.getIntPtrConstant(0, DL);
15857 Op1 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
15858 DAG.getUNDEF(MVT::v8i1), Op1, zeroConst);
15859 Op2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
15860 DAG.getUNDEF(MVT::v8i1), Op2, zeroConst);
15861 SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::v8i1,
15863 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, newSelect, zeroConst);
15866 if (Cond.getOpcode() == ISD::SETCC) {
15867 if (SDValue NewCond = LowerSETCC(Cond, DAG))
15871 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
15872 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
15873 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
15874 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
15875 if (Cond.getOpcode() == X86ISD::SETCC &&
15876 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
15877 isNullConstant(Cond.getOperand(1).getOperand(1))) {
15878 SDValue Cmp = Cond.getOperand(1);
15880 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
15882 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
15883 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
15884 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
15886 SDValue CmpOp0 = Cmp.getOperand(0);
15887 // Apply further optimizations for special cases
15888 // (select (x != 0), -1, 0) -> neg & sbb
15889 // (select (x == 0), 0, -1) -> neg & sbb
15890 if (isNullConstant(Y) &&
15891 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
15892 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
15893 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
15894 DAG.getConstant(0, DL,
15895 CmpOp0.getValueType()),
15897 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15898 DAG.getConstant(X86::COND_B, DL, MVT::i8),
15899 SDValue(Neg.getNode(), 1));
15903 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
15904 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
15905 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15907 SDValue Res = // Res = 0 or -1.
15908 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15909 DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp);
15911 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
15912 Res = DAG.getNOT(DL, Res, Res.getValueType());
15914 if (!isNullConstant(Op2))
15915 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
15920 // Look past (and (setcc_carry (cmp ...)), 1).
15921 if (Cond.getOpcode() == ISD::AND &&
15922 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
15923 isOneConstant(Cond.getOperand(1)))
15924 Cond = Cond.getOperand(0);
15926 // If condition flag is set by a X86ISD::CMP, then use it as the condition
15927 // setting operand in place of the X86ISD::SETCC.
15928 unsigned CondOpcode = Cond.getOpcode();
15929 if (CondOpcode == X86ISD::SETCC ||
15930 CondOpcode == X86ISD::SETCC_CARRY) {
15931 CC = Cond.getOperand(0);
15933 SDValue Cmp = Cond.getOperand(1);
15934 unsigned Opc = Cmp.getOpcode();
15935 MVT VT = Op.getSimpleValueType();
15937 bool IllegalFPCMov = false;
15938 if (VT.isFloatingPoint() && !VT.isVector() &&
15939 !isScalarFPTypeInSSEReg(VT)) // FPStack?
15940 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
15942 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
15943 Opc == X86ISD::BT) { // FIXME
15947 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
15948 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
15949 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
15950 Cond.getOperand(0).getValueType() != MVT::i8)) {
15951 SDValue LHS = Cond.getOperand(0);
15952 SDValue RHS = Cond.getOperand(1);
15953 unsigned X86Opcode;
15956 switch (CondOpcode) {
15957 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
15958 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
15959 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
15960 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
15961 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
15962 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
15963 default: llvm_unreachable("unexpected overflowing operator");
15965 if (CondOpcode == ISD::UMULO)
15966 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
15969 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
15971 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
15973 if (CondOpcode == ISD::UMULO)
15974 Cond = X86Op.getValue(2);
15976 Cond = X86Op.getValue(1);
15978 CC = DAG.getConstant(X86Cond, DL, MVT::i8);
15983 // Look past the truncate if the high bits are known zero.
15984 Cond = getCondAfterTruncWithZeroHighBitsInput(Cond, DAG);
15986 // We know the result of AND is compared against zero. Try to match
15987 // it to BT.
15988 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
15989 if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
15990 CC = NewSetCC.getOperand(0);
15991 Cond = NewSetCC.getOperand(1);
15998 CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
15999 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16002 // a < b ? -1 : 0 -> RES = ~setcc_carry
16003 // a < b ? 0 : -1 -> RES = setcc_carry
16004 // a >= b ? -1 : 0 -> RES = setcc_carry
16005 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16006 if (Cond.getOpcode() == X86ISD::SUB) {
16007 Cond = ConvertCmpIfNecessary(Cond, DAG);
16008 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16010 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16011 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
16012 (isNullConstant(Op1) || isNullConstant(Op2))) {
16013 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16014 DAG.getConstant(X86::COND_B, DL, MVT::i8),
16016 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
16017 return DAG.getNOT(DL, Res, Res.getValueType());
16022 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
16023 // widen the cmov and push the truncate through. This avoids introducing a new
16024 // branch during isel and doesn't add any extensions.
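// Sketch (names illustrative): select %c, (i8 (trunc i32 %x)), (i8 (trunc i32 %y))
//   --> (i8 (trunc (X86ISD::CMOV i32 %y, i32 %x, CC, Cond)))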
16025 if (Op.getValueType() == MVT::i8 &&
16026 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16027 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16028 if (T1.getValueType() == T2.getValueType() &&
16029 // Blacklist CopyFromReg to avoid partial register stalls.
16030 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16031 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16032 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16033 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16037 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16038 // condition is true.
16039 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16040 SDValue Ops[] = { Op2, Op1, CC, Cond };
16041 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16044 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op,
16045 const X86Subtarget &Subtarget,
16046 SelectionDAG &DAG) {
16047 MVT VT = Op->getSimpleValueType(0);
16048 SDValue In = Op->getOperand(0);
16049 MVT InVT = In.getSimpleValueType();
16050 MVT VTElt = VT.getVectorElementType();
16051 MVT InVTElt = InVT.getVectorElementType();
16055 if ((InVTElt == MVT::i1) &&
16056 (((Subtarget.hasBWI() && Subtarget.hasVLX() &&
16057 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16059 ((Subtarget.hasBWI() && VT.is512BitVector() &&
16060 VTElt.getSizeInBits() <= 16)) ||
16062 ((Subtarget.hasDQI() && Subtarget.hasVLX() &&
16063 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16065 ((Subtarget.hasDQI() && VT.is512BitVector() &&
16066 VTElt.getSizeInBits() >= 32))))
16067 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16069 unsigned int NumElts = VT.getVectorNumElements();
16071 if (NumElts != 8 && NumElts != 16 && !Subtarget.hasBWI())
16074 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16075 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16076 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16077 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16080 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16081 MVT ExtVT = NumElts == 8 ? MVT::v8i64 : MVT::v16i32;
16083 DAG.getConstant(APInt::getAllOnesValue(ExtVT.getScalarSizeInBits()), dl,
16086 DAG.getConstant(APInt::getNullValue(ExtVT.getScalarSizeInBits()), dl, ExtVT);
16088 SDValue V = DAG.getNode(ISD::VSELECT, dl, ExtVT, In, NegOne, Zero);
16089 if (VT.is512BitVector())
16091 return DAG.getNode(X86ISD::VTRUNC, dl, VT, V);
16094 static SDValue LowerSIGN_EXTEND_VECTOR_INREG(SDValue Op,
16095 const X86Subtarget &Subtarget,
16096 SelectionDAG &DAG) {
16097 SDValue In = Op->getOperand(0);
16098 MVT VT = Op->getSimpleValueType(0);
16099 MVT InVT = In.getSimpleValueType();
16100 assert(VT.getSizeInBits() == InVT.getSizeInBits());
16102 MVT SVT = VT.getVectorElementType();
16103 MVT InSVT = InVT.getVectorElementType();
16104 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
16106 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
16108 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
16110 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
16111 !(VT.is256BitVector() && Subtarget.hasInt256()))
16116 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
16117 if (VT.is256BitVector())
16118 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
16119 MVT::getVectorVT(InSVT, InVT.getVectorNumElements() / 2),
16120 In, DAG.getIntPtrConstant(0, dl));
16122 // SSE41 targets can use the pmovsx* instructions directly.
16123 if (Subtarget.hasSSE41())
16124 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16126 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
16130 // As SRAI is only available on i16/i32 types, we expand only up to i32
16131 // and handle i64 separately.
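// For instance, a sketch of what the loop below does for i16 -> i32:
//   t = UNPCKL undef, x   ; each i16 now sits in the high half of an i32 lane
//   r = VSRAI t, 16       ; the arithmetic shift pulls the sign bits back down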
16132 while (CurrVT != VT && CurrVT.getVectorElementType() != MVT::i32) {
16133 Curr = DAG.getNode(X86ISD::UNPCKL, dl, CurrVT, DAG.getUNDEF(CurrVT), Curr);
16134 MVT CurrSVT = MVT::getIntegerVT(CurrVT.getScalarSizeInBits() * 2);
16135 CurrVT = MVT::getVectorVT(CurrSVT, CurrVT.getVectorNumElements() / 2);
16136 Curr = DAG.getBitcast(CurrVT, Curr);
16139 SDValue SignExt = Curr;
16140 if (CurrVT != InVT) {
16141 unsigned SignExtShift =
16142 CurrVT.getVectorElementType().getSizeInBits() - InSVT.getSizeInBits();
16143 SignExt = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
16144 DAG.getConstant(SignExtShift, dl, MVT::i8));
16150 if (VT == MVT::v2i64 && CurrVT == MVT::v4i32) {
16151 SDValue Sign = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
16152 DAG.getConstant(31, dl, MVT::i8));
16153 SDValue Ext = DAG.getVectorShuffle(CurrVT, dl, SignExt, Sign, {0, 4, 1, 5});
16154 return DAG.getBitcast(VT, Ext);
16160 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
16161 SelectionDAG &DAG) {
16162 MVT VT = Op->getSimpleValueType(0);
16163 SDValue In = Op->getOperand(0);
16164 MVT InVT = In.getSimpleValueType();
16167 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16168 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16170 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16171 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16172 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16175 if (Subtarget.hasInt256())
16176 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16178 // Optimize vectors in AVX mode:
16179 // sign extend v8i16 to v8i32 and
16180 //             v4i32 to v4i64.
16182 // Divide the input vector into two parts;
16183 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 },
16184 // then use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
16185 // and concatenate the two halves back to the original VT.
16187 unsigned NumElems = InVT.getVectorNumElements();
16188 SDValue Undef = DAG.getUNDEF(InVT);
16190 SmallVector<int,8> ShufMask1(NumElems, -1);
16191 for (unsigned i = 0; i != NumElems/2; ++i)
16194 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask1);
16196 SmallVector<int,8> ShufMask2(NumElems, -1);
16197 for (unsigned i = 0; i != NumElems/2; ++i)
16198 ShufMask2[i] = i + NumElems/2;
16200 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask2);
16202 MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
16203 VT.getVectorNumElements()/2);
16205 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16206 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16208 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16211 // Lower a truncating store. Truncating stores to vXi1 vectors need special lowering.
16212 static SDValue LowerTruncatingStore(SDValue StOp, const X86Subtarget &Subtarget,
16213 SelectionDAG &DAG) {
16214 StoreSDNode *St = cast<StoreSDNode>(StOp.getNode());
16216 EVT MemVT = St->getMemoryVT();
16217 assert(St->isTruncatingStore() && "We only custom lower truncating stores.");
16218 assert(MemVT.isVector() && MemVT.getVectorElementType() == MVT::i1 &&
16219 "Expected truncstore of i1 vector");
16221 SDValue Op = St->getValue();
16222 MVT OpVT = Op.getValueType().getSimpleVT();
16223 unsigned NumElts = OpVT.getVectorNumElements();
16224 if ((Subtarget.hasVLX() && Subtarget.hasBWI() && Subtarget.hasDQI()) ||
16226 // Truncate and store - everything is legal
16227 Op = DAG.getNode(ISD::TRUNCATE, dl, MemVT, Op);
16228 if (MemVT.getSizeInBits() < 8)
16229 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
16230 DAG.getUNDEF(MVT::v8i1), Op,
16231 DAG.getIntPtrConstant(0, dl));
16232 return DAG.getStore(St->getChain(), dl, Op, St->getBasePtr(),
16233 St->getMemOperand());
16236 // Only a subset of the features is available; assume we have only AVX-512F.
16237 if (NumElts <= 8) {
16239 // Extend to an 8-element vector.
16240 MVT ExtVT = MVT::getVectorVT(OpVT.getScalarType(), 8);
16241 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ExtVT,
16242 DAG.getUNDEF(ExtVT), Op, DAG.getIntPtrConstant(0, dl));
16244 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i1, Op);
16245 return DAG.getStore(St->getChain(), dl, Op, St->getBasePtr(),
16246 St->getMemOperand());
16249 assert(OpVT == MVT::v32i8 && "Unexpected operand type");
16250 // Divide the vector into 2 parts and store each part separately
16251 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, Op,
16252 DAG.getIntPtrConstant(0, dl));
16253 Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::v16i1, Lo);
16254 SDValue BasePtr = St->getBasePtr();
16255 SDValue StLo = DAG.getStore(St->getChain(), dl, Lo, BasePtr,
16256 St->getMemOperand());
16257 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, Op,
16258 DAG.getIntPtrConstant(16, dl));
16259 Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::v16i1, Hi);
16261 SDValue BasePtrHi =
16262 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
16263 DAG.getConstant(2, dl, BasePtr.getValueType()));
16265 SDValue StHi = DAG.getStore(St->getChain(), dl, Hi,
16266 BasePtrHi, St->getMemOperand());
16267 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StLo, StHi);
16270 static SDValue LowerExtended1BitVectorLoad(SDValue Op,
16271 const X86Subtarget &Subtarget,
16272 SelectionDAG &DAG) {
16274 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16276 EVT MemVT = Ld->getMemoryVT();
16277 assert(MemVT.isVector() && MemVT.getScalarType() == MVT::i1 &&
16278 "Expected i1 vector load");
16279 unsigned ExtOpcode = Ld->getExtensionType() == ISD::ZEXTLOAD ?
16280 ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
16281 MVT VT = Op.getValueType().getSimpleVT();
16282 unsigned NumElts = VT.getVectorNumElements();
16284 if ((Subtarget.hasVLX() && Subtarget.hasBWI() && Subtarget.hasDQI()) ||
16286 // Load and extend - everything is legal
16288 SDValue Load = DAG.getLoad(MVT::v8i1, dl, Ld->getChain(),
16290 Ld->getMemOperand());
16291 // Replace chain users with the new chain.
16292 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16293 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16294 MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8);
16295 SDValue ExtVec = DAG.getNode(ExtOpcode, dl, ExtVT, Load);
16297 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
16298 DAG.getIntPtrConstant(0, dl));
16300 SDValue Load = DAG.getLoad(MemVT, dl, Ld->getChain(),
16302 Ld->getMemOperand());
16303 // Replace chain users with the new chain.
16304 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16305 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16307 // Finally, do a normal sign-extend to the desired register.
16308 return DAG.getNode(ExtOpcode, dl, Op.getValueType(), Load);
16311 if (NumElts <= 8) {
16312 // Only a subset of the features is available; assume we have only AVX-512F.
16313 unsigned NumBitsToLoad = NumElts < 8 ? 8 : NumElts;
16314 MVT TypeToLoad = MVT::getIntegerVT(NumBitsToLoad);
16315 SDValue Load = DAG.getLoad(TypeToLoad, dl, Ld->getChain(),
16317 Ld->getMemOperand());
16318 // Replace chain users with the new chain.
16319 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16320 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16322 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumBitsToLoad);
16323 SDValue BitVec = DAG.getBitcast(MaskVT, Load);
16326 return DAG.getNode(ExtOpcode, dl, VT, BitVec);
16328 // Handle v4i1 and v2i1: extend to an 8-element vector, then extract the subvector.
16330 MVT ExtVT = MVT::getVectorVT(VT.getScalarType(), 8);
16331 SDValue ExtVec = DAG.getNode(ExtOpcode, dl, ExtVT, BitVec);
16332 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
16333 DAG.getIntPtrConstant(0, dl));
16336 assert(VT == MVT::v32i8 && "Unexpected extload type");
16338 SmallVector<SDValue, 2> Chains;
16340 SDValue BasePtr = Ld->getBasePtr();
16341 SDValue LoadLo = DAG.getLoad(MVT::v16i1, dl, Ld->getChain(),
16343 Ld->getMemOperand());
16344 Chains.push_back(LoadLo.getValue(1));
16346 SDValue BasePtrHi =
16347 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
16348 DAG.getConstant(2, dl, BasePtr.getValueType()));
16350 SDValue LoadHi = DAG.getLoad(MVT::v16i1, dl, Ld->getChain(),
16352 Ld->getMemOperand());
16353 Chains.push_back(LoadHi.getValue(1));
16354 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16355 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
16357 SDValue Lo = DAG.getNode(ExtOpcode, dl, MVT::v16i8, LoadLo);
16358 SDValue Hi = DAG.getNode(ExtOpcode, dl, MVT::v16i8, LoadHi);
16359 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v32i8, Lo, Hi);
16362 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16363 // may emit an illegal shuffle but the expansion is still better than scalar
16364 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16365 // we'll emit a shuffle and an arithmetic shift.
16366 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16367 // TODO: It is possible to support ZExt by zeroing the undef values during
16368 // the shuffle phase or after the shuffle.
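// As an illustrative instance of the strategy: an any-extending load from
// v4i16 (8 bytes in memory) to v4i32 can become a single scalar load (i64
// where that is legal), a bitcast to v8i16, a shuffle spreading the four
// loaded elements into the even lanes (odd lanes stay undef), and a final
// bitcast to v4i32.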
16369 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget &Subtarget,
16370 SelectionDAG &DAG) {
16371 MVT RegVT = Op.getSimpleValueType();
16372 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16373 assert(RegVT.isInteger() &&
16374 "We only custom lower integer vector sext loads.");
16376 // Nothing useful we can do without SSE2 shuffles.
16377 assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2.");
16379 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16381 EVT MemVT = Ld->getMemoryVT();
16382 if (MemVT.getScalarType() == MVT::i1)
16383 return LowerExtended1BitVectorLoad(Op, Subtarget, DAG);
16385 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16386 unsigned RegSz = RegVT.getSizeInBits();
16388 ISD::LoadExtType Ext = Ld->getExtensionType();
16390 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16391 && "Only anyext and sext are currently implemented.");
16392 assert(MemVT != RegVT && "Cannot extend to the same type");
16393 assert(MemVT.isVector() && "Must load a vector from memory");
16395 unsigned NumElems = RegVT.getVectorNumElements();
16396 unsigned MemSz = MemVT.getSizeInBits();
16397 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16399 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) {
16400 // The only way in which we have a legal 256-bit vector result but not the
16401 // integer 256-bit operations needed to directly lower a sextload is if we
16402 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16403 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16404 // correctly legalized. We do this late to allow the canonical form of
16405 // sextload to persist throughout the rest of the DAG combiner -- it wants
16406 // to fold together any extensions it can, and so will fuse a sign_extend
16407 // of an sextload into a sextload targeting a wider value.
16409 if (MemSz == 128) {
16410 // Just switch this to a normal load.
16411 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16412 "it must be a legal 128-bit vector "
16414 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16415 Ld->getPointerInfo(), Ld->getAlignment(),
16416 Ld->getMemOperand()->getFlags());
16418 assert(MemSz < 128 &&
16419 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16420 // Do an sext load to a 128-bit vector type. We want to use the same
16421 // number of elements, but elements half as wide. This will end up being
16422 // recursively lowered by this routine, but will succeed as we definitely
16423 // have all the necessary features if we're using AVX1.
16425 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16426 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16428 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16429 Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
16430 Ld->getMemOperand()->getFlags());
16433 // Replace chain users with the new chain.
16434 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16435 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16437 // Finally, do a normal sign-extend to the desired register.
16438 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16441 // All sizes must be a power of two.
16442 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16443 "Non-power-of-two elements are not custom lowered!");
16445 // Attempt to load the original value using scalar loads.
16446 // Find the largest scalar type that divides the total loaded size.
16447 MVT SclrLoadTy = MVT::i8;
16448 for (MVT Tp : MVT::integer_valuetypes()) {
16449 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16454 // On 32-bit systems, we can't use 64-bit integer loads directly. Try bitcasting to f64.
16455 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16457 SclrLoadTy = MVT::f64;
16459 // Calculate the number of scalar loads that we need to perform
16460 // in order to load our vector from memory.
16461 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16463 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16464 "Can only lower sext loads with a single scalar load!");
16466 unsigned loadRegZize = RegSz;
16467 if (Ext == ISD::SEXTLOAD && RegSz >= 256)
16470 // Represent our vector as a sequence of elements which are the
16471 // largest scalar that we can load.
16472 EVT LoadUnitVecVT = EVT::getVectorVT(
16473 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16475 // Represent the data using the same element type that is stored in
16476 // memory. In practice, we "widen" MemVT.
16478 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16479 loadRegZize / MemVT.getScalarSizeInBits());
16481 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16482 "Invalid vector type");
16484 // We can't shuffle using an illegal type.
16485 assert(TLI.isTypeLegal(WideVecVT) &&
16486 "We only lower types that form legal widened vector types");
16488 SmallVector<SDValue, 8> Chains;
16489 SDValue Ptr = Ld->getBasePtr();
16490 SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, dl,
16491 TLI.getPointerTy(DAG.getDataLayout()));
16492 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16494 for (unsigned i = 0; i < NumLoads; ++i) {
16495 // Perform a single load.
16496 SDValue ScalarLoad =
16497 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16498 Ld->getAlignment(), Ld->getMemOperand()->getFlags());
16499 Chains.push_back(ScalarLoad.getValue(1));
16500 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16501 // another round of DAGCombining.
16503 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16505 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16506 ScalarLoad, DAG.getIntPtrConstant(i, dl));
16508 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16511 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16513 // Bitcast the loaded value to a vector of the original element type, in
16514 // the size of the target vector type.
16515 SDValue SlicedVec = DAG.getBitcast(WideVecVT, Res);
16516 unsigned SizeRatio = RegSz / MemSz;
16518 if (Ext == ISD::SEXTLOAD) {
16519 // If we have SSE4.1, we can directly emit a VSEXT node.
16520 if (Subtarget.hasSSE41()) {
16521 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16522 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16526 // Otherwise we'll use SIGN_EXTEND_VECTOR_INREG to sign extend the lowest
16527 // lanes.
16528 assert(TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND_VECTOR_INREG, RegVT) &&
16529 "We can't implement a sext load without SIGN_EXTEND_VECTOR_INREG!");
16531 SDValue Shuff = DAG.getSignExtendVectorInReg(SlicedVec, dl, RegVT);
16532 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16536 // Redistribute the loaded elements into the different locations.
16537 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16538 for (unsigned i = 0; i != NumElems; ++i)
16539 ShuffleVec[i * SizeRatio] = i;
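// e.g. with NumElems = 4 and SizeRatio = 2 this builds the mask
// { 0, -1, 1, -1, 2, -1, 3, -1 }.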
16541 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16542 DAG.getUNDEF(WideVecVT), ShuffleVec);
16544 // Bitcast to the requested type.
16545 Shuff = DAG.getBitcast(RegVT, Shuff);
16546 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16550 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
16551 /// each of which has no other use apart from the AND / OR.
16552 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16553 Opc = Op.getOpcode();
16554 if (Opc != ISD::OR && Opc != ISD::AND)
16556 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16557 Op.getOperand(0).hasOneUse() &&
16558 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16559 Op.getOperand(1).hasOneUse());
16562 /// Return true if the node is an ISD::XOR of an X86ISD::SETCC and 1, and the
16563 /// SETCC node has a single use.
16564 static bool isXor1OfSetCC(SDValue Op) {
16565 if (Op.getOpcode() != ISD::XOR)
16567 if (isOneConstant(Op.getOperand(1)))
16568 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16569 Op.getOperand(0).hasOneUse();
16573 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16574 bool addTest = true;
16575 SDValue Chain = Op.getOperand(0);
16576 SDValue Cond = Op.getOperand(1);
16577 SDValue Dest = Op.getOperand(2);
16580 bool Inverted = false;
16582 if (Cond.getOpcode() == ISD::SETCC) {
16583 // Check for setcc([su]{add,sub,mul}o == 0).
16584 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16585 isNullConstant(Cond.getOperand(1)) &&
16586 Cond.getOperand(0).getResNo() == 1 &&
16587 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16588 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16589 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16590 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16591 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16592 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16594 Cond = Cond.getOperand(0);
16596 if (SDValue NewCond = LowerSETCC(Cond, DAG))
16601 // FIXME: LowerXALUO doesn't handle these!!
16602 else if (Cond.getOpcode() == X86ISD::ADD ||
16603 Cond.getOpcode() == X86ISD::SUB ||
16604 Cond.getOpcode() == X86ISD::SMUL ||
16605 Cond.getOpcode() == X86ISD::UMUL)
16606 Cond = LowerXALUO(Cond, DAG);
16609 // Look past (and (setcc_carry (cmp ...)), 1).
16610 if (Cond.getOpcode() == ISD::AND &&
16611 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
16612 isOneConstant(Cond.getOperand(1)))
16613 Cond = Cond.getOperand(0);
16615 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16616 // setting operand in place of the X86ISD::SETCC.
16617 unsigned CondOpcode = Cond.getOpcode();
16618 if (CondOpcode == X86ISD::SETCC ||
16619 CondOpcode == X86ISD::SETCC_CARRY) {
16620 CC = Cond.getOperand(0);
16622 SDValue Cmp = Cond.getOperand(1);
16623 unsigned Opc = Cmp.getOpcode();
16624 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16625 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16629 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16633 // These can only come from an arithmetic instruction with overflow,
16634 // e.g. SADDO, UADDO.
16635 Cond = Cond.getNode()->getOperand(1);
16641 CondOpcode = Cond.getOpcode();
16642 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16643 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16644 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16645 Cond.getOperand(0).getValueType() != MVT::i8)) {
16646 SDValue LHS = Cond.getOperand(0);
16647 SDValue RHS = Cond.getOperand(1);
16648 unsigned X86Opcode;
16651 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16652 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16653 // X86ISD::INC).
16654 switch (CondOpcode) {
16655 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16657 if (isOneConstant(RHS)) {
16658 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16661 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16662 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16664 if (isOneConstant(RHS)) {
16665 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16668 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16669 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16670 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16671 default: llvm_unreachable("unexpected overflowing operator");
16674 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16675 if (CondOpcode == ISD::UMULO)
16676 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16679 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16681 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16683 if (CondOpcode == ISD::UMULO)
16684 Cond = X86Op.getValue(2);
16686 Cond = X86Op.getValue(1);
16688 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
16692 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16693 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16694 if (CondOpc == ISD::OR) {
16695 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16696 // two branches instead of an explicit OR instruction with a
16697 // separate test.
16698 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16699 isX86LogicalCmp(Cmp)) {
16700 CC = Cond.getOperand(0).getOperand(0);
16701 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16702 Chain, Dest, CC, Cmp);
16703 CC = Cond.getOperand(1).getOperand(0);
16707 } else { // ISD::AND
16708 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16709 // two branches instead of an explicit AND instruction with a
16710 // separate test. However, we only do this if this block doesn't
16711 // have a fall-through edge, because this requires an explicit
16712 // jmp when the condition is false.
16713 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16714 isX86LogicalCmp(Cmp) &&
16715 Op.getNode()->hasOneUse()) {
16716 X86::CondCode CCode =
16717 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16718 CCode = X86::GetOppositeBranchCondition(CCode);
16719 CC = DAG.getConstant(CCode, dl, MVT::i8);
16720 SDNode *User = *Op.getNode()->use_begin();
16721 // Look for an unconditional branch following this conditional branch.
16722 // We need this because we need to reverse the successors in order
16723 // to implement FCMP_OEQ.
16724 if (User->getOpcode() == ISD::BR) {
16725 SDValue FalseBB = User->getOperand(1);
16727 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16728 assert(NewBR == User);
16732 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16733 Chain, Dest, CC, Cmp);
16734 X86::CondCode CCode =
16735 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16736 CCode = X86::GetOppositeBranchCondition(CCode);
16737 CC = DAG.getConstant(CCode, dl, MVT::i8);
16743 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16744 // Recognize 'xorb (setcc), 1' patterns. The xor inverts the condition.
16745 // It should be transformed during DAG combining, except when the condition
16746 // is set by an arithmetic-with-overflow node.
16747 X86::CondCode CCode =
16748 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16749 CCode = X86::GetOppositeBranchCondition(CCode);
16750 CC = DAG.getConstant(CCode, dl, MVT::i8);
16751 Cond = Cond.getOperand(0).getOperand(1);
16753 } else if (Cond.getOpcode() == ISD::SETCC &&
16754 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16755 // For FCMP_OEQ, we can emit
16756 // two branches instead of an explicit AND instruction with a
16757 // separate test. However, we only do this if this block doesn't
16758 // have a fall-through edge, because this requires an explicit
16759 // jmp when the condition is false.
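// A sketch of the emitted control flow (assuming the following unconditional
// branch can be retargeted as done below):
//   ucomiss a, b
//   jne  <false>     ; ordered inequality, or NaN
//   jp   <false>     ; unordered (NaN)
//   ...              ; the equal-and-ordered path continues here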
16760 if (Op.getNode()->hasOneUse()) {
16761 SDNode *User = *Op.getNode()->use_begin();
16762 // Look for an unconditional branch following this conditional branch.
16763 // We need this because we need to reverse the successors in order
16764 // to implement FCMP_OEQ.
16765 if (User->getOpcode() == ISD::BR) {
16766 SDValue FalseBB = User->getOperand(1);
16768 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16769 assert(NewBR == User);
16773 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16774 Cond.getOperand(0), Cond.getOperand(1));
16775 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16776 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
16777 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16778 Chain, Dest, CC, Cmp);
16779 CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
16784 } else if (Cond.getOpcode() == ISD::SETCC &&
16785 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16786 // For FCMP_UNE, we can emit
16787 // two branches instead of an explicit AND instruction with a
16788 // separate test. However, we only do this if this block doesn't
16789 // have a fall-through edge, because this requires an explicit
16790 // jmp when the condition is false.
16791 if (Op.getNode()->hasOneUse()) {
16792 SDNode *User = *Op.getNode()->use_begin();
16793 // Look for an unconditional branch following this conditional branch.
16794 // We need this because we need to reverse the successors in order
16795 // to implement FCMP_UNE.
16796 if (User->getOpcode() == ISD::BR) {
16797 SDValue FalseBB = User->getOperand(1);
16799 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16800 assert(NewBR == User);
16803 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16804 Cond.getOperand(0), Cond.getOperand(1));
16805 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16806 CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
16807 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16808 Chain, Dest, CC, Cmp);
16809 CC = DAG.getConstant(X86::COND_NP, dl, MVT::i8);
16819 // Look past the truncate if the high bits are known zero.
16820 Cond = getCondAfterTruncWithZeroHighBitsInput(Cond, DAG);
16822 // We know the result of AND is compared against zero. Try to match
16823 // it to BT.
16824 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16825 if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG)) {
16826 CC = NewSetCC.getOperand(0);
16827 Cond = NewSetCC.getOperand(1);
16834 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16835 CC = DAG.getConstant(X86Cond, dl, MVT::i8);
16836 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16838 Cond = ConvertCmpIfNecessary(Cond, DAG);
16839 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16840 Chain, Dest, CC, Cond);
16843 // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets.
16844 // Calls to _alloca are needed to probe the stack when allocating more than 4K
16845 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16846 // that the guard pages used by the OS virtual memory manager are allocated in
16847 // the correct sequence.
16849 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16850 SelectionDAG &DAG) const {
16851 MachineFunction &MF = DAG.getMachineFunction();
16852 bool SplitStack = MF.shouldSplitStack();
16853 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
16858 SDNode *Node = Op.getNode();
16859 SDValue Chain = Op.getOperand(0);
16860 SDValue Size = Op.getOperand(1);
16861 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16862 EVT VT = Node->getValueType(0);
16864 // Chain the dynamic stack allocation so that it doesn't modify the stack
16865 // pointer when other instructions are using the stack.
16866 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, dl, true), dl);
16868 bool Is64Bit = Subtarget.is64Bit();
16869 MVT SPTy = getPointerTy(DAG.getDataLayout());
16873 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16874 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16875 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16876 " not tell us which reg is the stack pointer!");
16878 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16879 Chain = SP.getValue(1);
16880 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
16881 unsigned StackAlign = TFI.getStackAlignment();
16882 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16883 if (Align > StackAlign)
16884 Result = DAG.getNode(ISD::AND, dl, VT, Result,
16885 DAG.getConstant(-(uint64_t)Align, dl, VT));
16886 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
16887 } else if (SplitStack) {
16888 MachineRegisterInfo &MRI = MF.getRegInfo();
16891 // The 64-bit implementation of segmented stacks needs to clobber both r10
16892 // and r11. This makes it impossible to use it along with nested parameters.
16893 const Function *F = MF.getFunction();
16894 for (const auto &A : F->args()) {
16895 if (A.hasNestAttr())
16896 report_fatal_error("Cannot use segmented stacks with functions that "
16897 "have nested arguments.");
16901 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
16902 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16903 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16904 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16905 DAG.getRegister(Vreg, SPTy));
16907 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16908 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
16909 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
16911 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
16912 unsigned SPReg = RegInfo->getStackRegister();
16913 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16914 Chain = SP.getValue(1);
16917 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
16918 DAG.getConstant(-(uint64_t)Align, dl, VT));
16919 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
16925 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
16926 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
16928 SDValue Ops[2] = {Result, Chain};
16929 return DAG.getMergeValues(Ops, dl);
16932 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
16933 MachineFunction &MF = DAG.getMachineFunction();
16934 auto PtrVT = getPointerTy(MF.getDataLayout());
16935 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
16937 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16940 if (!Subtarget.is64Bit() ||
16941 Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv())) {
16942 // vastart just stores the address of the VarArgsFrameIndex slot into the
16943 // memory location argument.
16944 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
16945 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
16946 MachinePointerInfo(SV));
16950 // gp_offset (0 - 6 * 8)
16951 // fp_offset (48 - 48 + 8 * 16)
16952 // overflow_arg_area (points to parameters coming in memory).
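// For reference, a sketch of the SysV AMD64 __va_list_tag this code fills in
// (offsets assume LP64; not a declaration from this file):
//   typedef struct {
//     unsigned int gp_offset;   // offset 0
//     unsigned int fp_offset;   // offset 4
//     void *overflow_arg_area;  // offset 8
//     void *reg_save_area;      // offset 16
//   } __va_list_tag[1];
// The four stores built below initialize these fields in order.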
16954 SmallVector<SDValue, 8> MemOps;
16955 SDValue FIN = Op.getOperand(1);
16957 SDValue Store = DAG.getStore(
16958 Op.getOperand(0), DL,
16959 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
16960 MachinePointerInfo(SV));
16961 MemOps.push_back(Store);
16964 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
16965 Store = DAG.getStore(
16966 Op.getOperand(0), DL,
16967 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
16968 MachinePointerInfo(SV, 4));
16969 MemOps.push_back(Store);
16971 // Store ptr to overflow_arg_area
16972 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
16973 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
16975 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
16976 MemOps.push_back(Store);
16978 // Store ptr to reg_save_area.
16979 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
16980 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
16981 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
16982 Store = DAG.getStore(
16983 Op.getOperand(0), DL, RSFIN, FIN,
16984 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
16985 MemOps.push_back(Store);
16986 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
16989 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
16990 assert(Subtarget.is64Bit() &&
16991 "LowerVAARG only handles 64-bit va_arg!");
16992 assert(Op.getNode()->getNumOperands() == 4);
16994 MachineFunction &MF = DAG.getMachineFunction();
16995 if (Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()))
16996 // The Win64 ABI uses char* instead of a structure.
16997 return DAG.expandVAArg(Op.getNode());
16999 SDValue Chain = Op.getOperand(0);
17000 SDValue SrcPtr = Op.getOperand(1);
17001 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17002 unsigned Align = Op.getConstantOperandVal(3);
17005 EVT ArgVT = Op.getNode()->getValueType(0);
17006 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17007 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
17010 // Decide which area this value should be read from.
17011 // TODO: Implement the AMD64 ABI in its entirety. This simple
17012 // selection mechanism works only for the basic types.
17013 if (ArgVT == MVT::f80) {
17014 llvm_unreachable("va_arg for f80 not yet implemented");
17015 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17016 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17017 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17018 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17020 llvm_unreachable("Unhandled argument type in LowerVAARG");
17023 if (ArgMode == 2) {
17024 // Sanity Check: Make sure using fp_offset makes sense.
17025 assert(!Subtarget.useSoftFloat() &&
17026 !(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
17027 Subtarget.hasSSE1());
17030 // Insert VAARG_64 node into the DAG
17031 // VAARG_64 returns two values: Variable Argument Address, Chain
17032 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
17033 DAG.getConstant(ArgMode, dl, MVT::i8),
17034 DAG.getConstant(Align, dl, MVT::i32)};
17035 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
17036 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17037 VTs, InstOps, MVT::i64,
17038 MachinePointerInfo(SV),
17040 /*Volatile=*/false,
17042 /*WriteMem=*/true);
17043 Chain = VAARG.getValue(1);
17045 // Load the next argument and return it
17046 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
17049 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
17050 SelectionDAG &DAG) {
17051 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
17052 // where a va_list is still an i8*.
17053 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
17054 if (Subtarget.isCallingConvWin64(
17055 DAG.getMachineFunction().getFunction()->getCallingConv()))
17056 // Probably a Win64 va_copy.
17057 return DAG.expandVACopy(Op.getNode());
17059 SDValue Chain = Op.getOperand(0);
17060 SDValue DstPtr = Op.getOperand(1);
17061 SDValue SrcPtr = Op.getOperand(2);
17062 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17063 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
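// sizeof(__va_list_tag) is 4 + 4 + 8 + 8 = 24 bytes with 8-byte alignment,
// which is why the memcpy below copies 24 bytes with align 8.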
17066 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17067 DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
17069 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17072 /// Handle vector element shifts where the shift amount is a constant.
17073 /// Takes immediate version of shift as input.
17074 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
17075 SDValue SrcOp, uint64_t ShiftAmt,
17076 SelectionDAG &DAG) {
17077 MVT ElementType = VT.getVectorElementType();
17079 // Fold this packed shift into its first operand if ShiftAmt is 0.
17083 // Check for ShiftAmt >= element width
17084 if (ShiftAmt >= ElementType.getSizeInBits()) {
17085 if (Opc == X86ISD::VSRAI)
17086 ShiftAmt = ElementType.getSizeInBits() - 1;
17088 return DAG.getConstant(0, dl, VT);
17091 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17092 && "Unknown target vector shift-by-constant node");
17094 // Fold this packed vector shift into a build vector if SrcOp is a
17095 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
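// For example (illustrative only): (VSHLI (build_vector <1, 2, 3, 4>), 2)
// folds to (build_vector <4, 8, 12, 16>) by shifting each constant element.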
17096 if (VT == SrcOp.getSimpleValueType() &&
17097 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17098 SmallVector<SDValue, 8> Elts;
17099 unsigned NumElts = SrcOp->getNumOperands();
17100 ConstantSDNode *ND;
17103 default: llvm_unreachable("Unknown opcode!");
17104 case X86ISD::VSHLI:
17105 for (unsigned i=0; i!=NumElts; ++i) {
17106 SDValue CurrentOp = SrcOp->getOperand(i);
17107 if (CurrentOp->isUndef()) {
17108 Elts.push_back(CurrentOp);
17111 ND = cast<ConstantSDNode>(CurrentOp);
17112 const APInt &C = ND->getAPIntValue();
17113 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
17116 case X86ISD::VSRLI:
17117 for (unsigned i=0; i!=NumElts; ++i) {
17118 SDValue CurrentOp = SrcOp->getOperand(i);
17119 if (CurrentOp->isUndef()) {
17120 Elts.push_back(CurrentOp);
17123 ND = cast<ConstantSDNode>(CurrentOp);
17124 const APInt &C = ND->getAPIntValue();
17125 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
17128 case X86ISD::VSRAI:
17129 for (unsigned i=0; i!=NumElts; ++i) {
17130 SDValue CurrentOp = SrcOp->getOperand(i);
17131 if (CurrentOp->isUndef()) {
17132 Elts.push_back(CurrentOp);
17135 ND = cast<ConstantSDNode>(CurrentOp);
17136 const APInt &C = ND->getAPIntValue();
17137 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
17142 return DAG.getBuildVector(VT, dl, Elts);
17145 return DAG.getNode(Opc, dl, VT, SrcOp,
17146 DAG.getConstant(ShiftAmt, dl, MVT::i8));
17149 /// Handle vector element shifts where the shift amount may or may not be a
17150 /// constant. Takes immediate version of shift as input.
17151 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
17152 SDValue SrcOp, SDValue ShAmt,
17153 SelectionDAG &DAG) {
17154 MVT SVT = ShAmt.getSimpleValueType();
17155 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17157 // Catch shift-by-constant.
17158 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17159 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17160 CShAmt->getZExtValue(), DAG);
17162 // Change opcode to non-immediate version
17164 default: llvm_unreachable("Unknown target vector shift node");
17165 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17166 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17167 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17170 const X86Subtarget &Subtarget =
17171 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17172 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17173 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17174 // Let the shuffle legalizer expand this shift amount node.
17175 SDValue Op0 = ShAmt.getOperand(0);
17176 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17177 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, Subtarget, DAG);
17179 // Need to build a vector containing the shift amount.
17180 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
17181 SmallVector<SDValue, 4> ShOps;
17182 ShOps.push_back(ShAmt);
17183 if (SVT == MVT::i32) {
17184 ShOps.push_back(DAG.getConstant(0, dl, SVT));
17185 ShOps.push_back(DAG.getUNDEF(SVT));
17187 ShOps.push_back(DAG.getUNDEF(SVT));
17189 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17190 ShAmt = DAG.getBuildVector(BVT, dl, ShOps);
17193 // The return type has to be a 128-bit type with the same element
17194 // type as the input type.
17195 MVT EltVT = VT.getVectorElementType();
17196 MVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17198 ShAmt = DAG.getBitcast(ShVT, ShAmt);
17199 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17202 /// \brief Return \p Mask with the necessary casting or extending
17203 /// according to \p MaskVT when lowering masking intrinsics.
17204 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
17205 const X86Subtarget &Subtarget, SelectionDAG &DAG,
17208 if (isAllOnesConstant(Mask))
17209 return DAG.getTargetConstant(1, dl, MaskVT);
17210 if (X86::isZeroNode(Mask))
17211 return DAG.getTargetConstant(0, dl, MaskVT);
17213 if (MaskVT.bitsGT(Mask.getSimpleValueType())) {
17214 // Mask should be extended
17215 Mask = DAG.getNode(ISD::ANY_EXTEND, dl,
17216 MVT::getIntegerVT(MaskVT.getSizeInBits()), Mask);
17219 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
17220 if (MaskVT == MVT::v64i1) {
17221 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
17222 // In 32-bit mode a bitcast of i64 is illegal; extend/split it instead.
17224 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
17225 DAG.getConstant(0, dl, MVT::i32));
17226 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
17227 DAG.getConstant(1, dl, MVT::i32));
17229 Lo = DAG.getBitcast(MVT::v32i1, Lo);
17230 Hi = DAG.getBitcast(MVT::v32i1, Hi);
17232 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
17234 // MaskVT requires < 64 bits. Truncate the mask (should succeed in any case) and bitcast it to the required type.
17236 MVT TruncVT = MVT::getIntegerVT(MaskVT.getSizeInBits());
17237 return DAG.getBitcast(MaskVT,
17238 DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Mask));
17242 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
17243 Mask.getSimpleValueType().getSizeInBits());
17244 // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements
17245 // are extracted by the EXTRACT_SUBVECTOR below.
17246 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17247 DAG.getBitcast(BitcastVT, Mask),
17248 DAG.getIntPtrConstant(0, dl));
17252 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17253 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17254 /// necessary casting or extending for \p Mask when lowering masking intrinsics
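/// For example (illustrative only), a masked arithmetic intrinsic becomes
/// (vselect Mask, (add A, B), PassThru), while a masked compare becomes
/// (and Cmp, Mask) because its result is already a mask.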
17255 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17256 SDValue PreservedSrc,
17257 const X86Subtarget &Subtarget,
17258 SelectionDAG &DAG) {
17259 MVT VT = Op.getSimpleValueType();
17260 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
17261 unsigned OpcodeSelect = ISD::VSELECT;
17264 if (isAllOnesConstant(Mask))
17267 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
17269 switch (Op.getOpcode()) {
17271 case X86ISD::PCMPEQM:
17272 case X86ISD::PCMPGTM:
17274 case X86ISD::CMPMU:
17275 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17276 case X86ISD::VFPCLASS:
17277 case X86ISD::VFPCLASSS:
17278 return DAG.getNode(ISD::OR, dl, VT, Op, VMask);
17279 case X86ISD::VTRUNC:
17280 case X86ISD::VTRUNCS:
17281 case X86ISD::VTRUNCUS:
17282 case ISD::FP_TO_FP16:
17283 // We can't use ISD::VSELECT here because it is not always "Legal"
17284 // for the destination type. For example, vpmovqb requires only AVX512F,
17285 // while a vselect that operates on byte elements requires AVX512BW.
17286 OpcodeSelect = X86ISD::SELECT;
17289 if (PreservedSrc.isUndef())
17290 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17291 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
17294 /// \brief Creates an SDNode for a predicated scalar operation.
17295 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17296 /// The mask comes in as MVT::i8 and should be truncated
17297 /// to MVT::i1 while lowering masking intrinsics.
17298 /// The main difference between ScalarMaskingNode and VectorMaskingNode is the
17299 /// use of "X86select" instead of "vselect": we just can't create a "vselect"
17300 /// node for a scalar instruction.
17301 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17302 SDValue PreservedSrc,
17303 const X86Subtarget &Subtarget,
17304 SelectionDAG &DAG) {
17305 if (isAllOnesConstant(Mask))
17308 MVT VT = Op.getSimpleValueType();
17310 // The mask should be of type MVT::i1
17311 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17313 if (Op.getOpcode() == X86ISD::FSETCC)
17314 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
17315 if (Op.getOpcode() == X86ISD::VFPCLASS ||
17316 Op.getOpcode() == X86ISD::VFPCLASSS)
17317 return DAG.getNode(ISD::OR, dl, VT, Op, IMask);
17319 if (PreservedSrc.isUndef())
17320 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17321 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17324 static int getSEHRegistrationNodeSize(const Function *Fn) {
17325 if (!Fn->hasPersonalityFn())
17326 report_fatal_error(
17327 "querying registration node size for function without personality");
17328 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
17329 // WinEHStatePass for the full struct definition.
17330 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
17331 case EHPersonality::MSVC_X86SEH: return 24;
17332 case EHPersonality::MSVC_CXX: return 16;
17335 report_fatal_error(
17336 "can only recover FP for 32-bit MSVC EH personality functions");
17339 /// When the MSVC runtime transfers control to us, either to an outlined
17340 /// function or when returning to a parent frame after catching an exception, we
17341 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
17342 /// Here's the math:
17343 /// RegNodeBase = EntryEBP - RegNodeSize
17344 /// ParentFP = RegNodeBase - ParentFrameOffset
17345 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
17346 /// subtracting the offset (negative on x86) takes us back to the parent FP.
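/// For example (hypothetical numbers): with the MSVC C++ personality the
/// registration node is 16 bytes, so for EntryEBP = 0x1000 and a recorded
/// ParentFrameOffset of -64, RegNodeBase = 0x1000 - 16 = 0xFF0 and
/// ParentFP = 0xFF0 - (-64) = 0x1030.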
17347 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
17348 SDValue EntryEBP) {
17349 MachineFunction &MF = DAG.getMachineFunction();
17352 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17353 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
17355 // It's possible that the parent function no longer has a personality function
17356 // if the exceptional code was optimized away, in which case we just return
17357 // the incoming EBP.
17358 if (!Fn->hasPersonalityFn())
17361 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
17362 // registration, or the .set_setframe offset.
17363 MCSymbol *OffsetSym =
17364 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
17365 GlobalValue::getRealLinkageName(Fn->getName()));
17366 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
17367 SDValue ParentFrameOffset =
17368 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
17370 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
17371 // prologue to RBP in the parent function.
17372 const X86Subtarget &Subtarget =
17373 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17374 if (Subtarget.is64Bit())
17375 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
17377 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
17378 // RegNodeBase = EntryEBP - RegNodeSize
17379 // ParentFP = RegNodeBase - ParentFrameOffset
17380 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
17381 DAG.getConstant(RegNodeSize, dl, PtrVT));
17382 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
17385 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
17386 SelectionDAG &DAG) {
17388 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17389 MVT VT = Op.getSimpleValueType();
17390 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17392 switch(IntrData->Type) {
17393 case INTR_TYPE_1OP:
17394 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17395 case INTR_TYPE_2OP:
17396 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17398 case INTR_TYPE_2OP_IMM8:
17399 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17400 DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(2)));
17401 case INTR_TYPE_3OP:
17402 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17403 Op.getOperand(2), Op.getOperand(3));
17404 case INTR_TYPE_4OP:
17405 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17406 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
17407 case INTR_TYPE_1OP_MASK_RM: {
17408 SDValue Src = Op.getOperand(1);
17409 SDValue PassThru = Op.getOperand(2);
17410 SDValue Mask = Op.getOperand(3);
17411 SDValue RoundingMode;
17412 // We always add a rounding mode to the node.
17413 // If the rounding mode is not specified, we add the
17414 // "current direction" mode.
17415 if (Op.getNumOperands() == 4)
17417 DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
17419 RoundingMode = Op.getOperand(4);
17420 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17421 if (IntrWithRoundingModeOpcode != 0)
17422 if (cast<ConstantSDNode>(RoundingMode)->getZExtValue() !=
17423 X86::STATIC_ROUNDING::CUR_DIRECTION)
17424 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17425 dl, Op.getValueType(), Src, RoundingMode),
17426 Mask, PassThru, Subtarget, DAG);
17427 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17429 Mask, PassThru, Subtarget, DAG);
17431 case INTR_TYPE_1OP_MASK: {
17432 SDValue Src = Op.getOperand(1);
17433 SDValue PassThru = Op.getOperand(2);
17434 SDValue Mask = Op.getOperand(3);
17435 // We add the rounding mode to the node when
17436 // - the RM opcode is specified and
17437 // - RM is not "current direction".
17438 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17439 if (IntrWithRoundingModeOpcode != 0) {
17440 SDValue Rnd = Op.getOperand(4);
17441 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
17442 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17443 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17444 dl, Op.getValueType(),
17446 Mask, PassThru, Subtarget, DAG);
17449 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
17450 Mask, PassThru, Subtarget, DAG);
17452 case INTR_TYPE_SCALAR_MASK: {
17453 SDValue Src1 = Op.getOperand(1);
17454 SDValue Src2 = Op.getOperand(2);
17455 SDValue passThru = Op.getOperand(3);
17456 SDValue Mask = Op.getOperand(4);
17457 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2),
17458 Mask, passThru, Subtarget, DAG);
17460 case INTR_TYPE_SCALAR_MASK_RM: {
17461 SDValue Src1 = Op.getOperand(1);
17462 SDValue Src2 = Op.getOperand(2);
17463 SDValue Src0 = Op.getOperand(3);
17464 SDValue Mask = Op.getOperand(4);
17465 // There are 2 kinds of intrinsics in this group:
17466 // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
17467 // (2) With both rounding mode and sae - 7 operands.
17468 if (Op.getNumOperands() == 6) {
17469 SDValue Sae = Op.getOperand(5);
17470 unsigned Opc = IntrData->Opc1 ? IntrData->Opc1 : IntrData->Opc0;
17471 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2,
17473 Mask, Src0, Subtarget, DAG);
17475 assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form");
17476 SDValue RoundingMode = Op.getOperand(5);
17477 SDValue Sae = Op.getOperand(6);
17478 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17479 RoundingMode, Sae),
17480 Mask, Src0, Subtarget, DAG);
17482 case INTR_TYPE_2OP_MASK:
17483 case INTR_TYPE_2OP_IMM8_MASK: {
17484 SDValue Src1 = Op.getOperand(1);
17485 SDValue Src2 = Op.getOperand(2);
17486 SDValue PassThru = Op.getOperand(3);
17487 SDValue Mask = Op.getOperand(4);
17489 if (IntrData->Type == INTR_TYPE_2OP_IMM8_MASK)
17490 Src2 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src2);
17492 // We specify 2 possible opcodes for intrinsics with rounding modes.
17493 // First, we check if the intrinsic may have a non-default rounding mode
17494 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
17495 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17496 if (IntrWithRoundingModeOpcode != 0) {
17497 SDValue Rnd = Op.getOperand(5);
17498 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
17499 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17500 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17501 dl, Op.getValueType(),
17503 Mask, PassThru, Subtarget, DAG);
17506 // TODO: Intrinsics should have fast-math-flags to propagate.
17507 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,Src1,Src2),
17508 Mask, PassThru, Subtarget, DAG);
17510 case INTR_TYPE_2OP_MASK_RM: {
17511 SDValue Src1 = Op.getOperand(1);
17512 SDValue Src2 = Op.getOperand(2);
17513 SDValue PassThru = Op.getOperand(3);
17514 SDValue Mask = Op.getOperand(4);
17515 // We specify 2 possible modes for intrinsics: with/without rounding mode.
17517 // First, we check if the intrinsic has a rounding mode (6 operands);
17518 // if not, we set the rounding mode to "current".
17520 if (Op.getNumOperands() == 6)
17521 Rnd = Op.getOperand(5);
17523 Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
17524 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17526 Mask, PassThru, Subtarget, DAG);
17528 case INTR_TYPE_3OP_SCALAR_MASK_RM: {
17529 SDValue Src1 = Op.getOperand(1);
17530 SDValue Src2 = Op.getOperand(2);
17531 SDValue Src3 = Op.getOperand(3);
17532 SDValue PassThru = Op.getOperand(4);
17533 SDValue Mask = Op.getOperand(5);
17534 SDValue Sae = Op.getOperand(6);
17536 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
17538 Mask, PassThru, Subtarget, DAG);
17540 case INTR_TYPE_3OP_MASK_RM: {
17541 SDValue Src1 = Op.getOperand(1);
17542 SDValue Src2 = Op.getOperand(2);
17543 SDValue Imm = Op.getOperand(3);
17544 SDValue PassThru = Op.getOperand(4);
17545 SDValue Mask = Op.getOperand(5);
17546 // We specify 2 possible modes for intrinsics: with/without rounding mode.
17548 // First, we check if the intrinsic has a rounding mode (7 operands);
17549 // if not, we set the rounding mode to "current".
17551 if (Op.getNumOperands() == 7)
17552 Rnd = Op.getOperand(6);
17554 Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
17555 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17556 Src1, Src2, Imm, Rnd),
17557 Mask, PassThru, Subtarget, DAG);
17559 case INTR_TYPE_3OP_IMM8_MASK:
17560 case INTR_TYPE_3OP_MASK:
17561 case INSERT_SUBVEC: {
17562 SDValue Src1 = Op.getOperand(1);
17563 SDValue Src2 = Op.getOperand(2);
17564 SDValue Src3 = Op.getOperand(3);
17565 SDValue PassThru = Op.getOperand(4);
17566 SDValue Mask = Op.getOperand(5);
17568 if (IntrData->Type == INTR_TYPE_3OP_IMM8_MASK)
17569 Src3 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src3);
17570 else if (IntrData->Type == INSERT_SUBVEC) {
17571 // Adapt imm to ISD::INSERT_SUBVECTOR behavior: convert the subvector index into a starting element index.
17572 assert(isa<ConstantSDNode>(Src3) && "Expected a ConstantSDNode here!");
17573 unsigned Imm = cast<ConstantSDNode>(Src3)->getZExtValue();
17574 Imm *= Src2.getSimpleValueType().getVectorNumElements();
17575 Src3 = DAG.getTargetConstant(Imm, dl, MVT::i32);
17578 // We specify 2 possible opcodes for intrinsics with rounding modes.
17579 // First, we check if the intrinsic may have a non-default rounding mode
17580 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
17581 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17582 if (IntrWithRoundingModeOpcode != 0) {
17583 SDValue Rnd = Op.getOperand(6);
17584 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
17585 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17586 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17587 dl, Op.getValueType(),
17588 Src1, Src2, Src3, Rnd),
17589 Mask, PassThru, Subtarget, DAG);
17592 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17594 Mask, PassThru, Subtarget, DAG);
17596 case VPERM_2OP_MASK : {
17597 SDValue Src1 = Op.getOperand(1);
17598 SDValue Src2 = Op.getOperand(2);
17599 SDValue PassThru = Op.getOperand(3);
17600 SDValue Mask = Op.getOperand(4);
17602 // Swap Src1 and Src2 in the node creation
17603 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1),
17604 Mask, PassThru, Subtarget, DAG);
17606 case VPERM_3OP_MASKZ:
17607 case VPERM_3OP_MASK:{
17608 // Src2 is the PassThru
17609 SDValue Src1 = Op.getOperand(1);
17610 SDValue Src2 = Op.getOperand(2);
17611 SDValue Src3 = Op.getOperand(3);
17612 SDValue Mask = Op.getOperand(4);
17613 MVT VT = Op.getSimpleValueType();
17614 SDValue PassThru = SDValue();
17616 // set PassThru element
17617 if (IntrData->Type == VPERM_3OP_MASKZ)
17618 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
17620 PassThru = DAG.getBitcast(VT, Src2);
17622 // Swap Src1 and Src2 in the node creation
17623 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17624 dl, Op.getValueType(),
17626 Mask, PassThru, Subtarget, DAG);
17630 case FMA_OP_MASK: {
17631 SDValue Src1 = Op.getOperand(1);
17632 SDValue Src2 = Op.getOperand(2);
17633 SDValue Src3 = Op.getOperand(3);
17634 SDValue Mask = Op.getOperand(4);
17635 MVT VT = Op.getSimpleValueType();
17636 SDValue PassThru = SDValue();
17638 // set PassThru element
17639 if (IntrData->Type == FMA_OP_MASKZ)
17640 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
17641 else if (IntrData->Type == FMA_OP_MASK3)
17646 // We specify 2 possible opcodes for intrinsics with rounding modes.
17647 // First, we check if the intrinsic may have a non-default rounding mode
17648 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
17649 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17650 if (IntrWithRoundingModeOpcode != 0) {
17651 SDValue Rnd = Op.getOperand(5);
17652 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17653 X86::STATIC_ROUNDING::CUR_DIRECTION)
17654 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17655 dl, Op.getValueType(),
17656 Src1, Src2, Src3, Rnd),
17657 Mask, PassThru, Subtarget, DAG);
17659 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17660 dl, Op.getValueType(),
17662 Mask, PassThru, Subtarget, DAG);
17664 case FMA_OP_SCALAR_MASK:
17665 case FMA_OP_SCALAR_MASK3:
17666 case FMA_OP_SCALAR_MASKZ: {
17667 SDValue Src1 = Op.getOperand(1);
17668 SDValue Src2 = Op.getOperand(2);
17669 SDValue Src3 = Op.getOperand(3);
17670 SDValue Mask = Op.getOperand(4);
17671 MVT VT = Op.getSimpleValueType();
17672 SDValue PassThru = SDValue();
17674 // set PassThru element
17675 if (IntrData->Type == FMA_OP_SCALAR_MASKZ)
17676 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
17677 else if (IntrData->Type == FMA_OP_SCALAR_MASK3)
17682 SDValue Rnd = Op.getOperand(5);
17683 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl,
17684 Op.getValueType(), Src1, Src2,
17686 Mask, PassThru, Subtarget, DAG);
17688 case TERLOG_OP_MASK:
17689 case TERLOG_OP_MASKZ: {
17690 SDValue Src1 = Op.getOperand(1);
17691 SDValue Src2 = Op.getOperand(2);
17692 SDValue Src3 = Op.getOperand(3);
17693 SDValue Src4 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(4));
17694 SDValue Mask = Op.getOperand(5);
17695 MVT VT = Op.getSimpleValueType();
17696 SDValue PassThru = Src1;
17697 // Set PassThru element.
17698 if (IntrData->Type == TERLOG_OP_MASKZ)
17699 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
17701 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17702 Src1, Src2, Src3, Src4),
17703 Mask, PassThru, Subtarget, DAG);
17706 // FPclass intrinsics with mask
17707 SDValue Src1 = Op.getOperand(1);
17708 MVT VT = Src1.getSimpleValueType();
17709 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
17710 SDValue Imm = Op.getOperand(2);
17711 SDValue Mask = Op.getOperand(3);
17712 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
17713 Mask.getSimpleValueType().getSizeInBits());
17714 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MaskVT, Src1, Imm);
17715 SDValue FPclassMask = getVectorMaskingNode(FPclass, Mask,
17716 DAG.getTargetConstant(0, dl, MaskVT),
17718 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17719 DAG.getUNDEF(BitcastVT), FPclassMask,
17720 DAG.getIntPtrConstant(0, dl));
17721 return DAG.getBitcast(Op.getValueType(), Res);
17724 SDValue Src1 = Op.getOperand(1);
17725 SDValue Imm = Op.getOperand(2);
17726 SDValue Mask = Op.getOperand(3);
17727 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::i1, Src1, Imm);
17728 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask,
17729 DAG.getTargetConstant(0, dl, MVT::i1), Subtarget, DAG);
17730 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i8, FPclassMask);
17733 case CMP_MASK_CC: {
17734 // Comparison intrinsics with masks.
17735 // Example of transformation:
17736 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17737 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17738 // (i8 (bitcast
17739 // (v8i1 (insert_subvector undef,
17740 // (v2i1 (and (PCMPEQM %a, %b),
17741 // (extract_subvector
17742 // (v8i1 (bitcast %mask)), 0))), 0))))
17743 MVT VT = Op.getOperand(1).getSimpleValueType();
17744 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
17745 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17746 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
17747 Mask.getSimpleValueType().getSizeInBits());
17749 if (IntrData->Type == CMP_MASK_CC) {
17750 SDValue CC = Op.getOperand(3);
17751 CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CC);
17752 // We specify 2 possible opcodes for intrinsics with rounding modes.
17753 // First, we check if the intrinsic may have a non-default rounding mode
17754 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
17755 if (IntrData->Opc1 != 0) {
17756 SDValue Rnd = Op.getOperand(5);
17757 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17758 X86::STATIC_ROUNDING::CUR_DIRECTION)
17759 Cmp = DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
17760 Op.getOperand(2), CC, Rnd);
17762 // Default rounding mode.
17764 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17765 Op.getOperand(2), CC);
17768 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17769 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17772 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17773 DAG.getTargetConstant(0, dl,
17776 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17777 DAG.getUNDEF(BitcastVT), CmpMask,
17778 DAG.getIntPtrConstant(0, dl));
17779 return DAG.getBitcast(Op.getValueType(), Res);
17781 case CMP_MASK_SCALAR_CC: {
17782 SDValue Src1 = Op.getOperand(1);
17783 SDValue Src2 = Op.getOperand(2);
17784 SDValue CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(3));
17785 SDValue Mask = Op.getOperand(4);
17788 if (IntrData->Opc1 != 0) {
17789 SDValue Rnd = Op.getOperand(5);
17790 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17791 X86::STATIC_ROUNDING::CUR_DIRECTION)
17792 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::i1, Src1, Src2, CC, Rnd);
17794 // Default rounding mode.
17796 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::i1, Src1, Src2, CC);
17798 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask,
17799 DAG.getTargetConstant(0, dl,
17803 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, CmpMask);
17805 case COMI: { // Comparison intrinsics
17806 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17807 SDValue LHS = Op.getOperand(1);
17808 SDValue RHS = Op.getOperand(2);
17809 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17810 SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
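// COMIS/UCOMIS set ZF, PF and CF from the compare (an unordered result sets
// all three), so each condition below is expressed as a test of those flags,
// swapping the operands where needed (LT/LE).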
17813 case ISD::SETEQ: { // (ZF = 0 and PF = 0)
17814 SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17815 DAG.getConstant(X86::COND_E, dl, MVT::i8), Comi);
17816 SDValue SetNP = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17817 DAG.getConstant(X86::COND_NP, dl, MVT::i8),
17819 SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
17822 case ISD::SETNE: { // (ZF = 1 or PF = 1)
17823 SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17824 DAG.getConstant(X86::COND_NE, dl, MVT::i8), Comi);
17825 SDValue SetP = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17826 DAG.getConstant(X86::COND_P, dl, MVT::i8),
17828 SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
17831 case ISD::SETGT: // (CF = 0 and ZF = 0)
17832 SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17833 DAG.getConstant(X86::COND_A, dl, MVT::i8), Comi);
17835 case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
17836 SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17837 DAG.getConstant(X86::COND_A, dl, MVT::i8), InvComi);
17840 case ISD::SETGE: // CF = 0
17841 SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17842 DAG.getConstant(X86::COND_AE, dl, MVT::i8), Comi);
17844 case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
17845 SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17846 DAG.getConstant(X86::COND_AE, dl, MVT::i8), InvComi);
17849 llvm_unreachable("Unexpected illegal condition!");
17851 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17853 case COMI_RM: { // Comparison intrinsics with Sae
17854 SDValue LHS = Op.getOperand(1);
17855 SDValue RHS = Op.getOperand(2);
17856 unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
17857 SDValue Sae = Op.getOperand(4);
17860 if (cast<ConstantSDNode>(Sae)->getZExtValue() ==
17861 X86::STATIC_ROUNDING::CUR_DIRECTION)
17862 FCmp = DAG.getNode(X86ISD::FSETCC, dl, MVT::i1, LHS, RHS,
17863 DAG.getConstant(CondVal, dl, MVT::i8));
17865 FCmp = DAG.getNode(X86ISD::FSETCC, dl, MVT::i1, LHS, RHS,
17866 DAG.getConstant(CondVal, dl, MVT::i8), Sae);
17867 // AnyExt just uses KMOVW %kreg, %r32; ZeroExt emits "and $1, %reg"
17868 return DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, FCmp);
17871 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17872 Op.getOperand(1), Op.getOperand(2), DAG);
17873 case COMPRESS_EXPAND_IN_REG: {
17874 SDValue Mask = Op.getOperand(3);
17875 SDValue DataToCompress = Op.getOperand(1);
17876 SDValue PassThru = Op.getOperand(2);
17877 if (isAllOnesConstant(Mask)) // return data as is
17878 return Op.getOperand(1);
17880 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17882 Mask, PassThru, Subtarget, DAG);
17885 SDValue Mask = Op.getOperand(1);
17886 MVT MaskVT = MVT::getVectorVT(MVT::i1,
17887 Mask.getSimpleValueType().getSizeInBits());
17888 Mask = DAG.getBitcast(MaskVT, Mask);
17889 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Mask);
17892 MVT VT = Op.getSimpleValueType();
17893 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits()/2);
17895 SDValue Src1 = getMaskNode(Op.getOperand(1), MaskVT, Subtarget, DAG, dl);
17896 SDValue Src2 = getMaskNode(Op.getOperand(2), MaskVT, Subtarget, DAG, dl);
17897 // Arguments should be swapped.
17898 SDValue Res = DAG.getNode(IntrData->Opc0, dl,
17899 MVT::getVectorVT(MVT::i1, VT.getSizeInBits()),
17901 return DAG.getBitcast(VT, Res);
17904 case FIXUPIMMS_MASKZ:
17906 case FIXUPIMM_MASKZ:{
17907 SDValue Src1 = Op.getOperand(1);
17908 SDValue Src2 = Op.getOperand(2);
17909 SDValue Src3 = Op.getOperand(3);
17910 SDValue Imm = Op.getOperand(4);
17911 SDValue Mask = Op.getOperand(5);
17912 SDValue Passthru = (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMMS ) ?
17913 Src1 : getZeroVector(VT, Subtarget, DAG, dl);
17914 // We specify 2 possible modes for intrinsics: with/without rounding mode.
17916 // First, we check if the intrinsic has a rounding mode (7 operands);
17917 // if not, we set the rounding mode to "current".
17919 if (Op.getNumOperands() == 7)
17920 Rnd = Op.getOperand(6);
17922 Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
17923 if (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMM_MASKZ)
17924 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17925 Src1, Src2, Src3, Imm, Rnd),
17926 Mask, Passthru, Subtarget, DAG);
17927 else // Scalar - FIXUPIMMS, FIXUPIMMS_MASKZ
17928 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17929 Src1, Src2, Src3, Imm, Rnd),
17930 Mask, Passthru, Subtarget, DAG);
17932 case CONVERT_TO_MASK: {
17933 MVT SrcVT = Op.getOperand(1).getSimpleValueType();
17934 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
17935 MVT BitcastVT = MVT::getVectorVT(MVT::i1, VT.getSizeInBits());
17937 SDValue CvtMask = DAG.getNode(IntrData->Opc0, dl, MaskVT,
17939 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17940 DAG.getUNDEF(BitcastVT), CvtMask,
17941 DAG.getIntPtrConstant(0, dl));
17942 return DAG.getBitcast(Op.getValueType(), Res);
17944 case CONVERT_MASK_TO_VEC: {
17945 SDValue Mask = Op.getOperand(1);
17946 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
17947 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
17948 return DAG.getNode(IntrData->Opc0, dl, VT, VMask);
17950 case BRCST_SUBVEC_TO_VEC: {
17951 SDValue Src = Op.getOperand(1);
17952 SDValue Passthru = Op.getOperand(2);
17953 SDValue Mask = Op.getOperand(3);
17954 EVT resVT = Passthru.getValueType();
17955 SDValue subVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, resVT,
17956 DAG.getUNDEF(resVT), Src,
17957 DAG.getIntPtrConstant(0, dl));
17959 if (Src.getSimpleValueType().is256BitVector() && resVT.is512BitVector())
17960 immVal = DAG.getConstant(0x44, dl, MVT::i8);
17962 immVal = DAG.getConstant(0, dl, MVT::i8);
17963 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17964 subVec, subVec, immVal),
17965 Mask, Passthru, Subtarget, DAG);
17967 case BRCST32x2_TO_VEC: {
17968 SDValue Src = Op.getOperand(1);
17969 SDValue PassThru = Op.getOperand(2);
17970 SDValue Mask = Op.getOperand(3);
17972 assert((VT.getScalarType() == MVT::i32 ||
17973 VT.getScalarType() == MVT::f32) && "Unexpected type!");
17974 // Bitcast Src to a vector of packed 64-bit elements.
17975 MVT ScalarVT = VT.getScalarType() == MVT::i32 ? MVT::i64 : MVT::f64;
17976 MVT BitcastVT = MVT::getVectorVT(ScalarVT, Src.getValueSizeInBits()/64);
17977 Src = DAG.getBitcast(BitcastVT, Src);
17979 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
17980 Mask, PassThru, Subtarget, DAG);
17988 default: return SDValue(); // Don't custom lower most intrinsics.
17990 case Intrinsic::x86_avx2_permd:
17991 case Intrinsic::x86_avx2_permps:
17992 // Operands intentionally swapped. Mask is last operand to intrinsic,
17993 // but second operand for node/instruction.
17994 return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
17995 Op.getOperand(2), Op.getOperand(1));
17997 // ptest and testp intrinsics. The intrinsics these come from are designed to
17998 // return an integer value, not just an instruction, so lower them to the ptest
17999 // or testp pattern and a setcc for the result.
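// For example (illustrative only): ptestz(a, b) is lowered to
// (zext i32 (setcc COND_E, (PTEST a, b))), i.e. it yields 1 iff PTEST set ZF.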
18000 case Intrinsic::x86_sse41_ptestz:
18001 case Intrinsic::x86_sse41_ptestc:
18002 case Intrinsic::x86_sse41_ptestnzc:
18003 case Intrinsic::x86_avx_ptestz_256:
18004 case Intrinsic::x86_avx_ptestc_256:
18005 case Intrinsic::x86_avx_ptestnzc_256:
18006 case Intrinsic::x86_avx_vtestz_ps:
18007 case Intrinsic::x86_avx_vtestc_ps:
18008 case Intrinsic::x86_avx_vtestnzc_ps:
18009 case Intrinsic::x86_avx_vtestz_pd:
18010 case Intrinsic::x86_avx_vtestc_pd:
18011 case Intrinsic::x86_avx_vtestnzc_pd:
18012 case Intrinsic::x86_avx_vtestz_ps_256:
18013 case Intrinsic::x86_avx_vtestc_ps_256:
18014 case Intrinsic::x86_avx_vtestnzc_ps_256:
18015 case Intrinsic::x86_avx_vtestz_pd_256:
18016 case Intrinsic::x86_avx_vtestc_pd_256:
18017 case Intrinsic::x86_avx_vtestnzc_pd_256: {
18018 bool IsTestPacked = false;
18021 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
18022 case Intrinsic::x86_avx_vtestz_ps:
18023 case Intrinsic::x86_avx_vtestz_pd:
18024 case Intrinsic::x86_avx_vtestz_ps_256:
18025 case Intrinsic::x86_avx_vtestz_pd_256:
18026 IsTestPacked = true; // Fallthrough
18027 case Intrinsic::x86_sse41_ptestz:
18028 case Intrinsic::x86_avx_ptestz_256:
18030 X86CC = X86::COND_E;
18032 case Intrinsic::x86_avx_vtestc_ps:
18033 case Intrinsic::x86_avx_vtestc_pd:
18034 case Intrinsic::x86_avx_vtestc_ps_256:
18035 case Intrinsic::x86_avx_vtestc_pd_256:
18036 IsTestPacked = true; // Fallthrough
18037 case Intrinsic::x86_sse41_ptestc:
18038 case Intrinsic::x86_avx_ptestc_256:
18040 X86CC = X86::COND_B;
18042 case Intrinsic::x86_avx_vtestnzc_ps:
18043 case Intrinsic::x86_avx_vtestnzc_pd:
18044 case Intrinsic::x86_avx_vtestnzc_ps_256:
18045 case Intrinsic::x86_avx_vtestnzc_pd_256:
18046 IsTestPacked = true; // Fallthrough
18047 case Intrinsic::x86_sse41_ptestnzc:
18048 case Intrinsic::x86_avx_ptestnzc_256:
18050 X86CC = X86::COND_A;
18054 SDValue LHS = Op.getOperand(1);
18055 SDValue RHS = Op.getOperand(2);
18056 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
18057 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
18058 SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8);
18059 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
18060 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
18062 case Intrinsic::x86_avx512_kortestz_w:
18063 case Intrinsic::x86_avx512_kortestc_w: {
18064 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
18065 SDValue LHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(1));
18066 SDValue RHS = DAG.getBitcast(MVT::v16i1, Op.getOperand(2));
18067 SDValue CC = DAG.getConstant(X86CC, dl, MVT::i8);
18068 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
18069 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
18070 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
18073 case Intrinsic::x86_sse42_pcmpistria128:
18074 case Intrinsic::x86_sse42_pcmpestria128:
18075 case Intrinsic::x86_sse42_pcmpistric128:
18076 case Intrinsic::x86_sse42_pcmpestric128:
18077 case Intrinsic::x86_sse42_pcmpistrio128:
18078 case Intrinsic::x86_sse42_pcmpestrio128:
18079 case Intrinsic::x86_sse42_pcmpistris128:
18080 case Intrinsic::x86_sse42_pcmpestris128:
18081 case Intrinsic::x86_sse42_pcmpistriz128:
18082 case Intrinsic::x86_sse42_pcmpestriz128: {
18086 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
18087 case Intrinsic::x86_sse42_pcmpistria128:
18088 Opcode = X86ISD::PCMPISTRI;
18089 X86CC = X86::COND_A;
18091 case Intrinsic::x86_sse42_pcmpestria128:
18092 Opcode = X86ISD::PCMPESTRI;
18093 X86CC = X86::COND_A;
18095 case Intrinsic::x86_sse42_pcmpistric128:
18096 Opcode = X86ISD::PCMPISTRI;
18097 X86CC = X86::COND_B;
18099 case Intrinsic::x86_sse42_pcmpestric128:
18100 Opcode = X86ISD::PCMPESTRI;
18101 X86CC = X86::COND_B;
18103 case Intrinsic::x86_sse42_pcmpistrio128:
18104 Opcode = X86ISD::PCMPISTRI;
18105 X86CC = X86::COND_O;
18107 case Intrinsic::x86_sse42_pcmpestrio128:
18108 Opcode = X86ISD::PCMPESTRI;
18109 X86CC = X86::COND_O;
18111 case Intrinsic::x86_sse42_pcmpistris128:
18112 Opcode = X86ISD::PCMPISTRI;
18113 X86CC = X86::COND_S;
18115 case Intrinsic::x86_sse42_pcmpestris128:
18116 Opcode = X86ISD::PCMPESTRI;
18117 X86CC = X86::COND_S;
18119 case Intrinsic::x86_sse42_pcmpistriz128:
18120 Opcode = X86ISD::PCMPISTRI;
18121 X86CC = X86::COND_E;
18123 case Intrinsic::x86_sse42_pcmpestriz128:
18124 Opcode = X86ISD::PCMPESTRI;
18125 X86CC = X86::COND_E;
18128 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
18129 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
18130 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
18131 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18132 DAG.getConstant(X86CC, dl, MVT::i8),
18133 SDValue(PCMP.getNode(), 1));
18134 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
18137 case Intrinsic::x86_sse42_pcmpistri128:
18138 case Intrinsic::x86_sse42_pcmpestri128: {
18140 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
18141 Opcode = X86ISD::PCMPISTRI;
18143 Opcode = X86ISD::PCMPESTRI;
18145 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
18146 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
18147 return DAG.getNode(Opcode, dl, VTs, NewOps);
18150 case Intrinsic::eh_sjlj_lsda: {
18151 MachineFunction &MF = DAG.getMachineFunction();
18152 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
18153 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
18154 auto &Context = MF.getMMI().getContext();
18155 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
18156 Twine(MF.getFunctionNumber()));
18157 return DAG.getNode(X86ISD::Wrapper, dl, VT, DAG.getMCSymbol(S, PtrVT));
18160 case Intrinsic::x86_seh_lsda: {
18161 // Compute the symbol for the LSDA. We know it'll get emitted later.
18162 MachineFunction &MF = DAG.getMachineFunction();
18163 SDValue Op1 = Op.getOperand(1);
18164 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
18165 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
18166 GlobalValue::getRealLinkageName(Fn->getName()));
18168 // Generate a simple absolute symbol reference. This intrinsic is only
18169 // supported on 32-bit Windows, which isn't PIC.
18170 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
18171 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
18174 case Intrinsic::x86_seh_recoverfp: {
18175 SDValue FnOp = Op.getOperand(1);
18176 SDValue IncomingFPOp = Op.getOperand(2);
18177 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
18178 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
18180 report_fatal_error(
18181 "llvm.x86.seh.recoverfp must take a function as the first argument");
18182 return recoverFramePointer(DAG, Fn, IncomingFPOp);
18185 case Intrinsic::localaddress: {
18186 // Returns one of the stack, base, or frame pointer registers, depending on
18187 // which is used to reference local variables.
18188 MachineFunction &MF = DAG.getMachineFunction();
18189 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
18191 if (RegInfo->hasBasePointer(MF))
18192 Reg = RegInfo->getBaseRegister();
18193 else // This function handles the SP or FP case.
18194 Reg = RegInfo->getPtrSizedFrameRegister(MF);
18195 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
18200 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
18201 SDValue Src, SDValue Mask, SDValue Base,
18202 SDValue Index, SDValue ScaleOp, SDValue Chain,
18203 const X86Subtarget &Subtarget) {
18205 auto *C = cast<ConstantSDNode>(ScaleOp);
18206 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
18207 MVT MaskVT = MVT::getVectorVT(MVT::i1,
18208 Index.getSimpleValueType().getVectorNumElements());
18210 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
18211 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
18212 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
18213 SDValue Segment = DAG.getRegister(0, MVT::i32);
18215 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
18216 SDValue Ops[] = {Src, VMask, Base, Scale, Index, Disp, Segment, Chain};
18217 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
18218 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
18219 return DAG.getMergeValues(RetOps, dl);
18222 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
18223 SDValue Src, SDValue Mask, SDValue Base,
18224 SDValue Index, SDValue ScaleOp, SDValue Chain,
18225 const X86Subtarget &Subtarget) {
18227 auto *C = cast<ConstantSDNode>(ScaleOp);
18228 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
18229 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
18230 SDValue Segment = DAG.getRegister(0, MVT::i32);
18231 MVT MaskVT = MVT::getVectorVT(MVT::i1,
18232 Index.getSimpleValueType().getVectorNumElements());
18234 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
18235 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
18236 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, VMask, Src, Chain};
18237 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
18238 return SDValue(Res, 1);
18241 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
18242 SDValue Mask, SDValue Base, SDValue Index,
18243 SDValue ScaleOp, SDValue Chain,
18244 const X86Subtarget &Subtarget) {
18246 auto *C = cast<ConstantSDNode>(ScaleOp);
18247 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
18248 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
18249 SDValue Segment = DAG.getRegister(0, MVT::i32);
18251 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
18252 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
18253 //SDVTList VTs = DAG.getVTList(MVT::Other);
18254 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
18255 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
18256 return SDValue(Res, 0);
18259 /// Handles the lowering of builtin intrinsics that read performance monitor
18260 /// counters (x86_rdpmc).
18261 static void getReadPerformanceCounter(SDNode *N, const SDLoc &DL,
18263 const X86Subtarget &Subtarget,
18264 SmallVectorImpl<SDValue> &Results) {
18265 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
18266 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
18269 // The ECX register is used to select the index of the performance counter to read.
18271 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
18273 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
18275 // Reads the content of a 64-bit performance counter and returns it in the
18276 // registers EDX:EAX.
18277 if (Subtarget.is64Bit()) {
18278 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
18279 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
18282 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
18283 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
18286 Chain = HI.getValue(1);
18288 if (Subtarget.is64Bit()) {
18289 // The EAX register is loaded with the low-order 32 bits. The EDX register
18290 // is loaded with the supported high-order bits of the counter.
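// On 64-bit targets the full counter value is reassembled below as
// (HI << 32) | LO.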
18291 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
18292 DAG.getConstant(32, DL, MVT::i8));
18293 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
18294 Results.push_back(Chain);
18298 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
18299 SDValue Ops[] = { LO, HI };
18300 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
18301 Results.push_back(Pair);
18302 Results.push_back(Chain);
18305 /// Handles the lowering of builtin intrinsics that read the time stamp counter
18306 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
18307 /// READCYCLECOUNTER nodes.
18308 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
18310 const X86Subtarget &Subtarget,
18311 SmallVectorImpl<SDValue> &Results) {
18312 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
18313 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
18316 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
18317 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
18318 // and the EAX register is loaded with the low-order 32 bits.
18319 if (Subtarget.is64Bit()) {
18320 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
18321 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
18324 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
18325 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
18328 SDValue Chain = HI.getValue(1);
18330 if (Opcode == X86ISD::RDTSCP_DAG) {
18331 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
18333 // The RDTSCP instruction loads the IA32_TSC_AUX MSR (address C000_0103H) into
18334 // the ECX register. Add 'ecx' explicitly to the chain.
18335 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
18337 // Explicitly store the content of ECX at the location passed as input
18338 // to the 'rdtscp' intrinsic.
18339 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
18340 MachinePointerInfo());
18343 if (Subtarget.is64Bit()) {
18344 // The EDX register is loaded with the high-order 32 bits of the MSR, and
18345 // the EAX register is loaded with the low-order 32 bits.
18346 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
18347 DAG.getConstant(32, DL, MVT::i8));
18348 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
18349 Results.push_back(Chain);
18353 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
18354 SDValue Ops[] = { LO, HI };
18355 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
18356 Results.push_back(Pair);
18357 Results.push_back(Chain);
18360 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
18361 SelectionDAG &DAG) {
18362 SmallVector<SDValue, 2> Results;
18364 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
18366 return DAG.getMergeValues(Results, DL);
18369 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
18370 MachineFunction &MF = DAG.getMachineFunction();
18371 SDValue Chain = Op.getOperand(0);
18372 SDValue RegNode = Op.getOperand(2);
18373 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
18375 report_fatal_error("EH registrations only live in functions using WinEH");
18377 // Cast the operand to an alloca, and remember the frame index.
18378 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
18380 report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
18381 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
18383 // Return the chain operand without making any DAG nodes.
18387 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
18388 MachineFunction &MF = DAG.getMachineFunction();
18389 SDValue Chain = Op.getOperand(0);
18390 SDValue EHGuard = Op.getOperand(2);
18391 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
18393 report_fatal_error("EHGuard only live in functions using WinEH");
18395 // Cast the operand to an alloca, and remember the frame index.
18396 auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
18398 report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
18399 EHInfo->EHGuardFrameIndex = FINode->getIndex();
18401 // Return the chain operand without making any DAG nodes.
18405 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
18406 SelectionDAG &DAG) {
18407 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
18409 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
18411 if (IntNo == llvm::Intrinsic::x86_seh_ehregnode)
18412 return MarkEHRegistrationNode(Op, DAG);
18413 if (IntNo == llvm::Intrinsic::x86_seh_ehguard)
18414 return MarkEHGuard(Op, DAG);
18415 if (IntNo == llvm::Intrinsic::x86_flags_read_u32 ||
18416 IntNo == llvm::Intrinsic::x86_flags_read_u64 ||
18417 IntNo == llvm::Intrinsic::x86_flags_write_u32 ||
18418 IntNo == llvm::Intrinsic::x86_flags_write_u64) {
18419     // We need a frame pointer because this will get lowered to a PUSH/POP sequence.
18421 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18422 MFI->setHasCopyImplyingStackAdjustment(true);
18423     // Don't do anything here; we will expand these intrinsics out later
18424     // during ExpandISelPseudos in EmitInstrWithCustomInserter.
18431 switch(IntrData->Type) {
18432 default: llvm_unreachable("Unknown Intrinsic Type");
18435 // Emit the node with the right value type.
18436 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
18437 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18439 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
18440     // Otherwise return the value from Rand, which is always 0, cast to i32.
18441 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
18442 DAG.getConstant(1, dl, Op->getValueType(1)),
18443 DAG.getConstant(X86::COND_B, dl, MVT::i32),
18444 SDValue(Result.getNode(), 1) };
18445 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
18446 DAG.getVTList(Op->getValueType(1), MVT::Glue),
18449 // Return { result, isValid, chain }.
18450 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
18451 SDValue(Result.getNode(), 2));
18454     // gather(v1, mask, index, base, scale);
18455 SDValue Chain = Op.getOperand(0);
18456 SDValue Src = Op.getOperand(2);
18457 SDValue Base = Op.getOperand(3);
18458 SDValue Index = Op.getOperand(4);
18459 SDValue Mask = Op.getOperand(5);
18460 SDValue Scale = Op.getOperand(6);
18461 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale,
18465     // scatter(base, mask, index, v1, scale);
18466 SDValue Chain = Op.getOperand(0);
18467 SDValue Base = Op.getOperand(2);
18468 SDValue Mask = Op.getOperand(3);
18469 SDValue Index = Op.getOperand(4);
18470 SDValue Src = Op.getOperand(5);
18471 SDValue Scale = Op.getOperand(6);
18472 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
18473 Scale, Chain, Subtarget);
18476 SDValue Hint = Op.getOperand(6);
18477 unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
18478 assert(HintVal < 2 && "Wrong prefetch hint in intrinsic: should be 0 or 1");
18479 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
18480 SDValue Chain = Op.getOperand(0);
18481 SDValue Mask = Op.getOperand(2);
18482 SDValue Index = Op.getOperand(3);
18483 SDValue Base = Op.getOperand(4);
18484 SDValue Scale = Op.getOperand(5);
18485 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
18488 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
18490 SmallVector<SDValue, 2> Results;
18491 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
18493 return DAG.getMergeValues(Results, dl);
18495 // Read Performance Monitoring Counters.
18497 SmallVector<SDValue, 2> Results;
18498 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
18499 return DAG.getMergeValues(Results, dl);
18501 // XTEST intrinsics.
18503 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18504 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18505 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18506 DAG.getConstant(X86::COND_NE, dl, MVT::i8),
18508 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
18509 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
18510 Ret, SDValue(InTrans.getNode(), 1));
18514 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18515 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
18516 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
18517 DAG.getConstant(-1, dl, MVT::i8));
18518 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
18519 Op.getOperand(4), GenCF.getValue(1));
18520 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
18521 Op.getOperand(5), MachinePointerInfo());
18522 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18523 DAG.getConstant(X86::COND_B, dl, MVT::i8),
18525 SDValue Results[] = { SetCC, Store };
18526 return DAG.getMergeValues(Results, dl);
18528 case COMPRESS_TO_MEM: {
18529 SDValue Mask = Op.getOperand(4);
18530 SDValue DataToCompress = Op.getOperand(3);
18531 SDValue Addr = Op.getOperand(2);
18532 SDValue Chain = Op.getOperand(0);
18533 MVT VT = DataToCompress.getSimpleValueType();
18535 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
18536 assert(MemIntr && "Expected MemIntrinsicSDNode!");
18538 if (isAllOnesConstant(Mask)) // return just a store
18539 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18540 MemIntr->getMemOperand());
18542 SDValue Compressed =
18543 getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress),
18544 Mask, DAG.getUNDEF(VT), Subtarget, DAG);
18545 return DAG.getStore(Chain, dl, Compressed, Addr,
18546 MemIntr->getMemOperand());
18548 case TRUNCATE_TO_MEM_VI8:
18549 case TRUNCATE_TO_MEM_VI16:
18550 case TRUNCATE_TO_MEM_VI32: {
18551 SDValue Mask = Op.getOperand(4);
18552 SDValue DataToTruncate = Op.getOperand(3);
18553 SDValue Addr = Op.getOperand(2);
18554 SDValue Chain = Op.getOperand(0);
18556 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
18557 assert(MemIntr && "Expected MemIntrinsicSDNode!");
18559 EVT VT = MemIntr->getMemoryVT();
18561 if (isAllOnesConstant(Mask)) // return just a truncate store
18562 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, VT,
18563 MemIntr->getMemOperand());
18565 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
18566 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
18568 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, VT,
18569 MemIntr->getMemOperand(), true);
18571 case EXPAND_FROM_MEM: {
18572 SDValue Mask = Op.getOperand(4);
18573 SDValue PassThru = Op.getOperand(3);
18574 SDValue Addr = Op.getOperand(2);
18575 SDValue Chain = Op.getOperand(0);
18576 MVT VT = Op.getSimpleValueType();
18578 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
18579 assert(MemIntr && "Expected MemIntrinsicSDNode!");
18581 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr,
18582 MemIntr->getMemOperand());
18584 if (isAllOnesConstant(Mask)) // return just a load
18585 return DataToExpand;
18587 SDValue Results[] = {
18588 getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, DataToExpand),
18589 Mask, PassThru, Subtarget, DAG), Chain};
18590 return DAG.getMergeValues(Results, dl);
18595 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18596 SelectionDAG &DAG) const {
18597 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18598 MFI->setReturnAddressIsTaken(true);
18600 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18603 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18605 EVT PtrVT = getPointerTy(DAG.getDataLayout());
18608 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18609 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
18610 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
18611 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18612 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
18613 MachinePointerInfo());
18616 // Just load the return address.
18617 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18618 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
18619 MachinePointerInfo());
18622 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18623 MachineFunction &MF = DAG.getMachineFunction();
18624 MachineFrameInfo *MFI = MF.getFrameInfo();
18625 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18626 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
18627 EVT VT = Op.getValueType();
18629 MFI->setFrameAddressIsTaken(true);
18631 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18632     // Depth > 0 makes no sense on targets which use Windows unwind codes. It
18633     // is not possible to crawl up the stack without looking at the unwind codes.
18635 int FrameAddrIndex = FuncInfo->getFAIndex();
18636 if (!FrameAddrIndex) {
18637 // Set up a frame object for the return address.
18638 unsigned SlotSize = RegInfo->getSlotSize();
18639 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18640 SlotSize, /*Offset=*/0, /*IsImmutable=*/false);
18641 FuncInfo->setFAIndex(FrameAddrIndex);
18643 return DAG.getFrameIndex(FrameAddrIndex, VT);
18646 unsigned FrameReg =
18647 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18648 SDLoc dl(Op); // FIXME probably not meaningful
18649 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18650 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18651 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18652 "Invalid Frame Register!");
18653 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18655 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18656 MachinePointerInfo());
18660 // FIXME? Maybe this could be a TableGen attribute on some registers and
18661 // this table could be generated automatically from RegInfo.
18662 unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
18663 SelectionDAG &DAG) const {
18664 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
18665 const MachineFunction &MF = DAG.getMachineFunction();
18667 unsigned Reg = StringSwitch<unsigned>(RegName)
18668 .Case("esp", X86::ESP)
18669 .Case("rsp", X86::RSP)
18670 .Case("ebp", X86::EBP)
18671 .Case("rbp", X86::RBP)
18674 if (Reg == X86::EBP || Reg == X86::RBP) {
18675 if (!TFI.hasFP(MF))
18676 report_fatal_error("register " + StringRef(RegName) +
18677 " is allocatable: function has no frame pointer");
18680 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
18681 unsigned FrameReg =
18682 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18683 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
18684 "Invalid Frame Register!");
18692 report_fatal_error("Invalid register name global variable");
18695 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18696 SelectionDAG &DAG) const {
18697 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
18698 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
18701 unsigned X86TargetLowering::getExceptionPointerRegister(
18702 const Constant *PersonalityFn) const {
18703 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
18704 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
18706 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
18709 unsigned X86TargetLowering::getExceptionSelectorRegister(
18710 const Constant *PersonalityFn) const {
18711 // Funclet personalities don't use selectors (the runtime does the selection).
18712 assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
18713 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
18716 bool X86TargetLowering::needsFixedCatchObjects() const {
18717 return Subtarget.isTargetWin64();
18720 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18721 SDValue Chain = Op.getOperand(0);
18722 SDValue Offset = Op.getOperand(1);
18723 SDValue Handler = Op.getOperand(2);
18726 EVT PtrVT = getPointerTy(DAG.getDataLayout());
18727 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
18728 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18729 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18730 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18731 "Invalid Frame Register!");
18732 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18733 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18735 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18736 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
18738 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18739 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
18740 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18742 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18743 DAG.getRegister(StoreAddrReg, PtrVT));
18746 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18747 SelectionDAG &DAG) const {
18749   // If the subtarget is not 64-bit, we may need the global base reg
18750   // after isel expand pseudo, i.e., after the CGBR pass has run.
18751 // Therefore, ask for the GlobalBaseReg now, so that the pass
18752 // inserts the code for us in case we need it.
18753 // Otherwise, we will end up in a situation where we will
18754 // reference a virtual register that is not defined!
18755 if (!Subtarget.is64Bit()) {
18756 const X86InstrInfo *TII = Subtarget.getInstrInfo();
18757 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
18759 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18760 DAG.getVTList(MVT::i32, MVT::Other),
18761 Op.getOperand(0), Op.getOperand(1));
18764 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18765 SelectionDAG &DAG) const {
18767 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18768 Op.getOperand(0), Op.getOperand(1));
18771 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
18772 SelectionDAG &DAG) const {
18774 return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
18778 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18779 return Op.getOperand(0);
18782 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18783 SelectionDAG &DAG) const {
18784 SDValue Root = Op.getOperand(0);
18785 SDValue Trmp = Op.getOperand(1); // trampoline
18786 SDValue FPtr = Op.getOperand(2); // nested function
18787 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18790 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18791 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
18793 if (Subtarget.is64Bit()) {
18794 SDValue OutChains[6];
18796 // Large code-model.
18797 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18798 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18800 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18801 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18803 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
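    // Taken together, the stores below hand-assemble a small code stub that is
    // roughly equivalent to:
    //   movabsq <FPtr>, %r11    (trampoline bytes 0-9)
    //   movabsq <Nest>, %r10    (trampoline bytes 10-19)
    //   jmpq    *%r11           (trampoline bytes 20-22)
    // using the opcode, REX and ModRM constants defined above.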
18805 // Load the pointer to the nested function into R11.
18806 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18807 SDValue Addr = Trmp;
18808 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
18809 Addr, MachinePointerInfo(TrmpAddr));
18811 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18812 DAG.getConstant(2, dl, MVT::i64));
18814 DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
18815 /* Alignment = */ 2);
18817 // Load the 'nest' parameter value into R10.
18818 // R10 is specified in X86CallingConv.td
18819 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18820 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18821 DAG.getConstant(10, dl, MVT::i64));
18822 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
18823 Addr, MachinePointerInfo(TrmpAddr, 10));
18825 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18826 DAG.getConstant(12, dl, MVT::i64));
18828 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
18829 /* Alignment = */ 2);
18831 // Jump to the nested function.
18832 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18833 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18834 DAG.getConstant(20, dl, MVT::i64));
18835 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
18836 Addr, MachinePointerInfo(TrmpAddr, 20));
18838 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18839 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18840 DAG.getConstant(22, dl, MVT::i64));
18841 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
18842 Addr, MachinePointerInfo(TrmpAddr, 22));
18844 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18846 const Function *Func =
18847 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18848 CallingConv::ID CC = Func->getCallingConv();
18853 llvm_unreachable("Unsupported calling convention");
18854 case CallingConv::C:
18855 case CallingConv::X86_StdCall: {
18856 // Pass 'nest' parameter in ECX.
18857 // Must be kept in sync with X86CallingConv.td
18858 NestReg = X86::ECX;
18860 // Check that ECX wasn't needed by an 'inreg' parameter.
18861 FunctionType *FTy = Func->getFunctionType();
18862 const AttributeSet &Attrs = Func->getAttributes();
18864 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18865 unsigned InRegCount = 0;
18868 for (FunctionType::param_iterator I = FTy->param_begin(),
18869 E = FTy->param_end(); I != E; ++I, ++Idx)
18870 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
18871 auto &DL = DAG.getDataLayout();
18872 // FIXME: should only count parameters that are lowered to integers.
18873 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
18876 if (InRegCount > 2) {
18877 report_fatal_error("Nest register in use - reduce number of inreg"
18883 case CallingConv::X86_FastCall:
18884 case CallingConv::X86_ThisCall:
18885 case CallingConv::Fast:
18886 // Pass 'nest' parameter in EAX.
18887 // Must be kept in sync with X86CallingConv.td
18888 NestReg = X86::EAX;
18892 SDValue OutChains[4];
18893 SDValue Addr, Disp;
18895 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18896 DAG.getConstant(10, dl, MVT::i32));
18897 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18899 // This is storing the opcode for MOV32ri.
18900 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18901 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18903 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
18904 Trmp, MachinePointerInfo(TrmpAddr));
18906 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18907 DAG.getConstant(1, dl, MVT::i32));
18909 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
18910 /* Alignment = */ 1);
18912 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18913 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18914 DAG.getConstant(5, dl, MVT::i32));
18915 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
18916 Addr, MachinePointerInfo(TrmpAddr, 5),
18917 /* Alignment = */ 1);
18919 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18920 DAG.getConstant(6, dl, MVT::i32));
18922 DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
18923 /* Alignment = */ 1);
18925 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18929 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18930 SelectionDAG &DAG) const {
18932    The rounding mode is in bits 11:10 of FPSR, and has the following
18934    settings: 00 = round to nearest, 01 = round toward -inf, 10 = round toward +inf, 11 = round toward zero.
18939    FLT_ROUNDS, on the other hand, expects: 0 = round toward zero, 1 = round to nearest, 2 = round toward +inf, 3 = round toward -inf.
18946    To perform the conversion, we do:
18947      (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
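   For example, with rounding-control bits 11:10 = 10 (round toward +inf):
   (FPSR & 0x800) >> 11 = 1 and (FPSR & 0x400) >> 9 = 0, so
   ((1 | 0) + 1) & 3 = 2, which is FLT_ROUNDS' value for round toward +inf.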
18950 MachineFunction &MF = DAG.getMachineFunction();
18951 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
18952 unsigned StackAlignment = TFI.getStackAlignment();
18953 MVT VT = Op.getSimpleValueType();
18956 // Save FP Control Word to stack slot
18957 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18958 SDValue StackSlot =
18959 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
18961 MachineMemOperand *MMO =
18962 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
18963 MachineMemOperand::MOStore, 2, 2);
18965 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18966 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18967 DAG.getVTList(MVT::Other),
18968 Ops, MVT::i16, MMO);
18970 // Load FP Control Word from stack slot
18972 DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
18974 // Transform as necessary
18976 DAG.getNode(ISD::SRL, DL, MVT::i16,
18977 DAG.getNode(ISD::AND, DL, MVT::i16,
18978 CWD, DAG.getConstant(0x800, DL, MVT::i16)),
18979 DAG.getConstant(11, DL, MVT::i8));
18981 DAG.getNode(ISD::SRL, DL, MVT::i16,
18982 DAG.getNode(ISD::AND, DL, MVT::i16,
18983 CWD, DAG.getConstant(0x400, DL, MVT::i16)),
18984 DAG.getConstant(9, DL, MVT::i8));
18987 DAG.getNode(ISD::AND, DL, MVT::i16,
18988 DAG.getNode(ISD::ADD, DL, MVT::i16,
18989 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18990 DAG.getConstant(1, DL, MVT::i16)),
18991 DAG.getConstant(3, DL, MVT::i16));
18993 return DAG.getNode((VT.getSizeInBits() < 16 ?
18994 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18997 /// \brief Lower a vector CTLZ using the natively supported vector CTLZ instruction.
18999 // 1. i32/i64 128/256-bit vectors (native support requires VLX) are extended
19000 //    to a 512-bit vector.
19001 // 2. i8/i16 vectors are implemented using a dword LZCNT vector instruction
19002 //    ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
19003 //    split the vector, perform the operation on its Lo and Hi parts and
19004 //    concatenate the results.
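// For example, a v16i8 CTLZ becomes trunc(ctlz(zext32(x))) - 24, since
// zero-extending each i8 lane to i32 contributes 32 - 8 = 24 extra leading
// zeros; the Delta value below subtracts exactly that correction.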
19005 static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
19006 assert(Op.getOpcode() == ISD::CTLZ);
19008 MVT VT = Op.getSimpleValueType();
19009 MVT EltVT = VT.getVectorElementType();
19010 unsigned NumElems = VT.getVectorNumElements();
19012 if (EltVT == MVT::i64 || EltVT == MVT::i32) {
19013     // Extend to a 512-bit vector.
19014 assert((VT.is256BitVector() || VT.is128BitVector()) &&
19015 "Unsupported value type for operation");
19017 MVT NewVT = MVT::getVectorVT(EltVT, 512 / VT.getScalarSizeInBits());
19018 SDValue Vec512 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT,
19019 DAG.getUNDEF(NewVT),
19021 DAG.getIntPtrConstant(0, dl));
19022 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Vec512);
19024 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CtlzNode,
19025 DAG.getIntPtrConstant(0, dl));
19028 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
19029 "Unsupported element type");
19031 if (16 < NumElems) {
19032     // Split the vector; its Lo and Hi parts will be handled in the next iteration.
19034 std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
19035 MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
19037 Lo = DAG.getNode(ISD::CTLZ, dl, OutVT, Lo);
19038 Hi = DAG.getNode(ISD::CTLZ, dl, OutVT, Hi);
19040 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
19043 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
19045 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
19046 "Unsupported value type for operation");
19048   // Use the natively supported vector instruction vplzcntd.
19049 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
19050 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
19051 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
19052 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
19054 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
19057 // Lower CTLZ using a PSHUFB lookup table implementation.
19058 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
19059 const X86Subtarget &Subtarget,
19060 SelectionDAG &DAG) {
19061 MVT VT = Op.getSimpleValueType();
19062 int NumElts = VT.getVectorNumElements();
19063 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
19064 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
19066 // Per-nibble leading zero PSHUFB lookup table.
19067 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
19068 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
19069 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
19070 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
19072 SmallVector<SDValue, 64> LUTVec;
19073 for (int i = 0; i < NumBytes; ++i)
19074 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
19075 SDValue InRegLUT = DAG.getNode(ISD::BUILD_VECTOR, DL, CurrVT, LUTVec);
19077   // Begin by bitcasting the input to a byte vector, then split those bytes
19078   // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
19079   // If the hi input nibble is zero then we add both results together, otherwise
19080   // we just take the hi result (by masking the lo result to zero before the add).
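  // E.g., for the byte 0x1A (0b00011010): the hi nibble is 0x1 (LUT value 3)
  // and is non-zero, so the lo result is masked to zero and the sum is 3,
  // matching ctlz8(0x1A). For 0x0A the hi nibble is zero (LUT value 4), the
  // lo nibble 0xA has LUT value 0, and the sum 4 matches ctlz8(0x0A).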
19082 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
19083 SDValue Zero = getZeroVector(CurrVT, Subtarget, DAG, DL);
19085 SDValue NibbleMask = DAG.getConstant(0xF, DL, CurrVT);
19086 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
19087 SDValue Lo = DAG.getNode(ISD::AND, DL, CurrVT, Op0, NibbleMask);
19088 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
19089 SDValue HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
19091 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
19092 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
19093 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
19094 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
19096 // Merge result back from vXi8 back to VT, working on the lo/hi halves
19097 // of the current vector width in the same way we did for the nibbles.
19098 // If the upper half of the input element is zero then add the halves'
19099 // leading zero counts together, otherwise just use the upper half's.
19100 // Double the width of the result until we are at target width.
19101 while (CurrVT != VT) {
19102 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
19103 int CurrNumElts = CurrVT.getVectorNumElements();
19104 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
19105 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
19106 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
19108 // Check if the upper half of the input element is zero.
19109 SDValue HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
19110 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
19111 HiZ = DAG.getBitcast(NextVT, HiZ);
19113 // Move the upper/lower halves to the lower bits as we'll be extending to
19114     // NextVT. Mask the lower result to zero if HiZ is true and add the results together.
19116 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
19117 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
19118 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
19119 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
19120 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
19127 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
19128 const X86Subtarget &Subtarget,
19129 SelectionDAG &DAG) {
19130 MVT VT = Op.getSimpleValueType();
19131 SDValue Op0 = Op.getOperand(0);
19133 if (Subtarget.hasAVX512())
19134 return LowerVectorCTLZ_AVX512(Op, DAG);
19136 // Decompose 256-bit ops into smaller 128-bit ops.
19137 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
19138 unsigned NumElems = VT.getVectorNumElements();
19140 // Extract each 128-bit vector, perform ctlz and concat the result.
19141 SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
19142 SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
19144 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
19145 DAG.getNode(ISD::CTLZ, DL, LHS.getValueType(), LHS),
19146 DAG.getNode(ISD::CTLZ, DL, RHS.getValueType(), RHS));
19149 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
19150 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
19153 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
19154 SelectionDAG &DAG) {
19155 MVT VT = Op.getSimpleValueType();
19157 unsigned NumBits = VT.getSizeInBits();
19159 unsigned Opc = Op.getOpcode();
19162 return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
19164 Op = Op.getOperand(0);
19165 if (VT == MVT::i8) {
19166     // Zero extend to i32 since there is no i8 bsr instruction.
19168 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
19171 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
19172 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
19173 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
19175 if (Opc == ISD::CTLZ) {
19176 // If src is zero (i.e. bsr sets ZF), returns NumBits.
19179 DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
19180 DAG.getConstant(X86::COND_E, dl, MVT::i8),
19183 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
19186 // Finally xor with NumBits-1.
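    // E.g., for i32 with x = 0x00010000, BSR returns bit index 16 and
    // 16 ^ 31 == 15, which is ctlz(x); the xor is equivalent to
    // (NumBits-1) - index because the index never exceeds NumBits-1.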
19187 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
19188 DAG.getConstant(NumBits - 1, dl, OpVT));
19191 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
19195 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
19196 MVT VT = Op.getSimpleValueType();
19197 unsigned NumBits = VT.getScalarSizeInBits();
19200 if (VT.isVector()) {
19201 SDValue N0 = Op.getOperand(0);
19202 SDValue Zero = DAG.getConstant(0, dl, VT);
19204 // lsb(x) = (x & -x)
19205 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, N0,
19206 DAG.getNode(ISD::SUB, dl, VT, Zero, N0));
19208 // cttz_undef(x) = (width - 1) - ctlz(lsb)
19209 if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
19210 SDValue WidthMinusOne = DAG.getConstant(NumBits - 1, dl, VT);
19211 return DAG.getNode(ISD::SUB, dl, VT, WidthMinusOne,
19212 DAG.getNode(ISD::CTLZ, dl, VT, LSB));
19215 // cttz(x) = ctpop(lsb - 1)
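    // E.g., x = 12 (0b1100): lsb = 12 & -12 = 4, lsb - 1 = 3 (0b0011), and
    // ctpop(3) = 2 == cttz(12).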
19216 SDValue One = DAG.getConstant(1, dl, VT);
19217 return DAG.getNode(ISD::CTPOP, dl, VT,
19218 DAG.getNode(ISD::SUB, dl, VT, LSB, One));
19221 assert(Op.getOpcode() == ISD::CTTZ &&
19222 "Only scalar CTTZ requires custom lowering");
19224 // Issue a bsf (scan bits forward) which also sets EFLAGS.
19225 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19226 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op.getOperand(0));
19228 // If src is zero (i.e. bsf sets ZF), returns NumBits.
19231 DAG.getConstant(NumBits, dl, VT),
19232 DAG.getConstant(X86::COND_E, dl, MVT::i8),
19235 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
19238 /// Break a 256-bit integer operation into two new 128-bit ones and then
19239 /// concatenate the result back.
19240 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
19241 MVT VT = Op.getSimpleValueType();
19243 assert(VT.is256BitVector() && VT.isInteger() &&
19244 "Unsupported value type for operation");
19246 unsigned NumElems = VT.getVectorNumElements();
19249 // Extract the LHS vectors
19250 SDValue LHS = Op.getOperand(0);
19251 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
19252 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
19254 // Extract the RHS vectors
19255 SDValue RHS = Op.getOperand(1);
19256 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
19257 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
19259 MVT EltVT = VT.getVectorElementType();
19260 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19262 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
19263 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
19264 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
19267 /// Break a 512-bit integer operation into two new 256-bit ones and then
19268 /// concatenate the result back.
19269 static SDValue Lower512IntArith(SDValue Op, SelectionDAG &DAG) {
19270 MVT VT = Op.getSimpleValueType();
19272 assert(VT.is512BitVector() && VT.isInteger() &&
19273 "Unsupported value type for operation");
19275 unsigned NumElems = VT.getVectorNumElements();
19278 // Extract the LHS vectors
19279 SDValue LHS = Op.getOperand(0);
19280 SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
19281 SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
19283 // Extract the RHS vectors
19284 SDValue RHS = Op.getOperand(1);
19285 SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
19286 SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
19288 MVT EltVT = VT.getVectorElementType();
19289 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19291 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
19292 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
19293 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
19296 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
19297 if (Op.getValueType() == MVT::i1)
19298 return DAG.getNode(ISD::XOR, SDLoc(Op), Op.getValueType(),
19299 Op.getOperand(0), Op.getOperand(1));
19300 assert(Op.getSimpleValueType().is256BitVector() &&
19301 Op.getSimpleValueType().isInteger() &&
19302 "Only handle AVX 256-bit vector integer operation");
19303 return Lower256IntArith(Op, DAG);
19306 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
19307 if (Op.getValueType() == MVT::i1)
19308 return DAG.getNode(ISD::XOR, SDLoc(Op), Op.getValueType(),
19309 Op.getOperand(0), Op.getOperand(1));
19310 assert(Op.getSimpleValueType().is256BitVector() &&
19311 Op.getSimpleValueType().isInteger() &&
19312 "Only handle AVX 256-bit vector integer operation");
19313 return Lower256IntArith(Op, DAG);
19316 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
19317 assert(Op.getSimpleValueType().is256BitVector() &&
19318 Op.getSimpleValueType().isInteger() &&
19319 "Only handle AVX 256-bit vector integer operation");
19320 return Lower256IntArith(Op, DAG);
19323 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
19324 SelectionDAG &DAG) {
19326 MVT VT = Op.getSimpleValueType();
19329 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
19331 // Decompose 256-bit ops into smaller 128-bit ops.
19332 if (VT.is256BitVector() && !Subtarget.hasInt256())
19333 return Lower256IntArith(Op, DAG);
19335 SDValue A = Op.getOperand(0);
19336 SDValue B = Op.getOperand(1);
19338 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
19339 // vector pairs, multiply and truncate.
19340 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
19341 if (Subtarget.hasInt256()) {
19342 // For 512-bit vectors, split into 256-bit vectors to allow the
19343 // sign-extension to occur.
19344 if (VT == MVT::v64i8)
19345 return Lower512IntArith(Op, DAG);
19347 // For 256-bit vectors, split into 128-bit vectors to allow the
19348 // sign-extension to occur. We don't need this on AVX512BW as we can
19349 // safely sign-extend to v32i16.
19350 if (VT == MVT::v32i8 && !Subtarget.hasBWI())
19351 return Lower256IntArith(Op, DAG);
19353 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
19354 return DAG.getNode(
19355 ISD::TRUNCATE, dl, VT,
19356 DAG.getNode(ISD::MUL, dl, ExVT,
19357 DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, A),
19358 DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, B)));
19361 assert(VT == MVT::v16i8 &&
19362 "Pre-AVX2 support only supports v16i8 multiplication");
19363 MVT ExVT = MVT::v8i16;
19365 // Extract the lo parts and sign extend to i16
19367 if (Subtarget.hasSSE41()) {
19368 ALo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, A);
19369 BLo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, B);
19371 const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3,
19372 -1, 4, -1, 5, -1, 6, -1, 7};
19373 ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
19374 BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
19375 ALo = DAG.getBitcast(ExVT, ALo);
19376 BLo = DAG.getBitcast(ExVT, BLo);
19377 ALo = DAG.getNode(ISD::SRA, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
19378 BLo = DAG.getNode(ISD::SRA, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
19381 // Extract the hi parts and sign extend to i16
19383 if (Subtarget.hasSSE41()) {
19384 const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
19385 -1, -1, -1, -1, -1, -1, -1, -1};
19386 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
19387 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
19388 AHi = DAG.getNode(X86ISD::VSEXT, dl, ExVT, AHi);
19389 BHi = DAG.getNode(X86ISD::VSEXT, dl, ExVT, BHi);
19391 const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11,
19392 -1, 12, -1, 13, -1, 14, -1, 15};
19393 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
19394 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
19395 AHi = DAG.getBitcast(ExVT, AHi);
19396 BHi = DAG.getBitcast(ExVT, BHi);
19397 AHi = DAG.getNode(ISD::SRA, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
19398 BHi = DAG.getNode(ISD::SRA, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
19401   // Multiply, mask the lower 8 bits of the lo/hi results and pack.
19402 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
19403 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
19404 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
19405 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
19406 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
19409 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
19410 if (VT == MVT::v4i32) {
19411 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
19412 "Should not custom lower when pmuldq is available!");
19414 // Extract the odd parts.
19415 static const int UnpackMask[] = { 1, -1, 3, -1 };
19416 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
19417 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
19419 // Multiply the even parts.
19420 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
19421 // Now multiply odd parts.
19422 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
19424 Evens = DAG.getBitcast(VT, Evens);
19425 Odds = DAG.getBitcast(VT, Odds);
19427     // Merge the two vectors back together with a shuffle. This expands into 2 shuffles.
19429 static const int ShufMask[] = { 0, 4, 2, 6 };
19430 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
19433 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
19434 "Only know how to lower V2I64/V4I64/V8I64 multiply");
19436 // Ahi = psrlqi(a, 32);
19437 // Bhi = psrlqi(b, 32);
19439 // AloBlo = pmuludq(a, b);
19440 // AloBhi = pmuludq(a, Bhi);
19441 // AhiBlo = pmuludq(Ahi, b);
19443 // AloBhi = psllqi(AloBhi, 32);
19444 // AhiBlo = psllqi(AhiBlo, 32);
19445 // return AloBlo + AloBhi + AhiBlo;
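  //
  // This follows from a*b = (Ahi*2^32 + Alo) * (Bhi*2^32 + Blo); the
  // AhiBhi*2^64 term vanishes modulo 2^64, so the low 64 bits are
  // AloBlo + ((AloBhi + AhiBlo) << 32), which is what the sums below compute.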
19447 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
19448 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
19450 SDValue AhiBlo = Ahi;
19451 SDValue AloBhi = Bhi;
19452 // Bit cast to 32-bit vectors for MULUDQ
19453 MVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
19454 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
19455 A = DAG.getBitcast(MulVT, A);
19456 B = DAG.getBitcast(MulVT, B);
19457 Ahi = DAG.getBitcast(MulVT, Ahi);
19458 Bhi = DAG.getBitcast(MulVT, Bhi);
19460 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
19461   // After shifting right by constant values, the result may be all-zero.
19462 if (!ISD::isBuildVectorAllZeros(Ahi.getNode())) {
19463 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
19464 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
19466 if (!ISD::isBuildVectorAllZeros(Bhi.getNode())) {
19467 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
19468 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
19471 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
19472 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
19475 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
19476 SelectionDAG &DAG) {
19478 MVT VT = Op.getSimpleValueType();
19480 // Decompose 256-bit ops into smaller 128-bit ops.
19481 if (VT.is256BitVector() && !Subtarget.hasInt256())
19482 return Lower256IntArith(Op, DAG);
19484 // Only i8 vectors should need custom lowering after this.
19485 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
19486 "Unsupported vector type");
19488 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
19489 // logical shift down the upper half and pack back to i8.
19490 SDValue A = Op.getOperand(0);
19491 SDValue B = Op.getOperand(1);
19493 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
19494 // and then ashr/lshr the upper bits down to the lower bits before multiply.
19495 unsigned Opcode = Op.getOpcode();
19496 unsigned ExShift = (ISD::MULHU == Opcode ? ISD::SRL : ISD::SRA);
19497 unsigned ExSSE41 = (ISD::MULHU == Opcode ? X86ISD::VZEXT : X86ISD::VSEXT);
19499 // AVX2 implementations - extend xmm subvectors to ymm.
19500 if (Subtarget.hasInt256()) {
19501 SDValue Lo = DAG.getIntPtrConstant(0, dl);
19502 SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
19504 if (VT == MVT::v32i8) {
19505 SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Lo);
19506 SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Lo);
19507 SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Hi);
19508 SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Hi);
19509 ALo = DAG.getNode(ExSSE41, dl, MVT::v16i16, ALo);
19510 BLo = DAG.getNode(ExSSE41, dl, MVT::v16i16, BLo);
19511 AHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, AHi);
19512 BHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, BHi);
19513 Lo = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
19514 DAG.getNode(ISD::MUL, dl, MVT::v16i16, ALo, BLo),
19515 DAG.getConstant(8, dl, MVT::v16i16));
19516 Hi = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
19517 DAG.getNode(ISD::MUL, dl, MVT::v16i16, AHi, BHi),
19518 DAG.getConstant(8, dl, MVT::v16i16));
19519 // The ymm variant of PACKUS treats the 128-bit lanes separately, so before
19520 // using PACKUS we need to permute the inputs to the correct lo/hi xmm lane.
19521 const int LoMask[] = {0, 1, 2, 3, 4, 5, 6, 7,
19522 16, 17, 18, 19, 20, 21, 22, 23};
19523 const int HiMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
19524 24, 25, 26, 27, 28, 29, 30, 31};
19525 return DAG.getNode(X86ISD::PACKUS, dl, VT,
19526 DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, LoMask),
19527 DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, HiMask));
19530 SDValue ExA = DAG.getNode(ExSSE41, dl, MVT::v16i16, A);
19531 SDValue ExB = DAG.getNode(ExSSE41, dl, MVT::v16i16, B);
19532 SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v16i16, ExA, ExB);
19533 SDValue MulH = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
19534 DAG.getConstant(8, dl, MVT::v16i16));
19535 Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Lo);
19536 Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Hi);
19537 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
19540 assert(VT == MVT::v16i8 &&
19541 "Pre-AVX2 support only supports v16i8 multiplication");
19542 MVT ExVT = MVT::v8i16;
19544 // Extract the lo parts and zero/sign extend to i16.
19546 if (Subtarget.hasSSE41()) {
19547 ALo = DAG.getNode(ExSSE41, dl, ExVT, A);
19548 BLo = DAG.getNode(ExSSE41, dl, ExVT, B);
19550 const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3,
19551 -1, 4, -1, 5, -1, 6, -1, 7};
19552 ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
19553 BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
19554 ALo = DAG.getBitcast(ExVT, ALo);
19555 BLo = DAG.getBitcast(ExVT, BLo);
19556 ALo = DAG.getNode(ExShift, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
19557 BLo = DAG.getNode(ExShift, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
19560 // Extract the hi parts and zero/sign extend to i16.
19562 if (Subtarget.hasSSE41()) {
19563 const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
19564 -1, -1, -1, -1, -1, -1, -1, -1};
19565 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
19566 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
19567 AHi = DAG.getNode(ExSSE41, dl, ExVT, AHi);
19568 BHi = DAG.getNode(ExSSE41, dl, ExVT, BHi);
19570 const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11,
19571 -1, 12, -1, 13, -1, 14, -1, 15};
19572 AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
19573 BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
19574 AHi = DAG.getBitcast(ExVT, AHi);
19575 BHi = DAG.getBitcast(ExVT, BHi);
19576 AHi = DAG.getNode(ExShift, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
19577 BHi = DAG.getNode(ExShift, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
19580   // Multiply, lshr the upper 8 bits down to the lower 8 bits of the lo/hi results and
19581 // pack back to v16i8.
19582 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
19583 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
19584 RLo = DAG.getNode(ISD::SRL, dl, ExVT, RLo, DAG.getConstant(8, dl, ExVT));
19585 RHi = DAG.getNode(ISD::SRL, dl, ExVT, RHi, DAG.getConstant(8, dl, ExVT));
19586 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
19589 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
19590 assert(Subtarget.isTargetWin64() && "Unexpected target");
19591 EVT VT = Op.getValueType();
19592 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
19593 "Unexpected return type for lowering");
19597 switch (Op->getOpcode()) {
19598 default: llvm_unreachable("Unexpected request for libcall!");
19599 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
19600 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
19601 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
19602 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
19603 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
19604 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
19608 SDValue InChain = DAG.getEntryNode();
19610 TargetLowering::ArgListTy Args;
19611 TargetLowering::ArgListEntry Entry;
19612 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
19613 EVT ArgVT = Op->getOperand(i).getValueType();
19614 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
19615 "Unexpected argument type for lowering");
19616 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
19617 Entry.Node = StackPtr;
19618 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
19619 MachinePointerInfo(), /* Alignment = */ 16);
19620 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19621 Entry.Ty = PointerType::get(ArgTy,0);
19622 Entry.isSExt = false;
19623 Entry.isZExt = false;
19624 Args.push_back(Entry);
19627 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
19628 getPointerTy(DAG.getDataLayout()));
19630 TargetLowering::CallLoweringInfo CLI(DAG);
19631 CLI.setDebugLoc(dl).setChain(InChain)
19632 .setCallee(getLibcallCallingConv(LC),
19633 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
19634 Callee, std::move(Args))
19635 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
19637 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
19638 return DAG.getBitcast(VT, CallInfo.first);
19641 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget &Subtarget,
19642 SelectionDAG &DAG) {
19643 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
19644 MVT VT = Op0.getSimpleValueType();
19647 // Decompose 256-bit ops into smaller 128-bit ops.
19648 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
19649 unsigned Opcode = Op.getOpcode();
19650 unsigned NumElems = VT.getVectorNumElements();
19651 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), NumElems / 2);
19652 SDValue Lo0 = extract128BitVector(Op0, 0, DAG, dl);
19653 SDValue Lo1 = extract128BitVector(Op1, 0, DAG, dl);
19654 SDValue Hi0 = extract128BitVector(Op0, NumElems / 2, DAG, dl);
19655 SDValue Hi1 = extract128BitVector(Op1, NumElems / 2, DAG, dl);
19656 SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Lo0, Lo1);
19657 SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Hi0, Hi1);
19659 DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(0), Hi.getValue(0)),
19660 DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(1), Hi.getValue(1))
19662 return DAG.getMergeValues(Ops, dl);
19665 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
19666 (VT == MVT::v8i32 && Subtarget.hasInt256()));
19668   // PMULxD operations multiply each even value (starting at 0) of LHS with
19669   // the related value of RHS and produce a widened result.
19670   // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
19671   //       => <2 x i64> <ae|cg>
19673   // In other words, to have all the results, we need to perform two PMULxD:
19674   // 1. one with the even values.
19675   // 2. one with the odd values.
19676   // To achieve #2, we need to place the odd values at an even position.
19678   // Place the odd values at an even position (basically, shift all values 1
19679   // step to the left):
19680 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
19681 // <a|b|c|d> => <b|undef|d|undef>
19682 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0,
19683 makeArrayRef(&Mask[0], VT.getVectorNumElements()));
19684 // <e|f|g|h> => <f|undef|h|undef>
19685 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1,
19686 makeArrayRef(&Mask[0], VT.getVectorNumElements()));
19688 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
19690 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
19691 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
19693 (!IsSigned || !Subtarget.hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
19694 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
19695 // => <2 x i64> <ae|cg>
19696 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
19697 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
19698 // => <2 x i64> <bf|dh>
19699 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
19701 // Shuffle it back into the right order.
19702 SDValue Highs, Lows;
19703 if (VT == MVT::v8i32) {
19704 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
19705 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
19706 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
19707 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
19709 const int HighMask[] = {1, 5, 3, 7};
19710 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
19711 const int LowMask[] = {0, 4, 2, 6};
19712 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
19715   // If we have a signed multiply but no PMULDQ, fix up the high parts of an
19716   // unsigned multiply.
19717 if (IsSigned && !Subtarget.hasSSE41()) {
19718 SDValue ShAmt = DAG.getConstant(
19720 DAG.getTargetLoweringInfo().getShiftAmountTy(VT, DAG.getDataLayout()));
19721 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
19722 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
19723 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
19724 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
19726 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
19727 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
19730   // The first result of MUL_LOHI is actually the low value, followed by the high value.
19732 SDValue Ops[] = {Lows, Highs};
19733 return DAG.getMergeValues(Ops, dl);
19736 // Return true if the required (according to Opcode) shift-imm form is natively
19737 // supported by the Subtarget
19738 static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
19740 if (VT.getScalarSizeInBits() < 16)
19743 if (VT.is512BitVector() &&
19744 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
19747 bool LShift = VT.is128BitVector() ||
19748 (VT.is256BitVector() && Subtarget.hasInt256());
19750 bool AShift = LShift && (Subtarget.hasVLX() ||
19751 (VT != MVT::v2i64 && VT != MVT::v4i64));
19752 return (Opcode == ISD::SRA) ? AShift : LShift;
19755 // The shift amount is a variable, but it is the same for all vector lanes.
19756 // These instructions are defined together with shift-immediate.
19758 bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
19760 return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
19763 // Return true if the required (according to Opcode) variable-shift form is
19764 // natively supported by the Subtarget
19765 static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
19768 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
19771 // vXi16 supported only on AVX-512, BWI
19772 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
19775 if (VT.is512BitVector() || Subtarget.hasVLX())
19778 bool LShift = VT.is128BitVector() || VT.is256BitVector();
19779 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
19780 return (Opcode == ISD::SRA) ? AShift : LShift;
19783 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
19784 const X86Subtarget &Subtarget) {
19785 MVT VT = Op.getSimpleValueType();
19787 SDValue R = Op.getOperand(0);
19788 SDValue Amt = Op.getOperand(1);
19790 unsigned X86Opc = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHLI :
19791 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
19793 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
19794 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
19795 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
19796 SDValue Ex = DAG.getBitcast(ExVT, R);
19798 if (ShiftAmt >= 32) {
19799 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
19801 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
19802 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
19803 ShiftAmt - 32, DAG);
19804 if (VT == MVT::v2i64)
19805 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
19806 if (VT == MVT::v4i64)
19807 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
19808 {9, 1, 11, 3, 13, 5, 15, 7});
19810 // SRA upper i32, SHL whole i64 and select lower i32.
19811 SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
19814 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
19815 Lower = DAG.getBitcast(ExVT, Lower);
19816 if (VT == MVT::v2i64)
19817 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
19818 if (VT == MVT::v4i64)
19819 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
19820 {8, 1, 10, 3, 12, 5, 14, 7});
19822 return DAG.getBitcast(VT, Ex);
19825 // Optimize shl/srl/sra with constant shift amount.
19826 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
19827 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
19828 uint64_t ShiftAmt = ShiftConst->getZExtValue();
19830 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
19831 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
19833 // i64 SRA needs to be performed as partial shifts.
19834 if ((VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
19835 Op.getOpcode() == ISD::SRA && !Subtarget.hasXOP())
19836 return ArithmeticShiftRight64(ShiftAmt);
19838 if (VT == MVT::v16i8 ||
19839 (Subtarget.hasInt256() && VT == MVT::v32i8) ||
19840 VT == MVT::v64i8) {
19841 unsigned NumElts = VT.getVectorNumElements();
19842 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
19844 // Simple i8 add case
19845 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
19846 return DAG.getNode(ISD::ADD, dl, VT, R, R);
19848 // ashr(R, 7) === cmp_slt(R, 0)
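      // (Shifting an i8 lane right arithmetically by 7 broadcasts its sign
      // bit, yielding -1 for negative lanes and 0 otherwise, exactly what the
      // signed compare 0 > R produces below.)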
19849 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
19850 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
19851 if (VT.is512BitVector()) {
19852 assert(VT == MVT::v64i8 && "Unexpected element type!");
19853 SDValue CMP = DAG.getNode(X86ISD::PCMPGTM, dl, MVT::v64i1, Zeros, R);
19854 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
19856 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
19859 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
19860 if (VT == MVT::v16i8 && Subtarget.hasXOP())
19863 if (Op.getOpcode() == ISD::SHL) {
19864 // Make a large shift.
19865 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT,
19867 SHL = DAG.getBitcast(VT, SHL);
19868 // Zero out the rightmost bits.
19869 return DAG.getNode(ISD::AND, dl, VT, SHL,
19870 DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
19872 if (Op.getOpcode() == ISD::SRL) {
19873 // Make a large shift.
19874 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT,
19876 SRL = DAG.getBitcast(VT, SRL);
19877 // Zero out the leftmost bits.
19878 return DAG.getNode(ISD::AND, dl, VT, SRL,
19879 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
19881 if (Op.getOpcode() == ISD::SRA) {
19882 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
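// Worked example (i8 lane, ShiftAmt = 2): R = 0xF0 (-16). lshr gives 0x3C and
// Mask = 128 >> 2 = 0x20 marks where the original sign bit ended up; the xor
// gives 0x1C and the subtract propagates the sign, yielding 0xFC (-4) == -16 >>s 2.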
19883 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
19885 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
19886 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
19887 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
19890 llvm_unreachable("Unknown shift opcode.");
19895 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
19896 if (!Subtarget.is64Bit() && !Subtarget.hasXOP() &&
19897 (VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64))) {
19899 // Peek through any splat that was introduced for i64 shift vectorization.
19900 int SplatIndex = -1;
19901 if (ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt.getNode()))
19902 if (SVN->isSplat()) {
19903 SplatIndex = SVN->getSplatIndex();
19904 Amt = Amt.getOperand(0);
19905 assert(SplatIndex < (int)VT.getVectorNumElements() &&
19906 "Splat shuffle referencing second operand");
19909 if (Amt.getOpcode() != ISD::BITCAST ||
19910 Amt.getOperand(0).getOpcode() != ISD::BUILD_VECTOR)
19913 Amt = Amt.getOperand(0);
19914 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19915 VT.getVectorNumElements();
19916 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
19917 uint64_t ShiftAmt = 0;
19918 unsigned BaseOp = (SplatIndex < 0 ? 0 : SplatIndex * Ratio);
19919 for (unsigned i = 0; i != Ratio; ++i) {
19920 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i + BaseOp));
19924 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
19927 // Check remaining shift amounts (if not a splat).
19928 if (SplatIndex < 0) {
19929 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19930 uint64_t ShAmt = 0;
19931 for (unsigned j = 0; j != Ratio; ++j) {
19932 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
19936 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
19938 if (ShAmt != ShiftAmt)
19943 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
19944 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
19946 if (Op.getOpcode() == ISD::SRA)
19947 return ArithmeticShiftRight64(ShiftAmt);
19953 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
19954 const X86Subtarget &Subtarget) {
19955 MVT VT = Op.getSimpleValueType();
19957 SDValue R = Op.getOperand(0);
19958 SDValue Amt = Op.getOperand(1);
19960 unsigned X86OpcI = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHLI :
19961 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
19963 unsigned X86OpcV = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHL :
19964 (Op.getOpcode() == ISD::SRL) ? X86ISD::VSRL : X86ISD::VSRA;
19966 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode())) {
19968 MVT EltVT = VT.getVectorElementType();
19970 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
19971 // Check if this build_vector node is doing a splat.
19972 // If so, then set BaseShAmt equal to the splat value.
19973 BaseShAmt = BV->getSplatValue();
19974 if (BaseShAmt && BaseShAmt.isUndef())
19975 BaseShAmt = SDValue();
19977 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
19978 Amt = Amt.getOperand(0);
19980 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
19981 if (SVN && SVN->isSplat()) {
19982 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
19983 SDValue InVec = Amt.getOperand(0);
19984 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
19985 assert((SplatIdx < InVec.getSimpleValueType().getVectorNumElements()) &&
19986 "Unexpected shuffle index found!");
19987 BaseShAmt = InVec.getOperand(SplatIdx);
19988 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
19989 if (ConstantSDNode *C =
19990 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
19991 if (C->getZExtValue() == SplatIdx)
19992 BaseShAmt = InVec.getOperand(1);
19997 // Avoid introducing an extract element from a shuffle.
19998 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
19999 DAG.getIntPtrConstant(SplatIdx, dl));
20003 if (BaseShAmt.getNode()) {
20004 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
20005 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
20006 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
20007 else if (EltVT.bitsLT(MVT::i32))
20008 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
20010 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, DAG);
20014 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
20015 if (!Subtarget.is64Bit() && VT == MVT::v2i64 &&
20016 Amt.getOpcode() == ISD::BITCAST &&
20017 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
20018 Amt = Amt.getOperand(0);
20019 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
20020 VT.getVectorNumElements();
20021 std::vector<SDValue> Vals(Ratio);
20022 for (unsigned i = 0; i != Ratio; ++i)
20023 Vals[i] = Amt.getOperand(i);
20024 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
20025 for (unsigned j = 0; j != Ratio; ++j)
20026 if (Vals[j] != Amt.getOperand(i + j))
20030 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
20031 return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
20036 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
20037 SelectionDAG &DAG) {
20038 MVT VT = Op.getSimpleValueType();
20040 SDValue R = Op.getOperand(0);
20041 SDValue Amt = Op.getOperand(1);
20042 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
20044 assert(VT.isVector() && "Custom lowering only for vector shifts!");
20045 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
20047 if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
20050 if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
20053 if (SupportedVectorVarShift(VT, Subtarget, Op.getOpcode()))
20056 // XOP has 128-bit variable logical/arithmetic shifts.
20057 // +ve/-ve Amt = shift left/right.
20058 if (Subtarget.hasXOP() &&
20059 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
20060 VT == MVT::v8i16 || VT == MVT::v16i8)) {
20061 if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) {
20062 SDValue Zero = getZeroVector(VT, Subtarget, DAG, dl);
20063 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
20065 if (Op.getOpcode() == ISD::SHL || Op.getOpcode() == ISD::SRL)
20066 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
20067 if (Op.getOpcode() == ISD::SRA)
20068 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
20071 // 2i64 vector logical shifts can efficiently avoid scalarization - do the
20072 // shifts per-lane and then shuffle the partial results back together.
20073 if (VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) {
20074 // Splat the shift amounts so the scalar shifts above will catch it.
20075 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
20076 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
20077 SDValue R0 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt0);
20078 SDValue R1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt1);
20079 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
20082 // i64 vector arithmetic shift can be emulated with the transform:
20083 // M = lshr(SIGN_BIT, Amt)
20084 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
20085 if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
20086 Op.getOpcode() == ISD::SRA) {
20087 SDValue S = DAG.getConstant(APInt::getSignBit(64), dl, VT);
20088 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
20089 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
20090 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
20091 R = DAG.getNode(ISD::SUB, dl, VT, R, M);
20095 // If possible, lower this packed shift into a vector multiply instead of
20096 // expanding it into a sequence of scalar shifts.
20097 // Do this only if the vector shift count is a constant build_vector.
20098 if (ConstantAmt && Op.getOpcode() == ISD::SHL &&
20099 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
20100 (Subtarget.hasInt256() && VT == MVT::v16i16))) {
20101 SmallVector<SDValue, 8> Elts;
20102 MVT SVT = VT.getVectorElementType();
20103 unsigned SVTBits = SVT.getSizeInBits();
20104 APInt One(SVTBits, 1);
20105 unsigned NumElems = VT.getVectorNumElements();
20107 for (unsigned i = 0; i != NumElems; ++i) {
20108 SDValue Op = Amt->getOperand(i);
20109 if (Op->isUndef()) {
20110 Elts.push_back(Op);
20114 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
20115 APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
20116 uint64_t ShAmt = C.getZExtValue();
20117 if (ShAmt >= SVTBits) {
20118 Elts.push_back(DAG.getUNDEF(SVT));
20121 Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
20123 SDValue BV = DAG.getBuildVector(VT, dl, Elts);
20124 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
20127 // Lower SHL with variable shift amount.
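// The sequence below builds, per lane, the float 2^Amt by shifting the shift
// amount into the exponent field and adding the bit pattern of 1.0f
// (0x3f800000); converting back to integer and multiplying by R then gives
// R << Amt. For example, Amt = 3 yields 0x41000000 == 8.0f, so R is multiplied
// by 8.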
20128 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
20129 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
20131 Op = DAG.getNode(ISD::ADD, dl, VT, Op,
20132 DAG.getConstant(0x3f800000U, dl, VT));
20133 Op = DAG.getBitcast(MVT::v4f32, Op);
20134 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
20135 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
20138 // If possible, lower this shift as a sequence of two shifts by
20139 // constant plus a MOVSS/MOVSD instead of scalarizing it.
20141 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
20143 // Could be rewritten as:
20144 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
20146 // The advantage is that the two shifts from the example would be
20147 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
20148 // the vector shift into four scalar shifts plus four pairs of vector
// insert/extract.
20150 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32)) {
20151 unsigned TargetOpcode = X86ISD::MOVSS;
20152 bool CanBeSimplified;
20153 // The splat value for the first packed shift (the 'X' from the example).
20154 SDValue Amt1 = Amt->getOperand(0);
20155 // The splat value for the second packed shift (the 'Y' from the example).
20156 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) : Amt->getOperand(2);
20158 // See if it is possible to replace this node with a sequence of
20159 // two shifts followed by a MOVSS/MOVSD
20160 if (VT == MVT::v4i32) {
20161 // Check if it is legal to use a MOVSS.
20162 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
20163 Amt2 == Amt->getOperand(3);
20164 if (!CanBeSimplified) {
20165 // Otherwise, check if we can still simplify this node using a MOVSD.
20166 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
20167 Amt->getOperand(2) == Amt->getOperand(3);
20168 TargetOpcode = X86ISD::MOVSD;
20169 Amt2 = Amt->getOperand(2);
20172 // Do similar checks for the case where the machine value type
// is MVT::v8i16.
20174 CanBeSimplified = Amt1 == Amt->getOperand(1);
20175 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
20176 CanBeSimplified = Amt2 == Amt->getOperand(i);
20178 if (!CanBeSimplified) {
20179 TargetOpcode = X86ISD::MOVSD;
20180 CanBeSimplified = true;
20181 Amt2 = Amt->getOperand(4);
20182 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
20183 CanBeSimplified = Amt1 == Amt->getOperand(i);
20184 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
20185 CanBeSimplified = Amt2 == Amt->getOperand(j);
20189 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
20190 isa<ConstantSDNode>(Amt2)) {
20191 // Replace this node with two shifts followed by a MOVSS/MOVSD.
20192 MVT CastVT = MVT::v4i32;
20194 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), dl, VT);
20195 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
20197 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), dl, VT);
20198 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
20199 if (TargetOpcode == X86ISD::MOVSD)
20200 CastVT = MVT::v2i64;
20201 SDValue BitCast1 = DAG.getBitcast(CastVT, Shift1);
20202 SDValue BitCast2 = DAG.getBitcast(CastVT, Shift2);
20203 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
20205 return DAG.getBitcast(VT, Result);
20209 // v4i32 Non Uniform Shifts.
20210 // If the shift amount is constant we can shift each lane using the SSE2
20211 // immediate shifts, else we need to zero-extend each lane to the lower i64
20212 // and shift using the SSE2 variable shifts.
20213 // The separate results can then be blended together.
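// For example, with R0..R3 holding R shifted by Amt[0]..Amt[3], the blends
// below pick lane 0 from R0 and lane 2 from R2, lane 1 from R1 and lane 3 from
// R3, and the final shuffle interleaves them into {R0[0], R1[1], R2[2], R3[3]}.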
20214 if (VT == MVT::v4i32) {
20215 unsigned Opc = Op.getOpcode();
20216 SDValue Amt0, Amt1, Amt2, Amt3;
20218 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
20219 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
20220 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
20221 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
20223 // ISD::SHL is handled above but we include it here for completeness.
switch (Opc) {
20226 default: llvm_unreachable("Unknown target vector shift node");
20228 case ISD::SHL: Opc = X86ISD::VSHL; break;
20231 case ISD::SRL: Opc = X86ISD::VSRL; break;
20234 case ISD::SRA: Opc = X86ISD::VSRA; break;
}
20237 // The SSE2 shifts use the lower i64 as the same shift amount for
20238 // all lanes and the upper i64 is ignored. These shuffle masks
20239 // optimally zero-extend each lane on SSE2/SSE41/AVX targets.
20240 SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
20241 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
20242 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
20243 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
20244 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
20247 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
20248 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
20249 SDValue R2 = DAG.getNode(Opc, dl, VT, R, Amt2);
20250 SDValue R3 = DAG.getNode(Opc, dl, VT, R, Amt3);
20251 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
20252 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
20253 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
20256 if (VT == MVT::v16i8 ||
20257 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP())) {
20258 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
20259 unsigned ShiftOpcode = Op->getOpcode();
20261 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
20262 // On SSE41 targets we make use of the fact that VSELECT lowers
20263 // to PBLENDVB which selects bytes based just on the sign bit.
20264 if (Subtarget.hasSSE41()) {
20265 V0 = DAG.getBitcast(VT, V0);
20266 V1 = DAG.getBitcast(VT, V1);
20267 Sel = DAG.getBitcast(VT, Sel);
20268 return DAG.getBitcast(SelVT,
20269 DAG.getNode(ISD::VSELECT, dl, VT, Sel, V0, V1));
20271 // On pre-SSE41 targets we test for the sign bit by comparing to
20272 // zero - a negative value will set all bits of the lanes to true
20273 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
20274 SDValue Z = getZeroVector(SelVT, Subtarget, DAG, dl);
20275 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
20276 return DAG.getNode(ISD::VSELECT, dl, SelVT, C, V0, V1);
20279 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
20280 // We can safely do this using i16 shifts as we're only interested in
20281 // the 3 lower bits of each byte.
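// Only bits 2:0 of each byte amount matter (shifts of 8 or more are undefined
// anyway), so a << 5 moves bit 2 into the sign bit tested by the selects
// below; doubling the mask after each step exposes bit 1 and then bit 0.
// E.g. an amount of 5 (0b101) applies the shift-by-4 and shift-by-1 steps.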
20282 Amt = DAG.getBitcast(ExtVT, Amt);
20283 Amt = DAG.getNode(ISD::SHL, dl, ExtVT, Amt, DAG.getConstant(5, dl, ExtVT));
20284 Amt = DAG.getBitcast(VT, Amt);
20286 if (Op->getOpcode() == ISD::SHL || Op->getOpcode() == ISD::SRL) {
20287 // r = VSELECT(r, shift(r, 4), a);
20289 DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
20290 R = SignBitSelect(VT, Amt, M, R);
20293 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
20295 // r = VSELECT(r, shift(r, 2), a);
20296 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
20297 R = SignBitSelect(VT, Amt, M, R);
20300 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
20302 // return VSELECT(r, shift(r, 1), a);
20303 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
20304 R = SignBitSelect(VT, Amt, M, R);
20308 if (Op->getOpcode() == ISD::SRA) {
20309 // For SRA we need to unpack each byte to the higher byte of an i16 vector
20310 // so we can correctly sign extend. We don't care what happens to the
// lower byte.
20312 SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt);
20313 SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt);
20314 SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R);
20315 SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R);
20316 ALo = DAG.getBitcast(ExtVT, ALo);
20317 AHi = DAG.getBitcast(ExtVT, AHi);
20318 RLo = DAG.getBitcast(ExtVT, RLo);
20319 RHi = DAG.getBitcast(ExtVT, RHi);
20321 // r = VSELECT(r, shift(r, 4), a);
20322 SDValue MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
20323 DAG.getConstant(4, dl, ExtVT));
20324 SDValue MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
20325 DAG.getConstant(4, dl, ExtVT));
20326 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
20327 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
20330 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
20331 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
20333 // r = VSELECT(r, shift(r, 2), a);
20334 MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
20335 DAG.getConstant(2, dl, ExtVT));
20336 MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
20337 DAG.getConstant(2, dl, ExtVT));
20338 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
20339 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
20342 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
20343 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
20345 // r = VSELECT(r, shift(r, 1), a);
20346 MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
20347 DAG.getConstant(1, dl, ExtVT));
20348 MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
20349 DAG.getConstant(1, dl, ExtVT));
20350 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
20351 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
20353 // Logical shift the result back to the lower byte, leaving a zero upper byte,
20355 // meaning that we can safely pack with PACKUSWB.
20357 DAG.getNode(ISD::SRL, dl, ExtVT, RLo, DAG.getConstant(8, dl, ExtVT));
20359 DAG.getNode(ISD::SRL, dl, ExtVT, RHi, DAG.getConstant(8, dl, ExtVT));
20360 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
20364 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
20365 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
20366 // solution better.
20367 if (Subtarget.hasInt256() && VT == MVT::v8i16) {
20368 MVT ExtVT = MVT::v8i32;
20370 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
20371 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
20372 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, ExtVT, Amt);
20373 return DAG.getNode(ISD::TRUNCATE, dl, VT,
20374 DAG.getNode(Op.getOpcode(), dl, ExtVT, R, Amt));
20377 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
20378 MVT ExtVT = MVT::v8i32;
20379 SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
20380 SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z);
20381 SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Amt, Z);
20382 SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Z, R);
20383 SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Z, R);
20384 ALo = DAG.getBitcast(ExtVT, ALo);
20385 AHi = DAG.getBitcast(ExtVT, AHi);
20386 RLo = DAG.getBitcast(ExtVT, RLo);
20387 RHi = DAG.getBitcast(ExtVT, RHi);
20388 SDValue Lo = DAG.getNode(Op.getOpcode(), dl, ExtVT, RLo, ALo);
20389 SDValue Hi = DAG.getNode(Op.getOpcode(), dl, ExtVT, RHi, AHi);
20390 Lo = DAG.getNode(ISD::SRL, dl, ExtVT, Lo, DAG.getConstant(16, dl, ExtVT));
20391 Hi = DAG.getNode(ISD::SRL, dl, ExtVT, Hi, DAG.getConstant(16, dl, ExtVT));
20392 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
20395 if (VT == MVT::v8i16) {
20396 unsigned ShiftOpcode = Op->getOpcode();
20398 // If we have a constant shift amount, the non-SSE41 path is best as
20399 // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
20400 bool UseSSE41 = Subtarget.hasSSE41() &&
20401 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
20403 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
20404 // On SSE41 targets we make use of the fact that VSELECT lowers
20405 // to PBLENDVB which selects bytes based just on the sign bit.
20407 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
20408 V0 = DAG.getBitcast(ExtVT, V0);
20409 V1 = DAG.getBitcast(ExtVT, V1);
20410 Sel = DAG.getBitcast(ExtVT, Sel);
20411 return DAG.getBitcast(
20412 VT, DAG.getNode(ISD::VSELECT, dl, ExtVT, Sel, V0, V1));
20414 // On pre-SSE41 targets we splat the sign bit - a negative value will
20415 // set all bits of the lanes to true and VSELECT uses that in
20416 // its OR(AND(V0,C),AND(V1,~C)) lowering.
20418 DAG.getNode(ISD::SRA, dl, VT, Sel, DAG.getConstant(15, dl, VT));
20419 return DAG.getNode(ISD::VSELECT, dl, VT, C, V0, V1);
20422 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
20424 // On SSE41 targets we need to replicate the shift mask in both
20425 // bytes for PBLENDVB.
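// Only bits 3:0 of each i16 amount matter, so a << 12 places bit 3 in the
// sign bit for the shift-by-8 select below, with bits 2..0 exposed by the
// subsequent doublings for the 4/2/1 steps. On SSE41 the OR replicates the
// mask into both bytes because PBLENDVB selects per byte.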
20428 DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)),
20429 DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)));
20431 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT));
20434 // r = VSELECT(r, shift(r, 8), a);
20435 SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT));
20436 R = SignBitSelect(Amt, M, R);
20439 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
20441 // r = VSELECT(r, shift(r, 4), a);
20442 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
20443 R = SignBitSelect(Amt, M, R);
20446 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
20448 // r = VSELECT(r, shift(r, 2), a);
20449 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
20450 R = SignBitSelect(Amt, M, R);
20453 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
20455 // return VSELECT(r, shift(r, 1), a);
20456 M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
20457 R = SignBitSelect(Amt, M, R);
20461 // Decompose 256-bit shifts into smaller 128-bit shifts.
20462 if (VT.is256BitVector())
20463 return Lower256IntArith(Op, DAG);
20468 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
20469 SelectionDAG &DAG) {
20470 MVT VT = Op.getSimpleValueType();
20472 SDValue R = Op.getOperand(0);
20473 SDValue Amt = Op.getOperand(1);
20475 assert(VT.isVector() && "Custom lowering only for vector rotates!");
20476 assert(Subtarget.hasXOP() && "XOP support required for vector rotates!");
20477 assert((Op.getOpcode() == ISD::ROTL) && "Only ROTL supported");
20479 // XOP has 128-bit vector variable + immediate rotates.
20480 // +ve/-ve Amt = rotate left/right.
20482 // Split 256-bit integers.
20483 if (VT.is256BitVector())
20484 return Lower256IntArith(Op, DAG);
20486 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
20488 // Attempt to rotate by immediate.
20489 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
20490 if (auto *RotateConst = BVAmt->getConstantSplatNode()) {
20491 uint64_t RotateAmt = RotateConst->getAPIntValue().getZExtValue();
20492 assert(RotateAmt < VT.getScalarSizeInBits() && "Rotation out of range");
20493 return DAG.getNode(X86ISD::VPROTI, DL, VT, R,
20494 DAG.getConstant(RotateAmt, DL, MVT::i8));
20498 // Use general rotate by variable (per-element).
20499 return DAG.getNode(X86ISD::VPROT, DL, VT, R, Amt);
20502 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
20503 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
20504 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
20505 // looks for this combo and may remove the "setcc" instruction if the "setcc"
20506 // has only one use.
20507 SDNode *N = Op.getNode();
20508 SDValue LHS = N->getOperand(0);
20509 SDValue RHS = N->getOperand(1);
20510 unsigned BaseOp = 0;
20513 switch (Op.getOpcode()) {
20514 default: llvm_unreachable("Unknown ovf instruction!");
20516 // An add of one will be selected as an INC. Note that INC doesn't
20517 // set CF, so we can't do this for UADDO.
20518 if (isOneConstant(RHS)) {
20519 BaseOp = X86ISD::INC;
20520 Cond = X86::COND_O;
20523 BaseOp = X86ISD::ADD;
20524 Cond = X86::COND_O;
20527 BaseOp = X86ISD::ADD;
20528 Cond = X86::COND_B;
20531 // A subtract of one will be selected as a DEC. Note that DEC doesn't
20532 // set CF, so we can't do this for USUBO.
20533 if (isOneConstant(RHS)) {
20534 BaseOp = X86ISD::DEC;
20535 Cond = X86::COND_O;
20538 BaseOp = X86ISD::SUB;
20539 Cond = X86::COND_O;
20542 BaseOp = X86ISD::SUB;
20543 Cond = X86::COND_B;
20546 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
20547 Cond = X86::COND_O;
20549 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
20550 if (N->getValueType(0) == MVT::i8) {
20551 BaseOp = X86ISD::UMUL8;
20552 Cond = X86::COND_O;
20555 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
20557 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
20560 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
20561 DAG.getConstant(X86::COND_O, DL, MVT::i32),
20562 SDValue(Sum.getNode(), 2));
20564 if (N->getValueType(1) == MVT::i1) {
20565 SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
20566 DAG.getValueType(MVT::i1));
20567 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
20569 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
20573 // Also sets EFLAGS.
20574 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
20575 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
20578 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
20579 DAG.getConstant(Cond, DL, MVT::i32),
20580 SDValue(Sum.getNode(), 1));
20582 if (N->getValueType(1) == MVT::i1) {
20583 SetCC = DAG.getNode(ISD::AssertZext, DL, MVT::i8, SetCC,
20584 DAG.getValueType(MVT::i1));
20585 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
20587 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
20590 /// Returns true if the operand type is exactly twice the native width, and
20591 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
20592 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
20593 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
20594 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
20595 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
if (OpWidth == 64)
20598 return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
20599 else if (OpWidth == 128)
20600 return Subtarget.hasCmpxchg16b();
20605 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
20606 return needsCmpXchgNb(SI->getValueOperand()->getType());
20609 // Note: this turns large loads into lock cmpxchg8b/16b.
20610 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
20611 TargetLowering::AtomicExpansionKind
20612 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
20613 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
20614 return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg
20615 : AtomicExpansionKind::None;
20618 TargetLowering::AtomicExpansionKind
20619 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
20620 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
20621 Type *MemType = AI->getType();
20623 // If the operand is too big, we must see if cmpxchg8/16b is available
20624 // and default to library calls otherwise.
20625 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
20626 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
20627 : AtomicExpansionKind::None;
20630 AtomicRMWInst::BinOp Op = AI->getOperation();
20633 llvm_unreachable("Unknown atomic operation");
20634 case AtomicRMWInst::Xchg:
20635 case AtomicRMWInst::Add:
20636 case AtomicRMWInst::Sub:
20637 // It's better to use xadd, xsub or xchg for these in all cases.
20638 return AtomicExpansionKind::None;
20639 case AtomicRMWInst::Or:
20640 case AtomicRMWInst::And:
20641 case AtomicRMWInst::Xor:
20642 // If the atomicrmw's result isn't actually used, we can just add a "lock"
20643 // prefix to a normal instruction for these operations.
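// For example, an 'atomicrmw or' whose result is unused can be emitted as a
// single 'lock or [mem], reg'; the plain RMW instruction does not return the
// old value, which is why a used result forces the cmpxchg loop instead.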
20644 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
20645 : AtomicExpansionKind::None;
20646 case AtomicRMWInst::Nand:
20647 case AtomicRMWInst::Max:
20648 case AtomicRMWInst::Min:
20649 case AtomicRMWInst::UMax:
20650 case AtomicRMWInst::UMin:
20651 // These always require a non-trivial set of data operations on x86. We must
20652 // use a cmpxchg loop.
20653 return AtomicExpansionKind::CmpXChg;
20658 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
20659 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
20660 Type *MemType = AI->getType();
20661 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
20662 // there is no benefit in turning such RMWs into loads, and it is actually
20663 // harmful as it introduces a mfence.
20664 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
20667 auto Builder = IRBuilder<>(AI);
20668 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20669 auto SynchScope = AI->getSynchScope();
20670 // We must restrict the ordering to avoid generating loads with Release or
20671 // ReleaseAcquire orderings.
20672 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
20673 auto Ptr = AI->getPointerOperand();
20675 // Before the load we need a fence. Here is an example lifted from
20676 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
// is required:
// Thread 0:
20679 // x.store(1, relaxed);
20680 // r1 = y.fetch_add(0, release);
// Thread 1:
20682 // y.fetch_add(42, acquire);
20683 // r2 = x.load(relaxed);
20684 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
20685 // lowered to just a load without a fence. A mfence flushes the store buffer,
20686 // making the optimization clearly correct.
20687 // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
20688 // otherwise, we might be able to be more aggressive on relaxed idempotent
20689 // rmw. In practice, they do not look useful, so we don't try to be
20690 // especially clever.
20691 if (SynchScope == SingleThread)
20692 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
20693 // the IR level, so we must wrap it in an intrinsic.
20696 if (!Subtarget.hasMFence())
20697 // FIXME: it might make sense to use a locked operation here but on a
20698 // different cache-line to prevent cache-line bouncing. In practice it
20699 // is probably a small win, and x86 processors without mfence are rare
20700 // enough that we do not bother.
20704 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
20705 Builder.CreateCall(MFence, {});
20707 // Finally we can emit the atomic load.
20708 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
20709 AI->getType()->getPrimitiveSizeInBits());
20710 Loaded->setAtomic(Order, SynchScope);
20711 AI->replaceAllUsesWith(Loaded);
20712 AI->eraseFromParent();
20716 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
20717 SelectionDAG &DAG) {
20719 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
20720 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
20721 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
20722 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
20724 // The only fence that needs an instruction is a sequentially-consistent
20725 // cross-thread fence.
20726 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
20727 FenceScope == CrossThread) {
20728 if (Subtarget.hasMFence())
20729 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
20731 SDValue Chain = Op.getOperand(0);
20732 SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
20734 DAG.getRegister(X86::ESP, MVT::i32), // Base
20735 DAG.getTargetConstant(1, dl, MVT::i8), // Scale
20736 DAG.getRegister(0, MVT::i32), // Index
20737 DAG.getTargetConstant(0, dl, MVT::i32), // Disp
20738 DAG.getRegister(0, MVT::i32), // Segment.
20742 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
20743 return SDValue(Res, 0);
20746 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
20747 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
20750 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
20751 SelectionDAG &DAG) {
20752 MVT T = Op.getSimpleValueType();
20756 switch(T.SimpleTy) {
20757 default: llvm_unreachable("Invalid value type!");
20758 case MVT::i8: Reg = X86::AL; size = 1; break;
20759 case MVT::i16: Reg = X86::AX; size = 2; break;
20760 case MVT::i32: Reg = X86::EAX; size = 4; break;
20762 assert(Subtarget.is64Bit() && "Node not type legal!");
20763 Reg = X86::RAX; size = 8;
20766 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
20767 Op.getOperand(2), SDValue());
20768 SDValue Ops[] = { cpIn.getValue(0),
20771 DAG.getTargetConstant(size, DL, MVT::i8),
20772 cpIn.getValue(1) };
20773 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20774 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
20775 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
20779 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
20780 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
20781 MVT::i32, cpOut.getValue(2));
20782 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
20783 DAG.getConstant(X86::COND_E, DL, MVT::i8),
20786 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
20787 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
20788 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
20792 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
20793 SelectionDAG &DAG) {
20794 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
20795 MVT DstVT = Op.getSimpleValueType();
20797 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
20798 SrcVT == MVT::i64) {
20799 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
20800 if (DstVT != MVT::f64)
20801 // This conversion needs to be expanded.
20804 SDValue Op0 = Op->getOperand(0);
20805 SmallVector<SDValue, 16> Elts;
20809 if (SrcVT.isVector()) {
20810 NumElts = SrcVT.getVectorNumElements();
20811 SVT = SrcVT.getVectorElementType();
20813 // Widen the input vector in the case of MVT::v2i32.
20814 // Example: from MVT::v2i32 to MVT::v4i32.
20815 for (unsigned i = 0, e = NumElts; i != e; ++i)
20816 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, Op0,
20817 DAG.getIntPtrConstant(i, dl)));
20819 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
20820 "Unexpected source type in LowerBITCAST");
20821 Elts.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op0,
20822 DAG.getIntPtrConstant(0, dl)));
20823 Elts.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op0,
20824 DAG.getIntPtrConstant(1, dl)));
20828 // Explicitly mark the extra elements as Undef.
20829 Elts.append(NumElts, DAG.getUNDEF(SVT));
20831 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20832 SDValue BV = DAG.getBuildVector(NewVT, dl, Elts);
20833 SDValue ToV2F64 = DAG.getBitcast(MVT::v2f64, BV);
20834 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
20835 DAG.getIntPtrConstant(0, dl));
20838 assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() &&
20839 Subtarget.hasMMX() && "Unexpected custom BITCAST");
20840 assert((DstVT == MVT::i64 ||
20841 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
20842 "Unexpected custom BITCAST");
20843 // i64 <=> MMX conversions are Legal.
20844 if (SrcVT==MVT::i64 && DstVT.isVector())
20846 if (DstVT==MVT::i64 && SrcVT.isVector())
20848 // MMX <=> MMX conversions are Legal.
20849 if (SrcVT.isVector() && DstVT.isVector())
20851 // All other conversions need to be expanded.
20855 /// Compute the horizontal sum of bytes in V for the elements of VT.
20857 /// Requires V to be a byte vector and VT to be an integer vector type with
20858 /// wider elements than V's type. The width of the elements of VT determines
20859 /// how many bytes of V are summed horizontally to produce each element of the
/// result.
20861 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
20862 const X86Subtarget &Subtarget,
20863 SelectionDAG &DAG) {
20865 MVT ByteVecVT = V.getSimpleValueType();
20866 MVT EltVT = VT.getVectorElementType();
20867 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
20868 "Expected value to have byte element type.");
20869 assert(EltVT != MVT::i8 &&
20870 "Horizontal byte sum only makes sense for wider elements!");
20871 unsigned VecSize = VT.getSizeInBits();
20872 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
20874 // The PSADBW instruction horizontally adds all bytes and leaves the result in i64
20875 // chunks, thus directly computes the pop count for v2i64 and v4i64.
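// PSADBW against an all-zeros vector sums the absolute values of the eight
// bytes in each 64-bit half; since every byte already holds a per-byte pop
// count, that sum is the i64 pop count. E.g. bytes {1,2,0,3,8,0,1,1} in one
// half produce 16.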
20876 if (EltVT == MVT::i64) {
20877 SDValue Zeros = getZeroVector(ByteVecVT, Subtarget, DAG, DL);
20878 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
20879 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
20880 return DAG.getBitcast(VT, V);
20883 if (EltVT == MVT::i32) {
20884 // We unpack the low half and high half into i32s interleaved with zeros so
20885 // that we can use PSADBW to horizontally sum them. The most useful part of
20886 // this is that it lines up the results of two PSADBW instructions to be
20887 // two v2i64 vectors which concatenated are the 4 population counts. We can
20888 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
20889 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, DL);
20890 SDValue Low = DAG.getNode(X86ISD::UNPCKL, DL, VT, V, Zeros);
20891 SDValue High = DAG.getNode(X86ISD::UNPCKH, DL, VT, V, Zeros);
20893 // Do the horizontal sums into two v2i64s.
20894 Zeros = getZeroVector(ByteVecVT, Subtarget, DAG, DL);
20895 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
20896 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
20897 DAG.getBitcast(ByteVecVT, Low), Zeros);
20898 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
20899 DAG.getBitcast(ByteVecVT, High), Zeros);
20901 // Merge them together.
20902 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
20903 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
20904 DAG.getBitcast(ShortVecVT, Low),
20905 DAG.getBitcast(ShortVecVT, High));
20907 return DAG.getBitcast(VT, V);
20910 // The only element type left is i16.
20911 assert(EltVT == MVT::i16 && "Unknown how to handle type");
20913 // To obtain pop count for each i16 element starting from the pop count for
20914 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
20915 // right by 8. It is important to shift as i16s as i8 vector shift isn't
20916 // directly supported.
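// Worked example for one i16 lane holding per-byte counts 0x0203: the i16
// shift left by 8 gives 0x0300, the byte-wise add gives 0x0503, and the i16
// shift right by 8 leaves 0x0005 == 2 + 3.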
20917 SDValue ShifterV = DAG.getConstant(8, DL, VT);
20918 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
20919 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
20920 DAG.getBitcast(ByteVecVT, V));
20921 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
20924 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
20925 const X86Subtarget &Subtarget,
20926 SelectionDAG &DAG) {
20927 MVT VT = Op.getSimpleValueType();
20928 MVT EltVT = VT.getVectorElementType();
20929 unsigned VecSize = VT.getSizeInBits();
20931 // Implement a lookup table in register by using an algorithm based on:
20932 // http://wm.ite.pl/articles/sse-popcount.html
20934 // The general idea is that every lower byte nibble in the input vector is an
20935 // index into an in-register pre-computed pop count table. We then split up the
20936 // input vector in two new ones: (1) a vector with only the shifted-right
20937 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
20938 // masked out higher ones) for each byte. PSHUFB is used separately with both
20939 // to index the in-register table. Next, both are added and the result is an
20940 // i8 vector where each element contains the pop count for its input byte.
20942 // To obtain the pop count for elements != i8, we follow up with the same
20943 // approach and use additional tricks as described below.
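// For example, the byte 0xB5 (0b10110101) is split into high nibble 0xB and
// low nibble 0x5; the LUT lookups return 3 and 2 respectively, and the add
// produces 5, the pop count of the byte.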
20945 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
20946 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
20947 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
20948 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
20950 int NumByteElts = VecSize / 8;
20951 MVT ByteVecVT = MVT::getVectorVT(MVT::i8, NumByteElts);
20952 SDValue In = DAG.getBitcast(ByteVecVT, Op);
20953 SmallVector<SDValue, 64> LUTVec;
20954 for (int i = 0; i < NumByteElts; ++i)
20955 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
20956 SDValue InRegLUT = DAG.getBuildVector(ByteVecVT, DL, LUTVec);
20957 SDValue M0F = DAG.getConstant(0x0F, DL, ByteVecVT);
20960 SDValue FourV = DAG.getConstant(4, DL, ByteVecVT);
20961 SDValue HighNibbles = DAG.getNode(ISD::SRL, DL, ByteVecVT, In, FourV);
20964 SDValue LowNibbles = DAG.getNode(ISD::AND, DL, ByteVecVT, In, M0F);
20966 // The input vector is used as the shuffle mask that index elements into the
20967 // LUT. After counting low and high nibbles, add the vector to obtain the
20968 // final pop count per i8 element.
20969 SDValue HighPopCnt =
20970 DAG.getNode(X86ISD::PSHUFB, DL, ByteVecVT, InRegLUT, HighNibbles);
20971 SDValue LowPopCnt =
20972 DAG.getNode(X86ISD::PSHUFB, DL, ByteVecVT, InRegLUT, LowNibbles);
20973 SDValue PopCnt = DAG.getNode(ISD::ADD, DL, ByteVecVT, HighPopCnt, LowPopCnt);
20975 if (EltVT == MVT::i8)
20978 return LowerHorizontalByteSum(PopCnt, VT, Subtarget, DAG);
20981 static SDValue LowerVectorCTPOPBitmath(SDValue Op, const SDLoc &DL,
20982 const X86Subtarget &Subtarget,
20983 SelectionDAG &DAG) {
20984 MVT VT = Op.getSimpleValueType();
20985 assert(VT.is128BitVector() &&
20986 "Only 128-bit vector bitmath lowering supported.");
20988 int VecSize = VT.getSizeInBits();
20989 MVT EltVT = VT.getVectorElementType();
20990 int Len = EltVT.getSizeInBits();
20992 // This is the vectorized version of the "best" algorithm from
20993 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
20994 // with a minor tweak to use a series of adds + shifts instead of vector
20995 // multiplications. Implemented for all integer vector types. We only use
20996 // this when we don't have SSSE3 which allows a LUT-based lowering that is
20997 // much faster, even faster than using native popcnt instructions.
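// Worked example for a single byte 0xFF: step 1 gives 0xFF - 0x55 = 0xAA,
// step 2 gives 0x22 + 0x22 = 0x44, and step 3 gives (0x44 + 0x04) & 0x0F =
// 0x08, the pop count of 0xFF.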
20999 auto GetShift = [&](unsigned OpCode, SDValue V, int Shifter) {
21000 MVT VT = V.getSimpleValueType();
21001 SDValue ShifterV = DAG.getConstant(Shifter, DL, VT);
21002 return DAG.getNode(OpCode, DL, VT, V, ShifterV);
21004 auto GetMask = [&](SDValue V, APInt Mask) {
21005 MVT VT = V.getSimpleValueType();
21006 SDValue MaskV = DAG.getConstant(Mask, DL, VT);
21007 return DAG.getNode(ISD::AND, DL, VT, V, MaskV);
21010 // We don't want to incur the implicit masks required to SRL vNi8 vectors on
21011 // x86, so set the SRL type to have elements at least i16 wide. This is
21012 // correct because all of our SRLs are followed immediately by a mask anyways
21013 // that handles any bits that sneak into the high bits of the byte elements.
21014 MVT SrlVT = Len > 8 ? VT : MVT::getVectorVT(MVT::i16, VecSize / 16);
21018 // v = v - ((v >> 1) & 0x55555555...)
21020 DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 1));
21021 SDValue And = GetMask(Srl, APInt::getSplat(Len, APInt(8, 0x55)));
21022 V = DAG.getNode(ISD::SUB, DL, VT, V, And);
21024 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
21025 SDValue AndLHS = GetMask(V, APInt::getSplat(Len, APInt(8, 0x33)));
21026 Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 2));
21027 SDValue AndRHS = GetMask(Srl, APInt::getSplat(Len, APInt(8, 0x33)));
21028 V = DAG.getNode(ISD::ADD, DL, VT, AndLHS, AndRHS);
21030 // v = (v + (v >> 4)) & 0x0F0F0F0F...
21031 Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 4));
21032 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, V, Srl);
21033 V = GetMask(Add, APInt::getSplat(Len, APInt(8, 0x0F)));
21035 // At this point, V contains the byte-wise population count, and we are
21036 // merely doing a horizontal sum if necessary to get the wider element
// counts.
21038 if (EltVT == MVT::i8)
21041 return LowerHorizontalByteSum(
21042 DAG.getBitcast(MVT::getVectorVT(MVT::i8, VecSize / 8), V), VT, Subtarget,
21046 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
21047 SelectionDAG &DAG) {
21048 MVT VT = Op.getSimpleValueType();
21049 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
21050 "Unknown CTPOP type to handle");
21051 SDLoc DL(Op.getNode());
21052 SDValue Op0 = Op.getOperand(0);
21054 if (!Subtarget.hasSSSE3()) {
21055 // We can't use the fast LUT approach, so fall back on vectorized bitmath.
21056 assert(VT.is128BitVector() && "Only 128-bit vectors supported in SSE!");
21057 return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
21060 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
21061 unsigned NumElems = VT.getVectorNumElements();
21063 // Extract each 128-bit vector, compute pop count and concat the result.
21064 SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
21065 SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
21067 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
21068 LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
21069 LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
21072 if (VT.is512BitVector() && !Subtarget.hasBWI()) {
21073 unsigned NumElems = VT.getVectorNumElements();
21075 // Extract each 256-bit vector, compute pop count and concat the result.
21076 SDValue LHS = extract256BitVector(Op0, 0, DAG, DL);
21077 SDValue RHS = extract256BitVector(Op0, NumElems / 2, DAG, DL);
21079 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
21080 LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
21081 LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
21084 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
21087 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
21088 SelectionDAG &DAG) {
21089 assert(Op.getSimpleValueType().isVector() &&
21090 "We only do custom lowering for vector population count.");
21091 return LowerVectorCTPOP(Op, Subtarget, DAG);
21094 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
21095 MVT VT = Op.getSimpleValueType();
21096 SDValue In = Op.getOperand(0);
21099 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
21100 // perform the BITREVERSE.
21101 if (!VT.isVector()) {
21102 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
21103 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
21104 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
21105 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
21106 DAG.getIntPtrConstant(0, DL));
21109 MVT SVT = VT.getVectorElementType();
21110 int NumElts = VT.getVectorNumElements();
21111 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
21113 // Decompose 256-bit ops into smaller 128-bit ops.
21114 if (VT.is256BitVector()) {
21115 SDValue Lo = extract128BitVector(In, 0, DAG, DL);
21116 SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
21118 MVT HalfVT = MVT::getVectorVT(SVT, NumElts / 2);
21119 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
21120 DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo),
21121 DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi));
21124 assert(VT.is128BitVector() &&
21125 "Only 128-bit vector bitreverse lowering supported.");
21127 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
21128 // perform the BSWAP in the shuffle.
21129 // It's best to shuffle using the second operand as this will implicitly allow
21130 // memory folding for multiple vectors.
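// Each VPPERM selector byte picks a source byte (0-15 from the first source,
// 16-31 from the second) and its top bits encode an operation to apply; op 2
// (hence the 2 << 5 below) reverses the bits of the selected byte, so a single
// VPPERM both byte-swaps via the indices and bit-reverses each byte.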
21131 SmallVector<SDValue, 16> MaskElts;
21132 for (int i = 0; i != NumElts; ++i) {
21133 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
21134 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
21135 int PermuteByte = SourceByte | (2 << 5);
21136 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
21140 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
21141 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
21142 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
21144 return DAG.getBitcast(VT, Res);
21147 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
21148 SelectionDAG &DAG) {
21149 if (Subtarget.hasXOP())
21150 return LowerBITREVERSE_XOP(Op, DAG);
21152 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
21154 MVT VT = Op.getSimpleValueType();
21155 SDValue In = Op.getOperand(0);
21158 unsigned NumElts = VT.getVectorNumElements();
21159 assert(VT.getScalarType() == MVT::i8 &&
21160 "Only byte vector BITREVERSE supported");
21162 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
21163 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
21164 MVT HalfVT = MVT::getVectorVT(MVT::i8, NumElts / 2);
21165 SDValue Lo = extract128BitVector(In, 0, DAG, DL);
21166 SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
21167 Lo = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo);
21168 Hi = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi);
21169 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
21172 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
21173 // two nibbles and a PSHUFB lookup to find the bitreverse of each
21174 // 0-15 value (moved to the other nibble).
21175 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
21176 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
21177 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
21179 const int LoLUT[16] = {
21180 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
21181 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
21182 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
21183 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
21184 const int HiLUT[16] = {
21185 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
21186 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
21187 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
21188 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
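// Worked example: the byte 0x1E has low nibble 0xE and high nibble 0x1;
// LoLUT[0xE] = 0x70 and HiLUT[0x1] = 0x08, and their OR is 0x78, the
// bit-reversal of 0x1E.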
21190 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
21191 for (unsigned i = 0; i < NumElts; ++i) {
21192 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
21193 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
21196 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
21197 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
21198 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
21199 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
21200 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
21203 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG) {
21204 unsigned NewOpc = 0;
21205 switch (N->getOpcode()) {
21206 case ISD::ATOMIC_LOAD_ADD:
21207 NewOpc = X86ISD::LADD;
21209 case ISD::ATOMIC_LOAD_SUB:
21210 NewOpc = X86ISD::LSUB;
21212 case ISD::ATOMIC_LOAD_OR:
21213 NewOpc = X86ISD::LOR;
21215 case ISD::ATOMIC_LOAD_XOR:
21216 NewOpc = X86ISD::LXOR;
21218 case ISD::ATOMIC_LOAD_AND:
21219 NewOpc = X86ISD::LAND;
21222 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
21225 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
21226 return DAG.getMemIntrinsicNode(
21227 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
21228 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
21229 /*MemVT=*/N->getSimpleValueType(0), MMO);
21232 /// Lower atomic_load_ops into LOCK-prefixed operations.
21233 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
21234 const X86Subtarget &Subtarget) {
21235 SDValue Chain = N->getOperand(0);
21236 SDValue LHS = N->getOperand(1);
21237 SDValue RHS = N->getOperand(2);
21238 unsigned Opc = N->getOpcode();
21239 MVT VT = N->getSimpleValueType(0);
21242 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
21243 // can only be lowered when the result is unused. They should have already
21244 // been transformed into a cmpxchg loop in AtomicExpand.
21245 if (N->hasAnyUseOfValue(0)) {
21246 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
21247 // select LXADD if LOCK_SUB can't be selected.
21248 if (Opc == ISD::ATOMIC_LOAD_SUB) {
21249 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
21250 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
21251 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
21252 RHS, AN->getMemOperand(), AN->getOrdering(),
21253 AN->getSynchScope());
21255 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
21256 "Used AtomicRMW ops other than Add should have been expanded!");
21260 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG);
21261 // RAUW the chain, but don't worry about the result, as it's unused.
21262 assert(!N->hasAnyUseOfValue(0));
21263 DAG.ReplaceAllUsesOfValueWith(N.getValue(1), LockOp.getValue(1));
21267 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
21268 SDNode *Node = Op.getNode();
21270 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
21272 // Convert seq_cst store -> xchg
21273 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
21274 // FIXME: On 32-bit, store -> fist or movq would be more efficient
21275 // (The only way to get a 16-byte store is cmpxchg16b)
21276 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
21277 if (cast<AtomicSDNode>(Node)->getOrdering() ==
21278 AtomicOrdering::SequentiallyConsistent ||
21279 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
21280 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
21281 cast<AtomicSDNode>(Node)->getMemoryVT(),
21282 Node->getOperand(0),
21283 Node->getOperand(1), Node->getOperand(2),
21284 cast<AtomicSDNode>(Node)->getMemOperand(),
21285 cast<AtomicSDNode>(Node)->getOrdering(),
21286 cast<AtomicSDNode>(Node)->getSynchScope());
21287 return Swap.getValue(1);
21289 // Other atomic stores have a simple pattern.
21293 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
21294 MVT VT = Op.getNode()->getSimpleValueType(0);
21296 // Let legalize expand this if it isn't a legal type yet.
21297 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
21300 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
21303 bool ExtraOp = false;
21304 switch (Op.getOpcode()) {
21305 default: llvm_unreachable("Invalid code");
21306 case ISD::ADDC: Opc = X86ISD::ADD; break;
21307 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
21308 case ISD::SUBC: Opc = X86ISD::SUB; break;
21309 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
21313 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
21315 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
21316 Op.getOperand(1), Op.getOperand(2));
21319 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
21320 SelectionDAG &DAG) {
21321 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
21323 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
21324 // which returns the values as { float, float } (in XMM0) or
21325 // { double, double } (which is returned in XMM0, XMM1).
21327 SDValue Arg = Op.getOperand(0);
21328 EVT ArgVT = Arg.getValueType();
21329 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
21331 TargetLowering::ArgListTy Args;
21332 TargetLowering::ArgListEntry Entry;
21336 Entry.isSExt = false;
21337 Entry.isZExt = false;
21338 Args.push_back(Entry);
21340 bool isF64 = ArgVT == MVT::f64;
21341 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
21342 // the small struct {f32, f32} is returned in (eax, edx). For f64,
21343 // the results are returned via SRet in memory.
21344 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
21345 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21347 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
21349 Type *RetTy = isF64
21350 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
21351 : (Type*)VectorType::get(ArgTy, 4);
21353 TargetLowering::CallLoweringInfo CLI(DAG);
21354 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
21355 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
21357 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
if (isF64)
  // Returned in xmm0 and xmm1.
  return CallResult.first;
// Returned in bits 0:31 and 32:63 of xmm0.
21364 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
21365 CallResult.first, DAG.getIntPtrConstant(0, dl));
21366 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
21367 CallResult.first, DAG.getIntPtrConstant(1, dl));
21368 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
21369 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
21372 /// Widen a vector input to a vector of NVT. The
21373 /// input vector must have the same element type as NVT.
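/// For example (a sketch), widening a v2i32 input to v4i32 turns <a, b> into
/// <a, b, 0, 0> when FillWithZeroes is set, and into <a, b, undef, undef>
/// otherwise.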
21374 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
21375 bool FillWithZeroes = false) {
21376 // Check if InOp already has the right width.
21377 MVT InVT = InOp.getSimpleValueType();
21381 if (InOp.isUndef())
21382 return DAG.getUNDEF(NVT);
21384 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
21385 "input and widen element type must match");
21387 unsigned InNumElts = InVT.getVectorNumElements();
21388 unsigned WidenNumElts = NVT.getVectorNumElements();
21389 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
21390 "Unexpected request for vector widening");
21392 EVT EltVT = NVT.getVectorElementType();
21395 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
21396 InOp.getNumOperands() == 2) {
21397 SDValue N1 = InOp.getOperand(1);
if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
    N1.isUndef()) {
21400 InOp = InOp.getOperand(0);
21401 InVT = InOp.getSimpleValueType();
21402 InNumElts = InVT.getVectorNumElements();
21405 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
21406 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
21407 SmallVector<SDValue, 16> Ops;
21408 for (unsigned i = 0; i < InNumElts; ++i)
21409 Ops.push_back(InOp.getOperand(i));
21411 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
21412 DAG.getUNDEF(EltVT);
21413 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
21414 Ops.push_back(FillVal);
21415 return DAG.getBuildVector(NVT, dl, Ops);
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
                                   DAG.getUNDEF(NVT);
21419 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
21420 InOp, DAG.getIntPtrConstant(0, dl));
21423 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
21424 SelectionDAG &DAG) {
21425 assert(Subtarget.hasAVX512() &&
21426 "MGATHER/MSCATTER are supported on AVX-512 arch only");
// The X86 scatter instruction kills the mask register, so its type should be
// added to the list of return values.
21430 // If the "scatter" has 2 return values, it is already handled.
21431 if (Op.getNode()->getNumValues() == 2)
21434 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
21435 SDValue Src = N->getValue();
21436 MVT VT = Src.getSimpleValueType();
21437 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
21440 SDValue NewScatter;
21441 SDValue Index = N->getIndex();
21442 SDValue Mask = N->getMask();
21443 SDValue Chain = N->getChain();
21444 SDValue BasePtr = N->getBasePtr();
21445 MVT MemVT = N->getMemoryVT().getSimpleVT();
21446 MVT IndexVT = Index.getSimpleValueType();
21447 MVT MaskVT = Mask.getSimpleValueType();
21449 if (MemVT.getScalarSizeInBits() < VT.getScalarSizeInBits()) {
// The v2i32 value was promoted to v2i64.
// Now we "redo" the type legalizer's work and widen the original
// v2i32 value to v4i32. The original v2i32 is retrieved from the promoted
// v2i64 by taking its low 32-bit lanes.
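// Sketch of the transformation: a promoted v2i64 <x, y> whose elements hold
// i32 payloads is bitcast to v4i32 <x_lo, x_hi, y_lo, y_hi>; the shuffle mask
// {0, 2, -1, -1} below then picks out <x_lo, y_lo, undef, undef>.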
21454 assert((MemVT == MVT::v2i32 && VT == MVT::v2i64) &&
21455 "Unexpected memory type");
21456 int ShuffleMask[] = {0, 2, -1, -1};
21457 Src = DAG.getVectorShuffle(MVT::v4i32, dl, DAG.getBitcast(MVT::v4i32, Src),
21458 DAG.getUNDEF(MVT::v4i32), ShuffleMask);
21459 // Now we have 4 elements instead of 2.
21460 // Expand the index.
21461 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), 4);
21462 Index = ExtendToType(Index, NewIndexVT, DAG);
// Expand the mask with zeroes.
// The mask may be <2 x i64> or <2 x i1> at this point.
21466 assert((MaskVT == MVT::v2i1 || MaskVT == MVT::v2i64) &&
21467 "Unexpected mask type");
21468 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), 4);
21469 Mask = ExtendToType(Mask, ExtMaskVT, DAG, true);
21473 unsigned NumElts = VT.getVectorNumElements();
21474 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
21475 !Index.getSimpleValueType().is512BitVector()) {
// AVX512F supports only 512-bit vectors. Either the data or the index
// must be 512 bits wide. If both the index and the data are 256-bit here
// but the vector contains 8 elements, we just sign-extend the index.
21479 if (IndexVT == MVT::v8i32)
21480 // Just extend index
21481 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
// The minimum number of elements in a scatter is 8.
21486 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), NumElts);
21487 // Use original index here, do not modify the index twice
21488 Index = ExtendToType(N->getIndex(), NewIndexVT, DAG);
21489 if (IndexVT.getScalarType() == MVT::i32)
21490 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
// At this point we have a promoted mask operand.
21494 assert(MaskVT.getScalarSizeInBits() >= 32 && "unexpected mask type");
21495 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), NumElts);
21496 // Use the original mask here, do not modify the mask twice
21497 Mask = ExtendToType(N->getMask(), ExtMaskVT, DAG, true);
21499 // The value that should be stored
21500 MVT NewVT = MVT::getVectorVT(VT.getScalarType(), NumElts);
21501 Src = ExtendToType(Src, NewVT, DAG);
// If the mask is "wide" at this point, truncate it to an i1 vector.
21505 MVT BitMaskVT = MVT::getVectorVT(MVT::i1, NumElts);
21506 Mask = DAG.getNode(ISD::TRUNCATE, dl, BitMaskVT, Mask);
21508 // The mask is killed by scatter, add it to the values
21509 SDVTList VTs = DAG.getVTList(BitMaskVT, MVT::Other);
21510 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index};
21511 NewScatter = DAG.getMaskedScatter(VTs, N->getMemoryVT(), dl, Ops,
21512 N->getMemOperand());
21513 DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
21514 return SDValue(NewScatter.getNode(), 1);
21517 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
21518 SelectionDAG &DAG) {
21520 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
21521 MVT VT = Op.getSimpleValueType();
21522 MVT ScalarVT = VT.getScalarType();
21523 SDValue Mask = N->getMask();
21526 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
21527 "Cannot lower masked load op.");
21529 assert(((ScalarVT == MVT::i32 || ScalarVT == MVT::f32) ||
21530 (Subtarget.hasBWI() &&
21531 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
21532 "Unsupported masked load op.");
// This operation is legal for targets with VLX, but without
// VLX the vector should be widened to 512 bits.
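// For example (a sketch): a v8i32 masked load on AVX-512 without VLX is
// performed as a v16i32 load with the mask zero-extended to v16i1, and the
// low 8 lanes are extracted from the result afterwards.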
21536 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
21537 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
21538 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
21539 SDValue Src0 = N->getSrc0();
21540 Src0 = ExtendToType(Src0, WideDataVT, DAG);
21541 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
21542 SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
21543 N->getBasePtr(), Mask, Src0,
21544 N->getMemoryVT(), N->getMemOperand(),
21545 N->getExtensionType());
21547 SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
21548 NewLoad.getValue(0),
21549 DAG.getIntPtrConstant(0, dl));
21550 SDValue RetOps[] = {Exract, NewLoad.getValue(1)};
21551 return DAG.getMergeValues(RetOps, dl);
21554 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
21555 SelectionDAG &DAG) {
21556 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
21557 SDValue DataToStore = N->getValue();
21558 MVT VT = DataToStore.getSimpleValueType();
21559 MVT ScalarVT = VT.getScalarType();
21560 SDValue Mask = N->getMask();
21563 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
21564 "Cannot lower masked store op.");
21566 assert(((ScalarVT == MVT::i32 || ScalarVT == MVT::f32) ||
21567 (Subtarget.hasBWI() &&
21568 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
21569 "Unsupported masked store op.");
// This operation is legal for targets with VLX, but without
// VLX the vector should be widened to 512 bits.
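// For example (a sketch): a v8f32 masked store is widened to v16f32 with the
// mask zero-padded to v16i1, so the lanes that were added are never written.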
21573 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
21574 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
21575 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
21576 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
21577 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
21578 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
21579 Mask, N->getMemoryVT(), N->getMemOperand(),
21580 N->isTruncatingStore());
21583 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
21584 SelectionDAG &DAG) {
21585 assert(Subtarget.hasAVX512() &&
21586 "MGATHER/MSCATTER are supported on AVX-512 arch only");
21588 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
21590 MVT VT = Op.getSimpleValueType();
21591 SDValue Index = N->getIndex();
21592 SDValue Mask = N->getMask();
21593 SDValue Src0 = N->getValue();
21594 MVT IndexVT = Index.getSimpleValueType();
21595 MVT MaskVT = Mask.getSimpleValueType();
21597 unsigned NumElts = VT.getVectorNumElements();
21598 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
21600 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
21601 !Index.getSimpleValueType().is512BitVector()) {
// AVX512F supports only 512-bit vectors. Either the data or the index
// must be 512 bits wide. If both the index and the data are 256-bit here
// but the vector contains 8 elements, we just sign-extend the index.
21605 if (NumElts == 8) {
21606 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
21607 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
21608 N->getOperand(3), Index };
21609 DAG.UpdateNodeOperands(N, Ops);
21613 // Minimal number of elements in Gather
21616 MVT NewIndexVT = MVT::getVectorVT(IndexVT.getScalarType(), NumElts);
21617 Index = ExtendToType(Index, NewIndexVT, DAG);
21618 if (IndexVT.getScalarType() == MVT::i32)
21619 Index = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i64, Index);
21622 MVT MaskBitVT = MVT::getVectorVT(MVT::i1, NumElts);
// At this point we have a promoted mask operand.
21624 assert(MaskVT.getScalarSizeInBits() >= 32 && "unexpected mask type");
21625 MVT ExtMaskVT = MVT::getVectorVT(MaskVT.getScalarType(), NumElts);
21626 Mask = ExtendToType(Mask, ExtMaskVT, DAG, true);
21627 Mask = DAG.getNode(ISD::TRUNCATE, dl, MaskBitVT, Mask);
21629 // The pass-thru value
21630 MVT NewVT = MVT::getVectorVT(VT.getScalarType(), NumElts);
21631 Src0 = ExtendToType(Src0, NewVT, DAG);
21633 SDValue Ops[] = { N->getChain(), Src0, Mask, N->getBasePtr(), Index };
21634 SDValue NewGather = DAG.getMaskedGather(DAG.getVTList(NewVT, MVT::Other),
21635 N->getMemoryVT(), dl, Ops,
21636 N->getMemOperand());
21637 SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
21638 NewGather.getValue(0),
21639 DAG.getIntPtrConstant(0, dl));
21640 SDValue RetOps[] = {Exract, NewGather.getValue(1)};
21641 return DAG.getMergeValues(RetOps, dl);
21646 SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
21647 SelectionDAG &DAG) const {
21648 // TODO: Eventually, the lowering of these nodes should be informed by or
21649 // deferred to the GC strategy for the function in which they appear. For
21650 // now, however, they must be lowered to something. Since they are logically
21651 // no-ops in the case of a null GC strategy (or a GC strategy which does not
// require special handling for these nodes), lower them as literal NOOPs for
// the time being.
21654 SmallVector<SDValue, 2> Ops;
21656 Ops.push_back(Op.getOperand(0));
21657 if (Op->getGluedNode())
21658 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
21661 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
21662 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
21667 SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
21668 SelectionDAG &DAG) const {
21669 // TODO: Eventually, the lowering of these nodes should be informed by or
21670 // deferred to the GC strategy for the function in which they appear. For
21671 // now, however, they must be lowered to something. Since they are logically
21672 // no-ops in the case of a null GC strategy (or a GC strategy which does not
// require special handling for these nodes), lower them as literal NOOPs for
// the time being.
21675 SmallVector<SDValue, 2> Ops;
21677 Ops.push_back(Op.getOperand(0));
21678 if (Op->getGluedNode())
21679 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
21682 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
21683 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
21688 /// Provide custom lowering hooks for some operations.
21689 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
21690 switch (Op.getOpcode()) {
21691 default: llvm_unreachable("Should not custom lower this!");
21692 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
21693 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
21694 return LowerCMP_SWAP(Op, Subtarget, DAG);
21695 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
21696 case ISD::ATOMIC_LOAD_ADD:
21697 case ISD::ATOMIC_LOAD_SUB:
21698 case ISD::ATOMIC_LOAD_OR:
21699 case ISD::ATOMIC_LOAD_XOR:
21700 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
21701 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
21702 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
21703 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
21704 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
21705 case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
21706 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
21707 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
21708 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
21709 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
21710 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
21711 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
21712 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
21713 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
21714 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
21715 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
21716 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
21717 case ISD::SHL_PARTS:
21718 case ISD::SRA_PARTS:
21719 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
21720 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
21721 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
21722 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
21723 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
21724 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
21725 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
21726 case ISD::SIGN_EXTEND_VECTOR_INREG:
21727 return LowerSIGN_EXTEND_VECTOR_INREG(Op, Subtarget, DAG);
21728 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
21729 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
21730 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
21731 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
21733 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
21734 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
21735 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
21736 case ISD::SETCC: return LowerSETCC(Op, DAG);
21737 case ISD::SETCCE: return LowerSETCCE(Op, DAG);
21738 case ISD::SELECT: return LowerSELECT(Op, DAG);
21739 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
21740 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
21741 case ISD::VASTART: return LowerVASTART(Op, DAG);
21742 case ISD::VAARG: return LowerVAARG(Op, DAG);
21743 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
21744 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
21745 case ISD::INTRINSIC_VOID:
21746 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
21747 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
21748 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
21749 case ISD::FRAME_TO_ARGS_OFFSET:
21750 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
21751 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
21752 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
21753 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
21754 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
21755 case ISD::EH_SJLJ_SETUP_DISPATCH:
21756 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
21757 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
21758 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
21759 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
21761 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
21763 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG);
21764 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
21766 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
21767 case ISD::UMUL_LOHI:
21768 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
21769 case ISD::ROTL: return LowerRotate(Op, Subtarget, DAG);
21772 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
21778 case ISD::UMULO: return LowerXALUO(Op, DAG);
21779 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
21780 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
21784 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
21785 case ISD::ADD: return LowerADD(Op, DAG);
21786 case ISD::SUB: return LowerSUB(Op, DAG);
21790 case ISD::UMIN: return LowerMINMAX(Op, DAG);
21791 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
21792 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
21793 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
21794 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
21795 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
21796 case ISD::GC_TRANSITION_START:
21797 return LowerGC_TRANSITION_START(Op, DAG);
21798 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
21799 case ISD::STORE: return LowerTruncatingStore(Op, Subtarget, DAG);
21803 /// Places new result values for the node in Results (their number
21804 /// and types must exactly match those of the original return values of
21805 /// the node), or leaves Results empty, which indicates that the node is not
21806 /// to be custom lowered after all.
21807 void X86TargetLowering::LowerOperationWrapper(SDNode *N,
21808 SmallVectorImpl<SDValue> &Results,
21809 SelectionDAG &DAG) const {
21810 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
21812 if (!Res.getNode())
21815 assert((N->getNumValues() <= Res->getNumValues()) &&
21816 "Lowering returned the wrong number of results!");
// Place new result values based on N's result numbers.
// In some cases (LowerSINT_TO_FP, for example) Res has more result values
// than the original node; the chain (the last value) should be dropped.
21821 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
21822 Results.push_back(Res.getValue(I));
/// Replace a node with an illegal result type with a new node built out of
/// custom code.
21827 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
21828 SmallVectorImpl<SDValue>&Results,
21829 SelectionDAG &DAG) const {
21831 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21832 switch (N->getOpcode()) {
21834 llvm_unreachable("Do not know how to custom type legalize this operation!");
21835 case X86ISD::AVG: {
21836 // Legalize types for X86ISD::AVG by expanding vectors.
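// Sketch of the expansion, assuming a v8i8 input (an illegal 64-bit type):
// both operands are concatenated with undef up to v16i8, a single 128-bit
// X86ISD::AVG is emitted, and the low v8i8 subvector of the result is
// extracted.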
21837 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
21839 auto InVT = N->getValueType(0);
21840 auto InVTSize = InVT.getSizeInBits();
21841 const unsigned RegSize =
21842 (InVTSize > 128) ? ((InVTSize > 256) ? 512 : 256) : 128;
21843 assert((!Subtarget.hasAVX512() || RegSize < 512) &&
21844 "512-bit vector requires AVX512");
21845 assert((!Subtarget.hasAVX2() || RegSize < 256) &&
21846 "256-bit vector requires AVX2");
21848 auto ElemVT = InVT.getVectorElementType();
21849 auto RegVT = EVT::getVectorVT(*DAG.getContext(), ElemVT,
21850 RegSize / ElemVT.getSizeInBits());
21851 assert(RegSize % InVT.getSizeInBits() == 0);
21852 unsigned NumConcat = RegSize / InVT.getSizeInBits();
21854 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
21855 Ops[0] = N->getOperand(0);
21856 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Ops);
21857 Ops[0] = N->getOperand(1);
21858 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Ops);
21860 SDValue Res = DAG.getNode(X86ISD::AVG, dl, RegVT, InVec0, InVec1);
21861 Results.push_back(DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InVT, Res,
21862 DAG.getIntPtrConstant(0, dl)));
21865 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
21866 case X86ISD::FMINC:
21868 case X86ISD::FMAXC:
21869 case X86ISD::FMAX: {
21870 EVT VT = N->getValueType(0);
21871 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
21872 SDValue UNDEF = DAG.getUNDEF(VT);
21873 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
21874 N->getOperand(0), UNDEF);
21875 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
21876 N->getOperand(1), UNDEF);
21877 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
21880 case ISD::SIGN_EXTEND_INREG:
21885 // We don't want to expand or promote these.
21892 case ISD::UDIVREM: {
21893 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
21894 Results.push_back(V);
21897 case ISD::FP_TO_SINT:
21898 case ISD::FP_TO_UINT: {
21899 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
21901 std::pair<SDValue,SDValue> Vals =
21902 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
21903 SDValue FIST = Vals.first, StackSlot = Vals.second;
21904 if (FIST.getNode()) {
21905 EVT VT = N->getValueType(0);
21906 // Return a load from the stack slot.
21907 if (StackSlot.getNode())
21909 DAG.getLoad(VT, dl, FIST, StackSlot, MachinePointerInfo()));
21911 Results.push_back(FIST);
21915 case ISD::UINT_TO_FP: {
21916 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
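// Sketch of the v2i32 -> v2f32 conversion used below: each i32 is
// zero-extended to i64 and OR'ed into the mantissa of the double constant
// 2^52 (bit pattern 0x4330000000000000), giving exactly 2^52 + x; subtracting
// 2^52 then yields x as an f64, which is finally rounded to f32.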
21917 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
21918 N->getValueType(0) != MVT::v2f32)
SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
                             N->getOperand(0));
SDValue VBias =
    DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
21924 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
21925 DAG.getBitcast(MVT::v2i64, VBias));
21926 Or = DAG.getBitcast(MVT::v2f64, Or);
21927 // TODO: Are there any fast-math-flags to propagate here?
21928 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
21929 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
21932 case ISD::FP_ROUND: {
21933 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
21935 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
21936 Results.push_back(V);
21939 case ISD::FP_EXTEND: {
21940 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
21941 // No other ValueType for FP_EXTEND should reach this point.
21942 assert(N->getValueType(0) == MVT::v2f32 &&
21943 "Do not know how to legalize this Node");
21946 case ISD::INTRINSIC_W_CHAIN: {
21947 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
21949 default : llvm_unreachable("Do not know how to custom type "
21950 "legalize this intrinsic operation!");
21951 case Intrinsic::x86_rdtsc:
21952 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
21954 case Intrinsic::x86_rdtscp:
21955 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
21957 case Intrinsic::x86_rdpmc:
21958 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
21961 case ISD::INTRINSIC_WO_CHAIN: {
21962 if (SDValue V = LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), Subtarget, DAG))
21963 Results.push_back(V);
21966 case ISD::READCYCLECOUNTER: {
21967 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
21970 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
21971 EVT T = N->getValueType(0);
21972 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
21973 bool Regs64bit = T == MVT::i128;
21974 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
21975 SDValue cpInL, cpInH;
21976 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
21977 DAG.getConstant(0, dl, HalfT));
21978 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
21979 DAG.getConstant(1, dl, HalfT));
21980 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
21981 Regs64bit ? X86::RAX : X86::EAX,
21983 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
21984 Regs64bit ? X86::RDX : X86::EDX,
21985 cpInH, cpInL.getValue(1));
21986 SDValue swapInL, swapInH;
21987 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
21988 DAG.getConstant(0, dl, HalfT));
21989 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
21990 DAG.getConstant(1, dl, HalfT));
21992 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
21993 swapInH, cpInH.getValue(1));
// If the current function needs the base pointer, RBX,
// we shouldn't use cmpxchg directly. The lowering of that instruction
// will clobber that register, and since RBX will be a reserved register,
// the register allocator will not make sure its value is properly saved
// and restored around this live range.
22000 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
22002 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
22003 unsigned BasePtr = TRI->getBaseRegister();
22004 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
22005 if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
22006 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
// ISel prefers the LCMPXCHG64 variant.
// If that assert breaks, that means it is not the case anymore,
// and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
// not just EBX. This is a matter of accepting an i64 input for that
// pseudo, and restoring into the register of the right width
// in the expand pseudo. Everything else should just work.
22013 assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
22014 "Saving only half of the RBX");
22015 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
22016 : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
22017 SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
22018 Regs64bit ? X86::RBX : X86::EBX,
22019 HalfT, swapInH.getValue(1));
22020 SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
22022 /*Glue*/ RBXSave.getValue(2)};
22023 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
22026 Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
22027 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
22028 Regs64bit ? X86::RBX : X86::EBX, swapInL,
22029 swapInH.getValue(1));
22030 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
22031 swapInL.getValue(1)};
22032 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
22034 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
22035 Regs64bit ? X86::RAX : X86::EAX,
22036 HalfT, Result.getValue(1));
22037 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
22038 Regs64bit ? X86::RDX : X86::EDX,
22039 HalfT, cpOutL.getValue(2));
22040 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
22042 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
22043 MVT::i32, cpOutH.getValue(2));
22045 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
22046 DAG.getConstant(X86::COND_E, dl, MVT::i8), EFLAGS);
22047 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
22049 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
22050 Results.push_back(Success);
22051 Results.push_back(EFLAGS.getValue(1));
22054 case ISD::ATOMIC_SWAP:
22055 case ISD::ATOMIC_LOAD_ADD:
22056 case ISD::ATOMIC_LOAD_SUB:
22057 case ISD::ATOMIC_LOAD_AND:
22058 case ISD::ATOMIC_LOAD_OR:
22059 case ISD::ATOMIC_LOAD_XOR:
22060 case ISD::ATOMIC_LOAD_NAND:
22061 case ISD::ATOMIC_LOAD_MIN:
22062 case ISD::ATOMIC_LOAD_MAX:
22063 case ISD::ATOMIC_LOAD_UMIN:
22064 case ISD::ATOMIC_LOAD_UMAX:
22065 case ISD::ATOMIC_LOAD: {
22066 // Delegate to generic TypeLegalization. Situations we can really handle
22067 // should have already been dealt with by AtomicExpandPass.cpp.
22070 case ISD::BITCAST: {
22071 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
22072 EVT DstVT = N->getValueType(0);
22073 EVT SrcVT = N->getOperand(0)->getValueType(0);
22075 if (SrcVT != MVT::f64 ||
22076 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
22079 unsigned NumElts = DstVT.getVectorNumElements();
22080 EVT SVT = DstVT.getVectorElementType();
22081 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
22082 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
22083 MVT::v2f64, N->getOperand(0));
22084 SDValue ToVecInt = DAG.getBitcast(WiderVT, Expanded);
22086 if (ExperimentalVectorWideningLegalization) {
22087 // If we are legalizing vectors by widening, we already have the desired
22088 // legal vector type, just return it.
22089 Results.push_back(ToVecInt);
22093 SmallVector<SDValue, 8> Elts;
22094 for (unsigned i = 0, e = NumElts; i != e; ++i)
22095 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
22096 ToVecInt, DAG.getIntPtrConstant(i, dl)));
22098 Results.push_back(DAG.getBuildVector(DstVT, dl, Elts));
22103 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
22104 switch ((X86ISD::NodeType)Opcode) {
22105 case X86ISD::FIRST_NUMBER: break;
22106 case X86ISD::BSF: return "X86ISD::BSF";
22107 case X86ISD::BSR: return "X86ISD::BSR";
22108 case X86ISD::SHLD: return "X86ISD::SHLD";
22109 case X86ISD::SHRD: return "X86ISD::SHRD";
22110 case X86ISD::FAND: return "X86ISD::FAND";
22111 case X86ISD::FANDN: return "X86ISD::FANDN";
22112 case X86ISD::FOR: return "X86ISD::FOR";
22113 case X86ISD::FXOR: return "X86ISD::FXOR";
22114 case X86ISD::FILD: return "X86ISD::FILD";
22115 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
22116 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
22117 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
22118 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
22119 case X86ISD::FLD: return "X86ISD::FLD";
22120 case X86ISD::FST: return "X86ISD::FST";
22121 case X86ISD::CALL: return "X86ISD::CALL";
22122 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
22123 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
22124 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
22125 case X86ISD::BT: return "X86ISD::BT";
22126 case X86ISD::CMP: return "X86ISD::CMP";
22127 case X86ISD::COMI: return "X86ISD::COMI";
22128 case X86ISD::UCOMI: return "X86ISD::UCOMI";
22129 case X86ISD::CMPM: return "X86ISD::CMPM";
22130 case X86ISD::CMPMU: return "X86ISD::CMPMU";
22131 case X86ISD::CMPM_RND: return "X86ISD::CMPM_RND";
22132 case X86ISD::SETCC: return "X86ISD::SETCC";
22133 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
22134 case X86ISD::FSETCC: return "X86ISD::FSETCC";
22135 case X86ISD::CMOV: return "X86ISD::CMOV";
22136 case X86ISD::BRCOND: return "X86ISD::BRCOND";
22137 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
22138 case X86ISD::IRET: return "X86ISD::IRET";
22139 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
22140 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
22141 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
22142 case X86ISD::Wrapper: return "X86ISD::Wrapper";
22143 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
22144 case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
22145 case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
22146 case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
22147 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
22148 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
22149 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
22150 case X86ISD::PINSRB: return "X86ISD::PINSRB";
22151 case X86ISD::PINSRW: return "X86ISD::PINSRW";
22152 case X86ISD::MMX_PINSRW: return "X86ISD::MMX_PINSRW";
22153 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
22154 case X86ISD::ANDNP: return "X86ISD::ANDNP";
22155 case X86ISD::BLENDI: return "X86ISD::BLENDI";
22156 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
22157 case X86ISD::ADDUS: return "X86ISD::ADDUS";
22158 case X86ISD::SUBUS: return "X86ISD::SUBUS";
22159 case X86ISD::HADD: return "X86ISD::HADD";
22160 case X86ISD::HSUB: return "X86ISD::HSUB";
22161 case X86ISD::FHADD: return "X86ISD::FHADD";
22162 case X86ISD::FHSUB: return "X86ISD::FHSUB";
22163 case X86ISD::ABS: return "X86ISD::ABS";
22164 case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
22165 case X86ISD::FMAX: return "X86ISD::FMAX";
22166 case X86ISD::FMAX_RND: return "X86ISD::FMAX_RND";
22167 case X86ISD::FMIN: return "X86ISD::FMIN";
22168 case X86ISD::FMIN_RND: return "X86ISD::FMIN_RND";
22169 case X86ISD::FMAXC: return "X86ISD::FMAXC";
22170 case X86ISD::FMINC: return "X86ISD::FMINC";
22171 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
22172 case X86ISD::FRSQRTS: return "X86ISD::FRSQRTS";
22173 case X86ISD::FRCP: return "X86ISD::FRCP";
22174 case X86ISD::FRCPS: return "X86ISD::FRCPS";
22175 case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
22176 case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
22177 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
22178 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
22179 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
22180 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
22181 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
22182 case X86ISD::EH_SJLJ_SETUP_DISPATCH:
22183 return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
22184 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
22185 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
22186 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
22187 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
22188 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
22189 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
22190 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
22191 case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
22192 return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
22193 case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
22194 return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
22195 case X86ISD::LADD: return "X86ISD::LADD";
22196 case X86ISD::LSUB: return "X86ISD::LSUB";
22197 case X86ISD::LOR: return "X86ISD::LOR";
22198 case X86ISD::LXOR: return "X86ISD::LXOR";
22199 case X86ISD::LAND: return "X86ISD::LAND";
22200 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
22201 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
22202 case X86ISD::VZEXT: return "X86ISD::VZEXT";
22203 case X86ISD::VSEXT: return "X86ISD::VSEXT";
22204 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
22205 case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
22206 case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
22207 case X86ISD::VINSERT: return "X86ISD::VINSERT";
22208 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
22209 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
22210 case X86ISD::CVTDQ2PD: return "X86ISD::CVTDQ2PD";
22211 case X86ISD::CVTUDQ2PD: return "X86ISD::CVTUDQ2PD";
22212 case X86ISD::CVT2MASK: return "X86ISD::CVT2MASK";
22213 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
22214 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
22215 case X86ISD::VSHL: return "X86ISD::VSHL";
22216 case X86ISD::VSRL: return "X86ISD::VSRL";
22217 case X86ISD::VSRA: return "X86ISD::VSRA";
22218 case X86ISD::VSHLI: return "X86ISD::VSHLI";
22219 case X86ISD::VSRLI: return "X86ISD::VSRLI";
22220 case X86ISD::VSRAI: return "X86ISD::VSRAI";
22221 case X86ISD::VSRAV: return "X86ISD::VSRAV";
22222 case X86ISD::VROTLI: return "X86ISD::VROTLI";
22223 case X86ISD::VROTRI: return "X86ISD::VROTRI";
22224 case X86ISD::VPPERM: return "X86ISD::VPPERM";
22225 case X86ISD::CMPP: return "X86ISD::CMPP";
22226 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
22227 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
22228 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
22229 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
22230 case X86ISD::ADD: return "X86ISD::ADD";
22231 case X86ISD::SUB: return "X86ISD::SUB";
22232 case X86ISD::ADC: return "X86ISD::ADC";
22233 case X86ISD::SBB: return "X86ISD::SBB";
22234 case X86ISD::SMUL: return "X86ISD::SMUL";
22235 case X86ISD::UMUL: return "X86ISD::UMUL";
22236 case X86ISD::SMUL8: return "X86ISD::SMUL8";
22237 case X86ISD::UMUL8: return "X86ISD::UMUL8";
22238 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
22239 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
22240 case X86ISD::INC: return "X86ISD::INC";
22241 case X86ISD::DEC: return "X86ISD::DEC";
22242 case X86ISD::OR: return "X86ISD::OR";
22243 case X86ISD::XOR: return "X86ISD::XOR";
22244 case X86ISD::AND: return "X86ISD::AND";
22245 case X86ISD::BEXTR: return "X86ISD::BEXTR";
22246 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
22247 case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
22248 case X86ISD::PTEST: return "X86ISD::PTEST";
22249 case X86ISD::TESTP: return "X86ISD::TESTP";
22250 case X86ISD::TESTM: return "X86ISD::TESTM";
22251 case X86ISD::TESTNM: return "X86ISD::TESTNM";
22252 case X86ISD::KORTEST: return "X86ISD::KORTEST";
22253 case X86ISD::KTEST: return "X86ISD::KTEST";
22254 case X86ISD::PACKSS: return "X86ISD::PACKSS";
22255 case X86ISD::PACKUS: return "X86ISD::PACKUS";
22256 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
22257 case X86ISD::VALIGN: return "X86ISD::VALIGN";
22258 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
22259 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
22260 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
22261 case X86ISD::SHUFP: return "X86ISD::SHUFP";
22262 case X86ISD::SHUF128: return "X86ISD::SHUF128";
22263 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
22264 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
22265 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
22266 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
22267 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
22268 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
22269 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
22270 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
22271 case X86ISD::MOVSD: return "X86ISD::MOVSD";
22272 case X86ISD::MOVSS: return "X86ISD::MOVSS";
22273 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
22274 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
22275 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
22276 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
22277 case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
22278 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
22279 case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
22280 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
22281 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
22282 case X86ISD::VPERMV: return "X86ISD::VPERMV";
22283 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
22284 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
22285 case X86ISD::VPERMI: return "X86ISD::VPERMI";
22286 case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
22287 case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
22288 case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
22289 case X86ISD::VRANGE: return "X86ISD::VRANGE";
22290 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
22291 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
22292 case X86ISD::PSADBW: return "X86ISD::PSADBW";
22293 case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
22294 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
22295 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
22296 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
22297 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
22298 case X86ISD::MFENCE: return "X86ISD::MFENCE";
22299 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
22300 case X86ISD::SAHF: return "X86ISD::SAHF";
22301 case X86ISD::RDRAND: return "X86ISD::RDRAND";
22302 case X86ISD::RDSEED: return "X86ISD::RDSEED";
22303 case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
22304 case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
22305 case X86ISD::VPROT: return "X86ISD::VPROT";
22306 case X86ISD::VPROTI: return "X86ISD::VPROTI";
22307 case X86ISD::VPSHA: return "X86ISD::VPSHA";
22308 case X86ISD::VPSHL: return "X86ISD::VPSHL";
22309 case X86ISD::VPCOM: return "X86ISD::VPCOM";
22310 case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
22311 case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
22312 case X86ISD::FMADD: return "X86ISD::FMADD";
22313 case X86ISD::FMSUB: return "X86ISD::FMSUB";
22314 case X86ISD::FNMADD: return "X86ISD::FNMADD";
22315 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
22316 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
22317 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
22318 case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
22319 case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
22320 case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
22321 case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
22322 case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
22323 case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
22324 case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
22325 case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
22326 case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
22327 case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
22328 case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
22329 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
22330 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
22331 case X86ISD::XTEST: return "X86ISD::XTEST";
22332 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
22333 case X86ISD::EXPAND: return "X86ISD::EXPAND";
22334 case X86ISD::SELECT: return "X86ISD::SELECT";
22335 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
22336 case X86ISD::RCP28: return "X86ISD::RCP28";
22337 case X86ISD::EXP2: return "X86ISD::EXP2";
22338 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
22339 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
22340 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
22341 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
22342 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
22343 case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
22344 case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND";
22345 case X86ISD::SCALEF: return "X86ISD::SCALEF";
22346 case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
22347 case X86ISD::ADDS: return "X86ISD::ADDS";
22348 case X86ISD::SUBS: return "X86ISD::SUBS";
22349 case X86ISD::AVG: return "X86ISD::AVG";
22350 case X86ISD::MULHRS: return "X86ISD::MULHRS";
22351 case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
22352 case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
22353 case X86ISD::FP_TO_SINT_RND: return "X86ISD::FP_TO_SINT_RND";
22354 case X86ISD::FP_TO_UINT_RND: return "X86ISD::FP_TO_UINT_RND";
22355 case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
22356 case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
22357 case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
22358 case X86ISD::SCALAR_FP_TO_SINT_RND: return "X86ISD::SCALAR_FP_TO_SINT_RND";
22359 case X86ISD::SCALAR_FP_TO_UINT_RND: return "X86ISD::SCALAR_FP_TO_UINT_RND";
22364 /// Return true if the addressing mode represented by AM is legal for this
22365 /// target, for a load/store of the specified type.
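/// For example, forms like "base + scale*index + disp32" (e.g. the memory
/// operand of "movl 16(%rax,%rcx,4), %edx") are generally accepted, subject
/// to the code-model and PIC restrictions checked below.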
22366 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
22367 const AddrMode &AM, Type *Ty,
22368 unsigned AS) const {
22369 // X86 supports extremely general addressing modes.
22370 CodeModel::Model M = getTargetMachine().getCodeModel();
22372 // X86 allows a sign-extended 32-bit immediate field as a displacement.
22373 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
22377 unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
22379 // If a reference to this global requires an extra load, we can't fold it.
22380 if (isGlobalStubReference(GVFlags))
22383 // If BaseGV requires a register for the PIC base, we cannot also have a
22384 // BaseReg specified.
22385 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
22388 // If lower 4G is not available, then we must use rip-relative addressing.
22389 if ((M != CodeModel::Small || isPositionIndependent()) &&
22390 Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
22394 switch (AM.Scale) {
22400 // These scales always work.
// These scales are formed with basereg+scalereg. Only accept if there is
// no basereg here.
22410 default: // Other stuff never works.
22417 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
22418 unsigned Bits = Ty->getScalarSizeInBits();
22420 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
22421 // particularly cheaper than those without.
22425 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
22426 // variable shifts just as cheap as scalar ones.
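// E.g. "vpsllvd %ymm2, %ymm1, %ymm0" shifts each 32-bit lane of %ymm1 left
// by the per-lane amount held in the corresponding lane of %ymm2.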
22427 if (Subtarget.hasInt256() && (Bits == 32 || Bits == 64))
22430 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
22431 // fully general vector.
22435 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
22436 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
22438 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
22439 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
22440 return NumBits1 > NumBits2;
22443 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
22444 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
22447 if (!isTypeLegal(EVT::getEVT(Ty1)))
22450 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
22452 // Assuming the caller doesn't have a zeroext or signext return parameter,
22453 // truncation all the way down to i1 is valid.
22457 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
22458 return isInt<32>(Imm);
22461 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
22462 // Can also use sub to handle negated immediates.
22463 return isInt<32>(Imm);
22466 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
22467 if (!VT1.isInteger() || !VT2.isInteger())
22469 unsigned NumBits1 = VT1.getSizeInBits();
22470 unsigned NumBits2 = VT2.getSizeInBits();
22471 return NumBits1 > NumBits2;
22474 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
22475 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
22476 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
22479 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
22480 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
22481 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
22484 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
22485 EVT VT1 = Val.getValueType();
22486 if (isZExtFree(VT1, VT2))
22489 if (Val.getOpcode() != ISD::LOAD)
22492 if (!VT1.isSimple() || !VT1.isInteger() ||
22493 !VT2.isSimple() || !VT2.isInteger())
22496 switch (VT1.getSimpleVT().SimpleTy) {
22501 // X86 has 8, 16, and 32-bit zero-extending loads.
22508 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
22511 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
22512 if (!Subtarget.hasAnyFMA())
22515 VT = VT.getScalarType();
22517 if (!VT.isSimple())
22520 switch (VT.getSimpleVT().SimpleTy) {
22531 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
22532 // i16 instructions are longer (0x66 prefix) and potentially slower.
22533 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
22536 /// Targets can use this to indicate that they only support *some*
22537 /// VECTOR_SHUFFLE operations, those with specific masks.
22538 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
22539 /// are assumed to be legal.
22541 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
22543 if (!VT.isSimple())
22546 // Not for i1 vectors
22547 if (VT.getSimpleVT().getScalarType() == MVT::i1)
22550 // Very little shuffling can be done for 64-bit vectors right now.
22551 if (VT.getSimpleVT().getSizeInBits() == 64)
22554 // We only care that the types being shuffled are legal. The lowering can
22555 // handle any possible shuffle mask that results.
22556 return isTypeLegal(VT.getSimpleVT());
22560 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
22562 // Just delegate to the generic legality, clear masks aren't special.
22563 return isShuffleMaskLegal(Mask, VT);
22566 //===----------------------------------------------------------------------===//
22567 // X86 Scheduler Hooks
22568 //===----------------------------------------------------------------------===//
22570 /// Utility function to emit xbegin specifying the start of an RTM region.
22571 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
22572 const TargetInstrInfo *TII) {
22573 DebugLoc DL = MI.getDebugLoc();
22575 const BasicBlock *BB = MBB->getBasicBlock();
22576 MachineFunction::iterator I = ++MBB->getIterator();
// For the v = xbegin(), we generate
//
// thisMBB:
//  xbegin sinkMBB
//
// mainMBB:
//  eax = -1
//
// sinkMBB:
//  v = eax
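// XBEGIN starts an RTM transaction: on the successful path execution falls
// through with EAX set to -1 (XBEGIN_STARTED, materialized in mainMBB below),
// while on an abort the hardware jumps to the fallback label (sinkMBB) with
// the abort status already in EAX.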
22589 MachineBasicBlock *thisMBB = MBB;
22590 MachineFunction *MF = MBB->getParent();
22591 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
22592 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
22593 MF->insert(I, mainMBB);
22594 MF->insert(I, sinkMBB);
22596 // Transfer the remainder of BB and its successor edges to sinkMBB.
22597 sinkMBB->splice(sinkMBB->begin(), MBB,
22598 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
22599 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// thisMBB:
//  xbegin sinkMBB
//  # fallthrough to mainMBB
//  # abort path branches to sinkMBB
22605 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
22606 thisMBB->addSuccessor(mainMBB);
22607 thisMBB->addSuccessor(sinkMBB);
22611 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
22612 mainMBB->addSuccessor(sinkMBB);
22615 // EAX is live into the sinkMBB
22616 sinkMBB->addLiveIn(X86::EAX);
22617 BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(TargetOpcode::COPY),
22618 MI.getOperand(0).getReg())
22621 MI.eraseFromParent();
// FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX, all of this code can be replaced with that
// in the .td file.
22628 static MachineBasicBlock *emitPCMPSTRM(MachineInstr &MI, MachineBasicBlock *BB,
22629 const TargetInstrInfo *TII) {
22631 switch (MI.getOpcode()) {
22632 default: llvm_unreachable("illegal opcode!");
22633 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
22634 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
22635 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
22636 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
22637 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
22638 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
22639 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
22640 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
22643 DebugLoc dl = MI.getDebugLoc();
22644 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
22646 unsigned NumArgs = MI.getNumOperands();
22647 for (unsigned i = 1; i < NumArgs; ++i) {
22648 MachineOperand &Op = MI.getOperand(i);
22649 if (!(Op.isReg() && Op.isImplicit()))
22650 MIB.addOperand(Op);
22652 if (MI.hasOneMemOperand())
22653 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
22655 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
22656 .addReg(X86::XMM0);
22658 MI.eraseFromParent();
22662 // FIXME: Custom handling because TableGen doesn't support multiple implicit
22663 // defs in an instruction pattern
22664 static MachineBasicBlock *emitPCMPSTRI(MachineInstr &MI, MachineBasicBlock *BB,
22665 const TargetInstrInfo *TII) {
22667 switch (MI.getOpcode()) {
22668 default: llvm_unreachable("illegal opcode!");
22669 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
22670 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
22671 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
22672 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
22673 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
22674 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
22675 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
22676 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
22679 DebugLoc dl = MI.getDebugLoc();
22680 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
22682 unsigned NumArgs = MI.getNumOperands(); // remove the results
22683 for (unsigned i = 1; i < NumArgs; ++i) {
22684 MachineOperand &Op = MI.getOperand(i);
22685 if (!(Op.isReg() && Op.isImplicit()))
22686 MIB.addOperand(Op);
22688 if (MI.hasOneMemOperand())
22689 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
22691 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
22694 MI.eraseFromParent();
22698 static MachineBasicBlock *emitWRPKRU(MachineInstr &MI, MachineBasicBlock *BB,
22699 const X86Subtarget &Subtarget) {
22700 DebugLoc dl = MI.getDebugLoc();
22701 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
22703 // insert input VAL into EAX
22704 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
22705 .addReg(MI.getOperand(0).getReg());
22706 // insert zero to ECX
22707 BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
22709 // insert zero to EDX
22710 BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::EDX);
22712 // insert WRPKRU instruction
22713 BuildMI(*BB, MI, dl, TII->get(X86::WRPKRUr));
22715 MI.eraseFromParent(); // The pseudo is gone now.
22719 static MachineBasicBlock *emitRDPKRU(MachineInstr &MI, MachineBasicBlock *BB,
22720 const X86Subtarget &Subtarget) {
22721 DebugLoc dl = MI.getDebugLoc();
22722 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
22724 // insert zero to ECX
22725 BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
22727 // insert RDPKRU instruction
22728 BuildMI(*BB, MI, dl, TII->get(X86::RDPKRUr));
22729 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
22732 MI.eraseFromParent(); // The pseudo is gone now.
22736 static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB,
22737 const X86Subtarget &Subtarget,
22739 DebugLoc dl = MI.getDebugLoc();
22740 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
22741 // Address into RAX/EAX, other two args into ECX, EDX.
22742 unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
22743 unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
22744 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
22745 for (int i = 0; i < X86::AddrNumOperands; ++i)
22746 MIB.addOperand(MI.getOperand(i));
22748 unsigned ValOps = X86::AddrNumOperands;
22749 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
22750 .addReg(MI.getOperand(ValOps).getReg());
22751 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
22752 .addReg(MI.getOperand(ValOps + 1).getReg());
22754 // The instruction doesn't actually take any operands though.
22755 BuildMI(*BB, MI, dl, TII->get(Opc));
22757 MI.eraseFromParent(); // The pseudo is gone now.
22761 MachineBasicBlock *
22762 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
22763 MachineBasicBlock *MBB) const {
22764 // Emit va_arg instruction on X86-64.
22766 // Operands to this pseudo-instruction:
22767 // 0 ) Output : destination address (reg)
22768 // 1-5) Input : va_list address (addr, i64mem)
22769 // 6 ) ArgSize : Size (in bytes) of vararg type
22770 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
22771 // 8 ) Align : Alignment of type
22772 // 9 ) EFLAGS (implicit-def)
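// A purely illustrative instance (not taken from the original source): an
// 8-byte, 8-aligned integer vararg pulled through gp_offset would use
// ArgSize = 8, ArgMode = 1 and Align = 8, with operands 1-5 addressing the
// va_list in memory.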
22774 assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
22775 static_assert(X86::AddrNumOperands == 5,
22776 "VAARG_64 assumes 5 address operands");
22778 unsigned DestReg = MI.getOperand(0).getReg();
22779 MachineOperand &Base = MI.getOperand(1);
22780 MachineOperand &Scale = MI.getOperand(2);
22781 MachineOperand &Index = MI.getOperand(3);
22782 MachineOperand &Disp = MI.getOperand(4);
22783 MachineOperand &Segment = MI.getOperand(5);
22784 unsigned ArgSize = MI.getOperand(6).getImm();
22785 unsigned ArgMode = MI.getOperand(7).getImm();
22786 unsigned Align = MI.getOperand(8).getImm();
22788 // Memory Reference
22789 assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
22790 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
22791 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
22793 // Machine Information
22794 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
22795 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
22796 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
22797 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
22798 DebugLoc DL = MI.getDebugLoc();
22800 // struct va_list {
22801 //   i32 gp_offset
22802 //   i32 fp_offset
22803 //   i64 overflow_area (address)
22804 //   i64 reg_save_area (address)
22805 // }
22806 // sizeof(va_list) = 24
22807 // alignment(va_list) = 8
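// Field offsets implied by this layout: gp_offset at +0, fp_offset at +4,
// overflow_area at +8 and reg_save_area at +16; the displacements used below
// (0 or 4 for the offset fields) follow directly from this.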
22809 unsigned TotalNumIntRegs = 6;
22810 unsigned TotalNumXMMRegs = 8;
22811 bool UseGPOffset = (ArgMode == 1);
22812 bool UseFPOffset = (ArgMode == 2);
22813 unsigned MaxOffset = TotalNumIntRegs * 8 +
22814 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
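// Worked example: with ArgMode == 2 (fp_offset) this is 6*8 + 8*16 = 176
// bytes, the whole register save area; with ArgMode == 1 it is 6*8 = 48
// bytes, just the general-purpose portion.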
22816 /* Align ArgSize to a multiple of 8 */
22817 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
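// e.g. ArgSize = 12 rounds up to ArgSizeA8 = 16, while ArgSize = 8 stays 8.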
22818 bool NeedsAlign = (Align > 8);
22820 MachineBasicBlock *thisMBB = MBB;
22821 MachineBasicBlock *overflowMBB;
22822 MachineBasicBlock *offsetMBB;
22823 MachineBasicBlock *endMBB;
22825 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
22826 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
22827 unsigned OffsetReg = 0;
22829 if (!UseGPOffset && !UseFPOffset) {
22830 // If we only pull from the overflow region, we don't create a branch.
22831 // We don't need to alter control flow.
22832 OffsetDestReg = 0; // unused
22833 OverflowDestReg = DestReg;
22835 offsetMBB = nullptr;
22836 overflowMBB = thisMBB;
22839 // First emit code to check if gp_offset (or fp_offset) is below the bound.
22840 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
22841 // If not, pull from overflow_area. (branch to overflowMBB)
22846 //   thisMBB branches to either offsetMBB or overflowMBB; both rejoin at endMBB.
22851 // Registers for the PHI in endMBB
22852 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
22853 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
22855 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
22856 MachineFunction *MF = MBB->getParent();
22857 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
22858 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
22859 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
22861 MachineFunction::iterator MBBIter = ++MBB->getIterator();
22863 // Insert the new basic blocks
22864 MF->insert(MBBIter, offsetMBB);
22865 MF->insert(MBBIter, overflowMBB);
22866 MF->insert(MBBIter, endMBB);
22868 // Transfer the remainder of MBB and its successor edges to endMBB.
22869 endMBB->splice(endMBB->begin(), thisMBB,
22870 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
22871 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
22873 // Make offsetMBB and overflowMBB successors of thisMBB
22874 thisMBB->addSuccessor(offsetMBB);
22875 thisMBB->addSuccessor(overflowMBB);
22877 // endMBB is a successor of both offsetMBB and overflowMBB
22878 offsetMBB->addSuccessor(endMBB);
22879 overflowMBB->addSuccessor(endMBB);
22881 // Load the offset value into a register
22882 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
22883 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
22887 .addDisp(Disp, UseFPOffset ? 4 : 0)
22888 .addOperand(Segment)
22889 .setMemRefs(MMOBegin, MMOEnd);
22891 // Check if there is enough room left to pull this argument.
22892 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
22894 .addImm(MaxOffset + 8 - ArgSizeA8);
22896 // Branch to "overflowMBB" if offset >= max
22897 // Fall through to "offsetMBB" otherwise
22898 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
22899 .addMBB(overflowMBB);
22902 // In offsetMBB, emit code to use the reg_save_area.
22904 assert(OffsetReg != 0);
22906 // Read the reg_save_area address.
22907 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
22908 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
22913 .addOperand(Segment)
22914 .setMemRefs(MMOBegin, MMOEnd);
22916 // Zero-extend the offset
22917 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
22918 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
22921 .addImm(X86::sub_32bit);
22923 // Add the offset to the reg_save_area to get the final address.
22924 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
22925 .addReg(OffsetReg64)
22926 .addReg(RegSaveReg);
22928 // Compute the offset for the next argument
22929 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
22930 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
22932 .addImm(UseFPOffset ? 16 : 8);
22934 // Store it back into the va_list.
22935 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
22939 .addDisp(Disp, UseFPOffset ? 4 : 0)
22940 .addOperand(Segment)
22941 .addReg(NextOffsetReg)
22942 .setMemRefs(MMOBegin, MMOEnd);
22945 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
22950 // Emit code to use overflow area
22953 // Load the overflow_area address into a register.
22954 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
22955 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
22960 .addOperand(Segment)
22961 .setMemRefs(MMOBegin, MMOEnd);
22963 // If we need to align it, do so. Otherwise, just copy the address
22964 // to OverflowDestReg.
22966 // Align the overflow address
22967 assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
22968 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
22970 // aligned_addr = (addr + (align-1)) & ~(align-1)
22971 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
22972 .addReg(OverflowAddrReg)
22975 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
22977 .addImm(~(uint64_t)(Align-1));
22979 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
22980 .addReg(OverflowAddrReg);
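// Worked example of the alignment above: with Align = 16 and an overflow
// address of 0x1008, TmpReg becomes 0x1017 and masking with ~15 yields the
// aligned address 0x1010.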
22983 // Compute the next overflow address after this argument.
22984 // (the overflow address should be kept 8-byte aligned)
22985 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
22986 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
22987 .addReg(OverflowDestReg)
22988 .addImm(ArgSizeA8);
22990 // Store the new overflow address.
22991 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
22996 .addOperand(Segment)
22997 .addReg(NextAddrReg)
22998 .setMemRefs(MMOBegin, MMOEnd);
23000 // If we branched, emit the PHI to the front of endMBB.
23002 BuildMI(*endMBB, endMBB->begin(), DL,
23003 TII->get(X86::PHI), DestReg)
23004 .addReg(OffsetDestReg).addMBB(offsetMBB)
23005 .addReg(OverflowDestReg).addMBB(overflowMBB);
23008 // Erase the pseudo instruction
23009 MI.eraseFromParent();
23014 MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
23015 MachineInstr &MI, MachineBasicBlock *MBB) const {
23016 // Emit code to save XMM registers to the stack. The ABI says that the
23017 // number of registers to save is given in %al, so it's theoretically
23018 // possible to do an indirect jump trick to avoid saving all of them;
23019 // however, this code takes a simpler approach and just executes all
23020 // of the stores if %al is non-zero. It's less code, and it's probably
23021 // easier on the hardware branch predictor, and stores aren't all that
23022 // expensive anyway.
23024 // Create the new basic blocks. One block contains all the XMM stores,
23025 // and one block is the final destination regardless of whether any
23026 // stores were performed.
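// Rough sketch (illustrative only) of the CFG built below for the non-Win64
// case, where %al holds the number of vector registers actually passed:
//   MBB:         test %al, %al
//                je   EndMBB
//   XMMSaveMBB:  movaps %xmm0, slot0 ... movaps %xmmN, slotN
//   EndMBB:      remainder of the original block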
23027 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
23028 MachineFunction *F = MBB->getParent();
23029 MachineFunction::iterator MBBIter = ++MBB->getIterator();
23030 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
23031 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
23032 F->insert(MBBIter, XMMSaveMBB);
23033 F->insert(MBBIter, EndMBB);
23035 // Transfer the remainder of MBB and its successor edges to EndMBB.
23036 EndMBB->splice(EndMBB->begin(), MBB,
23037 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
23038 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
23040 // The original block will now fall through to the XMM save block.
23041 MBB->addSuccessor(XMMSaveMBB);
23042 // The XMMSaveMBB will fall through to the end block.
23043 XMMSaveMBB->addSuccessor(EndMBB);
23045 // Now add the instructions.
23046 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
23047 DebugLoc DL = MI.getDebugLoc();
23049 unsigned CountReg = MI.getOperand(0).getReg();
23050 int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
23051 int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
23053 if (!Subtarget.isCallingConvWin64(F->getFunction()->getCallingConv())) {
23054 // If %al is 0, branch around the XMM save block.
23055 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
23056 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
23057 MBB->addSuccessor(EndMBB);
23060 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
23061 // that was just emitted, but clearly shouldn't be "saved".
23062 assert((MI.getNumOperands() <= 3 ||
23063 !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
23064 MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
23065 "Expected last argument to be EFLAGS");
23066 unsigned MOVOpc = Subtarget.hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
23067 // In the XMM save block, save all the XMM argument registers.
23068 for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
23069 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
23070 MachineMemOperand *MMO = F->getMachineMemOperand(
23071 MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
23072 MachineMemOperand::MOStore,
23073 /*Size=*/16, /*Align=*/16);
23074 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
23075 .addFrameIndex(RegSaveFrameIndex)
23076 .addImm(/*Scale=*/1)
23077 .addReg(/*IndexReg=*/0)
23078 .addImm(/*Disp=*/Offset)
23079 .addReg(/*Segment=*/0)
23080 .addReg(MI.getOperand(i).getReg())
23081 .addMemOperand(MMO);
23084 MI.eraseFromParent(); // The pseudo instruction is gone now.
23089 // The EFLAGS operand of SelectItr might be missing a kill marker
23090 // because there were multiple uses of EFLAGS, and ISel didn't know
23091 // which to mark. Figure out whether SelectItr should have had a
23092 // kill marker, and set it if it should. Returns the correct kill
23093 // marker value.
23094 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
23095 MachineBasicBlock* BB,
23096 const TargetRegisterInfo* TRI) {
23097 // Scan forward through BB for a use/def of EFLAGS.
23098 MachineBasicBlock::iterator miI(std::next(SelectItr));
23099 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
23100 const MachineInstr& mi = *miI;
23101 if (mi.readsRegister(X86::EFLAGS))
23103 if (mi.definesRegister(X86::EFLAGS))
23104 break; // Should have kill-flag - update below.
23107 // If we hit the end of the block, check whether EFLAGS is live into a
23108 // successor.
23109 if (miI == BB->end()) {
23110 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
23111 sEnd = BB->succ_end();
23112 sItr != sEnd; ++sItr) {
23113 MachineBasicBlock* succ = *sItr;
23114 if (succ->isLiveIn(X86::EFLAGS))
23119 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
23120 // out. SelectMI should have a kill flag on EFLAGS.
23121 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
23125 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
23126 // together with other CMOV pseudo-opcodes into a single basic-block with
23127 // conditional jump around it.
23128 static bool isCMOVPseudo(MachineInstr &MI) {
23129 switch (MI.getOpcode()) {
23130 case X86::CMOV_FR32:
23131 case X86::CMOV_FR64:
23132 case X86::CMOV_GR8:
23133 case X86::CMOV_GR16:
23134 case X86::CMOV_GR32:
23135 case X86::CMOV_RFP32:
23136 case X86::CMOV_RFP64:
23137 case X86::CMOV_RFP80:
23138 case X86::CMOV_V2F64:
23139 case X86::CMOV_V2I64:
23140 case X86::CMOV_V4F32:
23141 case X86::CMOV_V4F64:
23142 case X86::CMOV_V4I64:
23143 case X86::CMOV_V16F32:
23144 case X86::CMOV_V8F32:
23145 case X86::CMOV_V8F64:
23146 case X86::CMOV_V8I64:
23147 case X86::CMOV_V8I1:
23148 case X86::CMOV_V16I1:
23149 case X86::CMOV_V32I1:
23150 case X86::CMOV_V64I1:
23158 MachineBasicBlock *
23159 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
23160 MachineBasicBlock *BB) const {
23161 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
23162 DebugLoc DL = MI.getDebugLoc();
23164 // To "insert" a SELECT_CC instruction, we actually have to insert the
23165 // diamond control-flow pattern. The incoming instruction knows the
23166 // destination vreg to set, the condition code register to branch on, the
23167 // true/false values to select between, and a branch opcode to use.
23168 const BasicBlock *LLVM_BB = BB->getBasicBlock();
23169 MachineFunction::iterator It = ++BB->getIterator();
23174 // cmpTY ccX, r1, r2
23175 // bCC copy1MBB
23176 // fallthrough --> copy0MBB
23177 MachineBasicBlock *thisMBB = BB;
23178 MachineFunction *F = BB->getParent();
23180 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
23181 // as described above, by inserting a BB, and then making a PHI at the join
23182 // point to select the true and false operands of the CMOV in the PHI.
23184 // The code also handles two different cases of multiple CMOV opcodes
23185 // that are in succession.
23187 // Case 1:
23188 // In this case, there are multiple CMOVs in a row, all of which are based on
23189 // the same condition setting (or the exact opposite condition setting).
23190 // In this case we can lower all the CMOVs using a single inserted BB, and
23191 // then make a number of PHIs at the join point to model the CMOVs. The only
23192 // trickiness here is that in a case like:
23194 // t2 = CMOV cond1 t1, f1
23195 // t3 = CMOV cond1 t2, f2
23197 // when rewriting this into PHIs, we have to perform some renaming on the
23198 // temps since you cannot have a PHI operand refer to a PHI result earlier
23199 // in the same block. The "simple" but wrong lowering would be:
23201 // t2 = PHI t1(BB1), f1(BB2)
23202 // t3 = PHI t2(BB1), f2(BB2)
23204 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
23205 // renaming is to note that on the path through BB1, t2 is really just a
23206 // copy of t1, and do that renaming, properly generating:
23208 // t2 = PHI t1(BB1), f1(BB2)
23209 // t3 = PHI t1(BB1), f2(BB2)
23211 // Case 2, we lower cascaded CMOVs such as
23213 // (CMOV (CMOV F, T, cc1), T, cc2)
23215 // to two successive branches. For that, we look for another CMOV as the
23216 // following instruction.
23218 // Without this, we would add a PHI between the two jumps, which ends up
23219 // creating a few copies all around. For instance, for
23221 // (sitofp (zext (fcmp une)))
23223 // we would generate:
23225 // ucomiss %xmm1, %xmm0
23226 // movss <1.0f>, %xmm0
23227 // movaps %xmm0, %xmm1
23228 // jne .LBB5_2
23229 // xorps %xmm1, %xmm1
23230 // .LBB5_2:
23231 // jp .LBB5_4
23232 // movaps %xmm1, %xmm0
23233 // .LBB5_4:
23234 // retq
23236 // because this custom-inserter would have generated:
23237 //   (a CFG shaped like two stacked diamonds: A branches over B into C, and
23238 //    C branches over D into E, with a PHI at each of the two join points)
23248 // A: X = ...; Y = ...
23249 // B: empty
23250 // C: Z = PHI [X, A], [Y, B]
23251 // D: empty
23252 // E: PHI [X, C], [Z, D]
23254 // If we lower both CMOVs in a single step, we can instead generate:
23255 //   (a CFG in which A and C both branch directly to the final join block E,
23256 //    so only a single PHI is needed)
23266 // A: X = ...; Y = ...
23267 // D: empty
23268 // E: PHI [X, A], [X, C], [Y, D]
23270 // Which, in our sitofp/fcmp example, gives us something like:
23272 // ucomiss %xmm1, %xmm0
23273 // movss <1.0f>, %xmm0
23274 // jne .LBB5_4
23275 // jp .LBB5_4
23276 // xorps %xmm0, %xmm0
23277 // .LBB5_4:
23278 // retq
23280 MachineInstr *CascadedCMOV = nullptr;
23281 MachineInstr *LastCMOV = &MI;
23282 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
23283 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
23284 MachineBasicBlock::iterator NextMIIt =
23285 std::next(MachineBasicBlock::iterator(MI));
23287 // Check for case 1, where there are multiple CMOVs with the same condition
23288 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
23289 // number of jumps the most.
23291 if (isCMOVPseudo(MI)) {
23292 // See if we have a string of CMOVS with the same condition.
23293 while (NextMIIt != BB->end() && isCMOVPseudo(*NextMIIt) &&
23294 (NextMIIt->getOperand(3).getImm() == CC ||
23295 NextMIIt->getOperand(3).getImm() == OppCC)) {
23296 LastCMOV = &*NextMIIt;
23301 // This checks for case 2, but we only do this if we didn't already find
23302 // case 1, as indicated by LastCMOV == MI.
23303 if (LastCMOV == &MI && NextMIIt != BB->end() &&
23304 NextMIIt->getOpcode() == MI.getOpcode() &&
23305 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
23306 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
23307 NextMIIt->getOperand(1).isKill()) {
23308 CascadedCMOV = &*NextMIIt;
23311 MachineBasicBlock *jcc1MBB = nullptr;
23313 // If we have a cascaded CMOV, we lower it to two successive branches to
23314 // the same block. EFLAGS is used by both, so mark it as live in the second.
23315 if (CascadedCMOV) {
23316 jcc1MBB = F->CreateMachineBasicBlock(LLVM_BB);
23317 F->insert(It, jcc1MBB);
23318 jcc1MBB->addLiveIn(X86::EFLAGS);
23321 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
23322 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
23323 F->insert(It, copy0MBB);
23324 F->insert(It, sinkMBB);
23326 // If the EFLAGS register isn't dead in the terminator, then claim that it's
23327 // live into the sink and copy blocks.
23328 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
23330 MachineInstr *LastEFLAGSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
23331 if (!LastEFLAGSUser->killsRegister(X86::EFLAGS) &&
23332 !checkAndUpdateEFLAGSKill(LastEFLAGSUser, BB, TRI)) {
23333 copy0MBB->addLiveIn(X86::EFLAGS);
23334 sinkMBB->addLiveIn(X86::EFLAGS);
23337 // Transfer the remainder of BB and its successor edges to sinkMBB.
23338 sinkMBB->splice(sinkMBB->begin(), BB,
23339 std::next(MachineBasicBlock::iterator(LastCMOV)), BB->end());
23340 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
23342 // Add the true and fallthrough blocks as its successors.
23343 if (CascadedCMOV) {
23344 // The fallthrough block may be jcc1MBB, if we have a cascaded CMOV.
23345 BB->addSuccessor(jcc1MBB);
23347 // In that case, jcc1MBB will itself fall through to copy0MBB, and
23348 // jump to the sinkMBB.
23349 jcc1MBB->addSuccessor(copy0MBB);
23350 jcc1MBB->addSuccessor(sinkMBB);
23352 BB->addSuccessor(copy0MBB);
23355 // The true block target of the first (or only) branch is always sinkMBB.
23356 BB->addSuccessor(sinkMBB);
23358 // Create the conditional branch instruction.
23359 unsigned Opc = X86::GetCondBranchFromCond(CC);
23360 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
23362 if (CascadedCMOV) {
23363 unsigned Opc2 = X86::GetCondBranchFromCond(
23364 (X86::CondCode)CascadedCMOV->getOperand(3).getImm());
23365 BuildMI(jcc1MBB, DL, TII->get(Opc2)).addMBB(sinkMBB);
23369 // %FalseValue = ...
23370 // # fallthrough to sinkMBB
23371 copy0MBB->addSuccessor(sinkMBB);
23374 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
23376 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
23377 MachineBasicBlock::iterator MIItEnd =
23378 std::next(MachineBasicBlock::iterator(LastCMOV));
23379 MachineBasicBlock::iterator SinkInsertionPoint = sinkMBB->begin();
23380 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
23381 MachineInstrBuilder MIB;
23383 // As we are creating the PHIs, we have to be careful if there is more than
23384 // one. Later CMOVs may reference the results of earlier CMOVs, but later
23385 // PHIs have to reference the individual true/false inputs from earlier PHIs.
23386 // That also means that PHI construction must work forward from earlier to
23387 // later, and that the code must maintain a mapping from each earlier PHI's
23388 // destination register to the registers that went into that PHI.
23390 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
23391 unsigned DestReg = MIIt->getOperand(0).getReg();
23392 unsigned Op1Reg = MIIt->getOperand(1).getReg();
23393 unsigned Op2Reg = MIIt->getOperand(2).getReg();
23395 // If this CMOV we are generating is the opposite condition from
23396 // the jump we generated, then we have to swap the operands for the
23397 // PHI that is going to be generated.
23398 if (MIIt->getOperand(3).getImm() == OppCC)
23399 std::swap(Op1Reg, Op2Reg);
23401 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
23402 Op1Reg = RegRewriteTable[Op1Reg].first;
23404 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
23405 Op2Reg = RegRewriteTable[Op2Reg].second;
23407 MIB = BuildMI(*sinkMBB, SinkInsertionPoint, DL,
23408 TII->get(X86::PHI), DestReg)
23409 .addReg(Op1Reg).addMBB(copy0MBB)
23410 .addReg(Op2Reg).addMBB(thisMBB);
23412 // Add this PHI to the rewrite table.
23413 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
23416 // If we have a cascaded CMOV, the second Jcc provides the same incoming
23417 // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
23418 if (CascadedCMOV) {
23419 MIB.addReg(MI.getOperand(2).getReg()).addMBB(jcc1MBB);
23420 // Copy the PHI result to the register defined by the second CMOV.
23421 BuildMI(*sinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
23422 DL, TII->get(TargetOpcode::COPY),
23423 CascadedCMOV->getOperand(0).getReg())
23424 .addReg(MI.getOperand(0).getReg());
23425 CascadedCMOV->eraseFromParent();
23428 // Now remove the CMOV(s).
23429 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; )
23430 (MIIt++)->eraseFromParent();
23435 MachineBasicBlock *
23436 X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI,
23437 MachineBasicBlock *BB) const {
23438 // Combine the following atomic floating-point modification pattern:
23439 // a.store(reg OP a.load(acquire), release)
23440 // Transform this into:
23441 // OPss (%gpr), %xmm
23442 // movss %xmm, (%gpr)
23443 // Or sd equivalent for 64-bit operations.
23445 switch (MI.getOpcode()) {
23446 default: llvm_unreachable("unexpected instr type for EmitLoweredAtomicFP");
23447 case X86::RELEASE_FADD32mr:
23448 FOp = X86::ADDSSrm;
23449 MOp = X86::MOVSSmr;
23451 case X86::RELEASE_FADD64mr:
23452 FOp = X86::ADDSDrm;
23453 MOp = X86::MOVSDmr;
23456 const X86InstrInfo *TII = Subtarget.getInstrInfo();
23457 DebugLoc DL = MI.getDebugLoc();
23458 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
23459 unsigned ValOpIdx = X86::AddrNumOperands;
23460 unsigned VSrc = MI.getOperand(ValOpIdx).getReg();
23461 MachineInstrBuilder MIB =
23462 BuildMI(*BB, MI, DL, TII->get(FOp),
23463 MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
23465 for (int i = 0; i < X86::AddrNumOperands; ++i) {
23466 MachineOperand &Operand = MI.getOperand(i);
23467 // Clear any kill flags on register operands as we'll create a second
23468 // instruction using the same address operands.
23469 if (Operand.isReg())
23470 Operand.setIsKill(false);
23471 MIB.addOperand(Operand);
23473 MachineInstr *FOpMI = MIB;
23474 MIB = BuildMI(*BB, MI, DL, TII->get(MOp));
23475 for (int i = 0; i < X86::AddrNumOperands; ++i)
23476 MIB.addOperand(MI.getOperand(i));
23477 MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill);
23478 MI.eraseFromParent(); // The pseudo instruction is gone now.
23482 MachineBasicBlock *
23483 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
23484 MachineBasicBlock *BB) const {
23485 MachineFunction *MF = BB->getParent();
23486 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
23487 DebugLoc DL = MI.getDebugLoc();
23488 const BasicBlock *LLVM_BB = BB->getBasicBlock();
23490 assert(MF->shouldSplitStack());
23492 const bool Is64Bit = Subtarget.is64Bit();
23493 const bool IsLP64 = Subtarget.isTarget64BitLP64();
23495 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
23496 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
23499 // ... [Till the alloca]
23500 // If stacklet is not large enough, jump to mallocMBB
23503 // Allocate by subtracting from RSP
23504 // Jump to continueMBB
23507 // Allocate by call to runtime
23511 // [rest of original BB]
23514 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
23515 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
23516 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
23518 MachineRegisterInfo &MRI = MF->getRegInfo();
23519 const TargetRegisterClass *AddrRegClass =
23520 getRegClassFor(getPointerTy(MF->getDataLayout()));
23522 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
23523 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
23524 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
23525 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
23526 sizeVReg = MI.getOperand(1).getReg(),
23528 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
23530 MachineFunction::iterator MBBIter = ++BB->getIterator();
23532 MF->insert(MBBIter, bumpMBB);
23533 MF->insert(MBBIter, mallocMBB);
23534 MF->insert(MBBIter, continueMBB);
23536 continueMBB->splice(continueMBB->begin(), BB,
23537 std::next(MachineBasicBlock::iterator(MI)), BB->end());
23538 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
23540 // Add code to the main basic block to check if the stack limit has been hit,
23541 // and if so, jump to mallocMBB; otherwise, fall through to bumpMBB.
23542 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
23543 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
23544 .addReg(tmpSPVReg).addReg(sizeVReg);
23545 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
23546 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
23547 .addReg(SPLimitVReg);
23548 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
23550 // bumpMBB simply decreases the stack pointer, since we know the current
23551 // stacklet has enough space.
23552 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
23553 .addReg(SPLimitVReg);
23554 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
23555 .addReg(SPLimitVReg);
23556 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
23558 // Calls into a routine in libgcc to allocate more space from the heap.
23559 const uint32_t *RegMask =
23560 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
23562 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
23564 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
23565 .addExternalSymbol("__morestack_allocate_stack_space")
23566 .addRegMask(RegMask)
23567 .addReg(X86::RDI, RegState::Implicit)
23568 .addReg(X86::RAX, RegState::ImplicitDefine);
23569 } else if (Is64Bit) {
23570 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
23572 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
23573 .addExternalSymbol("__morestack_allocate_stack_space")
23574 .addRegMask(RegMask)
23575 .addReg(X86::EDI, RegState::Implicit)
23576 .addReg(X86::EAX, RegState::ImplicitDefine);
23578 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
23580 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
23581 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
23582 .addExternalSymbol("__morestack_allocate_stack_space")
23583 .addRegMask(RegMask)
23584 .addReg(X86::EAX, RegState::ImplicitDefine);
23588 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
23591 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
23592 .addReg(IsLP64 ? X86::RAX : X86::EAX);
23593 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
23595 // Set up the CFG correctly.
23596 BB->addSuccessor(bumpMBB);
23597 BB->addSuccessor(mallocMBB);
23598 mallocMBB->addSuccessor(continueMBB);
23599 bumpMBB->addSuccessor(continueMBB);
23601 // Take care of the PHI nodes.
23602 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
23603 MI.getOperand(0).getReg())
23604 .addReg(mallocPtrVReg)
23606 .addReg(bumpSPPtrVReg)
23609 // Delete the original pseudo instruction.
23610 MI.eraseFromParent();
23613 return continueMBB;
23616 MachineBasicBlock *
23617 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
23618 MachineBasicBlock *BB) const {
23619 MachineFunction *MF = BB->getParent();
23620 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
23621 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
23622 DebugLoc DL = MI.getDebugLoc();
23624 assert(!isAsynchronousEHPersonality(
23625 classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
23626 "SEH does not use catchret!");
23628 // Only 32-bit EH needs to worry about manually restoring stack pointers.
23629 if (!Subtarget.is32Bit())
23632 // C++ EH creates a new target block to hold the restore code, and wires up
23633 // the new block to the return destination with a normal JMP_4.
23634 MachineBasicBlock *RestoreMBB =
23635 MF->CreateMachineBasicBlock(BB->getBasicBlock());
23636 assert(BB->succ_size() == 1);
23637 MF->insert(std::next(BB->getIterator()), RestoreMBB);
23638 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
23639 BB->addSuccessor(RestoreMBB);
23640 MI.getOperand(0).setMBB(RestoreMBB);
23642 auto RestoreMBBI = RestoreMBB->begin();
23643 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
23644 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
23648 MachineBasicBlock *
23649 X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
23650 MachineBasicBlock *BB) const {
23651 MachineFunction *MF = BB->getParent();
23652 const Constant *PerFn = MF->getFunction()->getPersonalityFn();
23653 bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
23654 // Only 32-bit SEH requires special handling for catchpad.
23655 if (IsSEH && Subtarget.is32Bit()) {
23656 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
23657 DebugLoc DL = MI.getDebugLoc();
23658 BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
23660 MI.eraseFromParent();
23664 MachineBasicBlock *
23665 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
23666 MachineBasicBlock *BB) const {
23667 // So, here we replace TLSADDR with the sequence:
23668 // adjust_stackdown -> TLSADDR -> adjust_stackup.
23669 // We need this because TLSADDR is lowered into calls
23670 // inside MC; therefore, without the two markers shrink-wrapping
23671 // may push the prologue/epilogue past them.
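// Sketch of the resulting sequence (the pseudo opcodes come from
// getCallFrameSetupOpcode/getCallFrameDestroyOpcode, so exact names may vary):
//   ADJCALLSTACKDOWN 0, 0
//   TLS_addr / TLS_base_addr ...
//   ADJCALLSTACKUP 0, 0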
23672 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
23673 DebugLoc DL = MI.getDebugLoc();
23674 MachineFunction &MF = *BB->getParent();
23676 // Emit CALLSEQ_START right before the instruction.
23677 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
23678 MachineInstrBuilder CallseqStart =
23679 BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0);
23680 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
23682 // Emit CALLSEQ_END right after the instruction.
23683 // We don't call erase from parent because we want to keep the
23684 // original instruction around.
23685 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
23686 MachineInstrBuilder CallseqEnd =
23687 BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
23688 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
23693 MachineBasicBlock *
23694 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
23695 MachineBasicBlock *BB) const {
23696 // This is pretty easy. We're taking the value that we received from
23697 // our load from the relocation, sticking it in either RDI (x86-64)
23698 // or EAX and doing an indirect call. The return value will then
23699 // be in the normal return register.
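// Roughly, in the 64-bit case the code below emits something like
//   movq  <TLV descriptor>(%rip), %rdi
//   callq *(%rdi)
// (an illustrative sketch; the 32-bit variants use EAX and CALL32m instead).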
23700 MachineFunction *F = BB->getParent();
23701 const X86InstrInfo *TII = Subtarget.getInstrInfo();
23702 DebugLoc DL = MI.getDebugLoc();
23704 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
23705 assert(MI.getOperand(3).isGlobal() && "This should be a global");
23707 // Get a register mask for the lowered call.
23708 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
23709 // proper register mask.
23710 const uint32_t *RegMask =
23711 Subtarget.is64Bit() ?
23712 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
23713 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
23714 if (Subtarget.is64Bit()) {
23715 MachineInstrBuilder MIB =
23716 BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
23720 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
23721 MI.getOperand(3).getTargetFlags())
23723 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
23724 addDirectMem(MIB, X86::RDI);
23725 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
23726 } else if (!isPositionIndependent()) {
23727 MachineInstrBuilder MIB =
23728 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
23732 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
23733 MI.getOperand(3).getTargetFlags())
23735 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
23736 addDirectMem(MIB, X86::EAX);
23737 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
23739 MachineInstrBuilder MIB =
23740 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
23741 .addReg(TII->getGlobalBaseReg(F))
23744 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
23745 MI.getOperand(3).getTargetFlags())
23747 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
23748 addDirectMem(MIB, X86::EAX);
23749 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
23752 MI.eraseFromParent(); // The pseudo instruction is gone now.
23756 MachineBasicBlock *
23757 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
23758 MachineBasicBlock *MBB) const {
23759 DebugLoc DL = MI.getDebugLoc();
23760 MachineFunction *MF = MBB->getParent();
23761 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
23762 MachineRegisterInfo &MRI = MF->getRegInfo();
23764 const BasicBlock *BB = MBB->getBasicBlock();
23765 MachineFunction::iterator I = ++MBB->getIterator();
23767 // Memory Reference
23768 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
23769 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
23772 unsigned MemOpndSlot = 0;
23774 unsigned CurOp = 0;
23776 DstReg = MI.getOperand(CurOp++).getReg();
23777 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
23778 assert(RC->hasType(MVT::i32) && "Invalid destination!");
23779 unsigned mainDstReg = MRI.createVirtualRegister(RC);
23780 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
23782 MemOpndSlot = CurOp;
23784 MVT PVT = getPointerTy(MF->getDataLayout());
23785 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
23786 "Invalid Pointer Size!");
23788 // For v = setjmp(buf), we generate
23790 // thisMBB:
23791 //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
23792 //  SjLjSetup restoreMBB
23794 // mainMBB:
23795 //  v_main = 0
23797 // sinkMBB:
23798 //  v = phi(main, restore)
23800 // restoreMBB:
23801 //  if the base pointer is being used, load it from the frame
23802 //  v_restore = 1
23804 MachineBasicBlock *thisMBB = MBB;
23805 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
23806 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
23807 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
23808 MF->insert(I, mainMBB);
23809 MF->insert(I, sinkMBB);
23810 MF->push_back(restoreMBB);
23811 restoreMBB->setHasAddressTaken();
23813 MachineInstrBuilder MIB;
23815 // Transfer the remainder of BB and its successor edges to sinkMBB.
23816 sinkMBB->splice(sinkMBB->begin(), MBB,
23817 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
23818 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
23821 unsigned PtrStoreOpc = 0;
23822 unsigned LabelReg = 0;
23823 const int64_t LabelOffset = 1 * PVT.getStoreSize();
23824 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
23825 !isPositionIndependent();
23827 // Prepare IP either in reg or imm.
23828 if (!UseImmLabel) {
23829 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
23830 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
23831 LabelReg = MRI.createVirtualRegister(PtrRC);
23832 if (Subtarget.is64Bit()) {
23833 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
23837 .addMBB(restoreMBB)
23840 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
23841 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
23842 .addReg(XII->getGlobalBaseReg(MF))
23845 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
23849 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
23851 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
23852 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
23853 if (i == X86::AddrDisp)
23854 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
23856 MIB.addOperand(MI.getOperand(MemOpndSlot + i));
23859 MIB.addReg(LabelReg);
23861 MIB.addMBB(restoreMBB);
23862 MIB.setMemRefs(MMOBegin, MMOEnd);
23864 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
23865 .addMBB(restoreMBB);
23867 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23868 MIB.addRegMask(RegInfo->getNoPreservedMask());
23869 thisMBB->addSuccessor(mainMBB);
23870 thisMBB->addSuccessor(restoreMBB);
23874 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
23875 mainMBB->addSuccessor(sinkMBB);
23878 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
23879 TII->get(X86::PHI), DstReg)
23880 .addReg(mainDstReg).addMBB(mainMBB)
23881 .addReg(restoreDstReg).addMBB(restoreMBB);
23884 if (RegInfo->hasBasePointer(*MF)) {
23885 const bool Uses64BitFramePtr =
23886 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
23887 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
23888 X86FI->setRestoreBasePointer(MF);
23889 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
23890 unsigned BasePtr = RegInfo->getBaseRegister();
23891 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
23892 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
23893 FramePtr, true, X86FI->getRestoreBasePointerOffset())
23894 .setMIFlag(MachineInstr::FrameSetup);
23896 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
23897 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
23898 restoreMBB->addSuccessor(sinkMBB);
23900 MI.eraseFromParent();
23904 MachineBasicBlock *
23905 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
23906 MachineBasicBlock *MBB) const {
23907 DebugLoc DL = MI.getDebugLoc();
23908 MachineFunction *MF = MBB->getParent();
23909 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
23910 MachineRegisterInfo &MRI = MF->getRegInfo();
23912 // Memory Reference
23913 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
23914 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
23916 MVT PVT = getPointerTy(MF->getDataLayout());
23917 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
23918 "Invalid Pointer Size!");
23920 const TargetRegisterClass *RC =
23921 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
23922 unsigned Tmp = MRI.createVirtualRegister(RC);
23923 // Since FP is only updated here but NOT referenced, it's treated as GPR.
23924 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23925 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
23926 unsigned SP = RegInfo->getStackRegister();
23928 MachineInstrBuilder MIB;
23930 const int64_t LabelOffset = 1 * PVT.getStoreSize();
23931 const int64_t SPOffset = 2 * PVT.getStoreSize();
23933 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
23934 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
23937 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
23938 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
23939 MIB.addOperand(MI.getOperand(i));
23940 MIB.setMemRefs(MMOBegin, MMOEnd);
23942 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
23943 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
23944 if (i == X86::AddrDisp)
23945 MIB.addDisp(MI.getOperand(i), LabelOffset);
23947 MIB.addOperand(MI.getOperand(i));
23949 MIB.setMemRefs(MMOBegin, MMOEnd);
23951 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
23952 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
23953 if (i == X86::AddrDisp)
23954 MIB.addDisp(MI.getOperand(i), SPOffset);
23956 MIB.addOperand(MI.getOperand(i));
23958 MIB.setMemRefs(MMOBegin, MMOEnd);
23960 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
23962 MI.eraseFromParent();
23966 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
23967 MachineBasicBlock *MBB,
23968 MachineBasicBlock *DispatchBB,
23970 DebugLoc DL = MI.getDebugLoc();
23971 MachineFunction *MF = MBB->getParent();
23972 MachineRegisterInfo *MRI = &MF->getRegInfo();
23973 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
23975 MVT PVT = getPointerTy(MF->getDataLayout());
23976 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
23981 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
23982 !isPositionIndependent();
23985 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
23987 const TargetRegisterClass *TRC =
23988 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
23989 VR = MRI->createVirtualRegister(TRC);
23990 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
23992 /* const X86InstrInfo *XII = static_cast<const X86InstrInfo *>(TII); */
23994 if (Subtarget.is64Bit())
23995 BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
23999 .addMBB(DispatchBB)
24002 BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
24003 .addReg(0) /* XII->getGlobalBaseReg(MF) */
24006 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
24010 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
24011 addFrameReference(MIB, FI, 36);
24013 MIB.addMBB(DispatchBB);
24018 MachineBasicBlock *
24019 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
24020 MachineBasicBlock *BB) const {
24021 DebugLoc DL = MI.getDebugLoc();
24022 MachineFunction *MF = BB->getParent();
24023 MachineModuleInfo *MMI = &MF->getMMI();
24024 MachineFrameInfo *MFI = MF->getFrameInfo();
24025 MachineRegisterInfo *MRI = &MF->getRegInfo();
24026 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24027 int FI = MFI->getFunctionContextIndex();
24029 // Get a mapping of the call site numbers to all of the landing pads they're
24030 // associated with.
24031 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
24032 unsigned MaxCSNum = 0;
24033 for (auto &MBB : *MF) {
24034 if (!MBB.isEHPad())
24037 MCSymbol *Sym = nullptr;
24038 for (const auto &MI : MBB) {
24039 if (MI.isDebugValue())
24042 assert(MI.isEHLabel() && "expected EH_LABEL");
24043 Sym = MI.getOperand(0).getMCSymbol();
24047 if (!MMI->hasCallSiteLandingPad(Sym))
24050 for (unsigned CSI : MMI->getCallSiteLandingPad(Sym)) {
24051 CallSiteNumToLPad[CSI].push_back(&MBB);
24052 MaxCSNum = std::max(MaxCSNum, CSI);
24056 // Get an ordered list of the machine basic blocks for the jump table.
24057 std::vector<MachineBasicBlock *> LPadList;
24058 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
24059 LPadList.reserve(CallSiteNumToLPad.size());
24061 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
24062 for (auto &LP : CallSiteNumToLPad[CSI]) {
24063 LPadList.push_back(LP);
24064 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
24068 assert(!LPadList.empty() &&
24069 "No landing pad destinations for the dispatch jump table!");
24071 // Create the MBBs for the dispatch code.
24073 // Shove the dispatch's address into the return slot in the function context.
24074 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
24075 DispatchBB->setIsEHPad(true);
24077 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
24078 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
24079 DispatchBB->addSuccessor(TrapBB);
24081 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
24082 DispatchBB->addSuccessor(DispContBB);
24085 MF->push_back(DispatchBB);
24086 MF->push_back(DispContBB);
24087 MF->push_back(TrapBB);
24089 // Insert code into the entry block that creates and registers the function
24090 // context.
24091 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
24093 // Create the jump table and associated information
24094 MachineJumpTableInfo *JTI =
24095 MF->getOrCreateJumpTableInfo(getJumpTableEncoding());
24096 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
24098 const X86InstrInfo *XII = static_cast<const X86InstrInfo *>(TII);
24099 const X86RegisterInfo &RI = XII->getRegisterInfo();
24101 // Add a register mask with no preserved registers. This results in all
24102 // registers being marked as clobbered.
24103 if (RI.hasBasePointer(*MF)) {
24104 const bool FPIs64Bit =
24105 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
24106 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
24107 MFI->setRestoreBasePointer(MF);
24109 unsigned FP = RI.getFrameRegister(*MF);
24110 unsigned BP = RI.getBaseRegister();
24111 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
24112 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
24113 MFI->getRestoreBasePointerOffset())
24114 .addRegMask(RI.getNoPreservedMask());
24116 BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
24117 .addRegMask(RI.getNoPreservedMask());
24120 unsigned IReg = MRI->createVirtualRegister(&X86::GR32RegClass);
24121 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
24123 BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
24125 .addImm(LPadList.size());
24126 BuildMI(DispatchBB, DL, TII->get(X86::JA_1)).addMBB(TrapBB);
24128 unsigned JReg = MRI->createVirtualRegister(&X86::GR32RegClass);
24129 BuildMI(DispContBB, DL, TII->get(X86::SUB32ri), JReg)
24132 BuildMI(DispContBB, DL,
24133 TII->get(Subtarget.is64Bit() ? X86::JMP64m : X86::JMP32m))
24135 .addImm(Subtarget.is64Bit() ? 8 : 4)
24137 .addJumpTableIndex(MJTI)
24140 // Add the jump table entries as successors to the MBB.
24141 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
24142 for (auto &LP : LPadList)
24143 if (SeenMBBs.insert(LP).second)
24144 DispContBB->addSuccessor(LP);
24146 // N.B. the order the invoke BBs are processed in doesn't matter here.
24147 SmallVector<MachineBasicBlock *, 64> MBBLPads;
24148 const MCPhysReg *SavedRegs =
24149 Subtarget.getRegisterInfo()->getCalleeSavedRegs(MF);
24150 for (MachineBasicBlock *MBB : InvokeBBs) {
24151 // Remove the landing pad successor from the invoke block and replace it
24152 // with the new dispatch block.
24153 // Keep a copy of Successors since it's modified inside the loop.
24154 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
24156 // FIXME: Avoid quadratic complexity.
24157 for (auto MBBS : Successors) {
24158 if (MBBS->isEHPad()) {
24159 MBB->removeSuccessor(MBBS);
24160 MBBLPads.push_back(MBBS);
24164 MBB->addSuccessor(DispatchBB);
24166 // Find the invoke call and mark all of the callee-saved registers as
24167 // 'implicit defined' so that they're spilled. This prevents code from
24168 // moving instructions to before the EH block, where they will never be
24169 // executed.
24170 for (auto &II : reverse(*MBB)) {
24174 DenseMap<unsigned, bool> DefRegs;
24175 for (auto &MOp : II.operands())
24177 DefRegs[MOp.getReg()] = true;
24179 MachineInstrBuilder MIB(*MF, &II);
24180 for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
24181 unsigned Reg = SavedRegs[RI];
24183 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
24190 // Mark all former landing pads as non-landing pads. The dispatch is the only
24191 // landing pad now.
24192 for (auto &LP : MBBLPads)
24193 LP->setIsEHPad(false);
24195 // The instruction is gone now.
24196 MI.eraseFromParent();
24200 // Replace 213-type (isel default) FMA3 instructions with 231-type for
24201 // accumulator loops. Writing back to the accumulator allows the coalescer
24202 // to remove extra copies in the loop.
24203 // FIXME: Do this on AVX512. We don't support 231 variants yet (PR23937).
24204 MachineBasicBlock *
24205 X86TargetLowering::emitFMA3Instr(MachineInstr &MI,
24206 MachineBasicBlock *MBB) const {
24207 MachineOperand &AddendOp = MI.getOperand(3);
24209 // Bail out early if the addend isn't a register - we can't switch these.
24210 if (!AddendOp.isReg())
24213 MachineFunction &MF = *MBB->getParent();
24214 MachineRegisterInfo &MRI = MF.getRegInfo();
24216 // Check whether the addend is defined by a PHI:
24217 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
24218 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
24219 if (!AddendDef.isPHI())
24222 // Look for the following pattern:
24224 // %addend = phi [%entry, 0], [%loop, %result]
24226 // %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
24230 // %addend = phi [%entry, 0], [%loop, %result]
24232 // %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
24234 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
24235 assert(AddendDef.getOperand(i).isReg());
24236 MachineOperand PHISrcOp = AddendDef.getOperand(i);
24237 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
24238 if (&PHISrcInst == &MI) {
24239 // Found a matching instruction.
24240 unsigned NewFMAOpc = 0;
24241 switch (MI.getOpcode()) {
24242 case X86::VFMADDPDr213r:
24243 NewFMAOpc = X86::VFMADDPDr231r;
24245 case X86::VFMADDPSr213r:
24246 NewFMAOpc = X86::VFMADDPSr231r;
24248 case X86::VFMADDSDr213r:
24249 NewFMAOpc = X86::VFMADDSDr231r;
24251 case X86::VFMADDSSr213r:
24252 NewFMAOpc = X86::VFMADDSSr231r;
24254 case X86::VFMSUBPDr213r:
24255 NewFMAOpc = X86::VFMSUBPDr231r;
24257 case X86::VFMSUBPSr213r:
24258 NewFMAOpc = X86::VFMSUBPSr231r;
24260 case X86::VFMSUBSDr213r:
24261 NewFMAOpc = X86::VFMSUBSDr231r;
24263 case X86::VFMSUBSSr213r:
24264 NewFMAOpc = X86::VFMSUBSSr231r;
24266 case X86::VFNMADDPDr213r:
24267 NewFMAOpc = X86::VFNMADDPDr231r;
24269 case X86::VFNMADDPSr213r:
24270 NewFMAOpc = X86::VFNMADDPSr231r;
24272 case X86::VFNMADDSDr213r:
24273 NewFMAOpc = X86::VFNMADDSDr231r;
24275 case X86::VFNMADDSSr213r:
24276 NewFMAOpc = X86::VFNMADDSSr231r;
24278 case X86::VFNMSUBPDr213r:
24279 NewFMAOpc = X86::VFNMSUBPDr231r;
24281 case X86::VFNMSUBPSr213r:
24282 NewFMAOpc = X86::VFNMSUBPSr231r;
24284 case X86::VFNMSUBSDr213r:
24285 NewFMAOpc = X86::VFNMSUBSDr231r;
24287 case X86::VFNMSUBSSr213r:
24288 NewFMAOpc = X86::VFNMSUBSSr231r;
24290 case X86::VFMADDSUBPDr213r:
24291 NewFMAOpc = X86::VFMADDSUBPDr231r;
24293 case X86::VFMADDSUBPSr213r:
24294 NewFMAOpc = X86::VFMADDSUBPSr231r;
24296 case X86::VFMSUBADDPDr213r:
24297 NewFMAOpc = X86::VFMSUBADDPDr231r;
24299 case X86::VFMSUBADDPSr213r:
24300 NewFMAOpc = X86::VFMSUBADDPSr231r;
24303 case X86::VFMADDPDr213rY:
24304 NewFMAOpc = X86::VFMADDPDr231rY;
24306 case X86::VFMADDPSr213rY:
24307 NewFMAOpc = X86::VFMADDPSr231rY;
24309 case X86::VFMSUBPDr213rY:
24310 NewFMAOpc = X86::VFMSUBPDr231rY;
24312 case X86::VFMSUBPSr213rY:
24313 NewFMAOpc = X86::VFMSUBPSr231rY;
24315 case X86::VFNMADDPDr213rY:
24316 NewFMAOpc = X86::VFNMADDPDr231rY;
24318 case X86::VFNMADDPSr213rY:
24319 NewFMAOpc = X86::VFNMADDPSr231rY;
24321 case X86::VFNMSUBPDr213rY:
24322 NewFMAOpc = X86::VFNMSUBPDr231rY;
24324 case X86::VFNMSUBPSr213rY:
24325 NewFMAOpc = X86::VFNMSUBPSr231rY;
24327 case X86::VFMADDSUBPDr213rY:
24328 NewFMAOpc = X86::VFMADDSUBPDr231rY;
24330 case X86::VFMADDSUBPSr213rY:
24331 NewFMAOpc = X86::VFMADDSUBPSr231rY;
24333 case X86::VFMSUBADDPDr213rY:
24334 NewFMAOpc = X86::VFMSUBADDPDr231rY;
24336 case X86::VFMSUBADDPSr213rY:
24337 NewFMAOpc = X86::VFMSUBADDPSr231rY;
24340 llvm_unreachable("Unrecognized FMA variant.");
24343 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
24344 MachineInstrBuilder MIB =
24345 BuildMI(MF, MI.getDebugLoc(), TII.get(NewFMAOpc))
24346 .addOperand(MI.getOperand(0))
24347 .addOperand(MI.getOperand(3))
24348 .addOperand(MI.getOperand(2))
24349 .addOperand(MI.getOperand(1));
24350 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
24351 MI.eraseFromParent();
24358 MachineBasicBlock *
24359 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
24360 MachineBasicBlock *BB) const {
24361 switch (MI.getOpcode()) {
24362 default: llvm_unreachable("Unexpected instr type to insert");
24363 case X86::TAILJMPd64:
24364 case X86::TAILJMPr64:
24365 case X86::TAILJMPm64:
24366 case X86::TAILJMPd64_REX:
24367 case X86::TAILJMPr64_REX:
24368 case X86::TAILJMPm64_REX:
24369 llvm_unreachable("TAILJMP64 would not be touched here.");
24370 case X86::TCRETURNdi64:
24371 case X86::TCRETURNri64:
24372 case X86::TCRETURNmi64:
24374 case X86::TLS_addr32:
24375 case X86::TLS_addr64:
24376 case X86::TLS_base_addr32:
24377 case X86::TLS_base_addr64:
24378 return EmitLoweredTLSAddr(MI, BB);
24379 case X86::CATCHRET:
24380 return EmitLoweredCatchRet(MI, BB);
24381 case X86::CATCHPAD:
24382 return EmitLoweredCatchPad(MI, BB);
24383 case X86::SEG_ALLOCA_32:
24384 case X86::SEG_ALLOCA_64:
24385 return EmitLoweredSegAlloca(MI, BB);
24386 case X86::TLSCall_32:
24387 case X86::TLSCall_64:
24388 return EmitLoweredTLSCall(MI, BB);
24389 case X86::CMOV_FR32:
24390 case X86::CMOV_FR64:
24391 case X86::CMOV_FR128:
24392 case X86::CMOV_GR8:
24393 case X86::CMOV_GR16:
24394 case X86::CMOV_GR32:
24395 case X86::CMOV_RFP32:
24396 case X86::CMOV_RFP64:
24397 case X86::CMOV_RFP80:
24398 case X86::CMOV_V2F64:
24399 case X86::CMOV_V2I64:
24400 case X86::CMOV_V4F32:
24401 case X86::CMOV_V4F64:
24402 case X86::CMOV_V4I64:
24403 case X86::CMOV_V16F32:
24404 case X86::CMOV_V8F32:
24405 case X86::CMOV_V8F64:
24406 case X86::CMOV_V8I64:
24407 case X86::CMOV_V8I1:
24408 case X86::CMOV_V16I1:
24409 case X86::CMOV_V32I1:
24410 case X86::CMOV_V64I1:
24411 return EmitLoweredSelect(MI, BB);
24413 case X86::RDFLAGS32:
24414 case X86::RDFLAGS64: {
24415 DebugLoc DL = MI.getDebugLoc();
24416 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24418 MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
24419 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
24420 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
24421 // Permit reads of the FLAGS register without it being defined.
24422 // This intrinsic exists to read external processor state in flags, such as
24423 // the trap flag, interrupt flag, and direction flag, none of which are
24424 // modeled by the backend.
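// In effect the lowering below is "pushf; pop %dst" (pushfq/popq in 64-bit
// mode); the implicit FLAGS use on the push is marked undef so the read is
// legal even though nothing defines EFLAGS beforehand.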
24425 Push->getOperand(2).setIsUndef();
24426 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
24428 MI.eraseFromParent(); // The pseudo is gone now.
24432 case X86::WRFLAGS32:
24433 case X86::WRFLAGS64: {
24434 DebugLoc DL = MI.getDebugLoc();
24435 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24437 MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
24439 MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
24440 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
24441 BuildMI(*BB, MI, DL, TII->get(PopF));
24443 MI.eraseFromParent(); // The pseudo is gone now.
24447 case X86::RELEASE_FADD32mr:
24448 case X86::RELEASE_FADD64mr:
24449 return EmitLoweredAtomicFP(MI, BB);
24451 case X86::FP32_TO_INT16_IN_MEM:
24452 case X86::FP32_TO_INT32_IN_MEM:
24453 case X86::FP32_TO_INT64_IN_MEM:
24454 case X86::FP64_TO_INT16_IN_MEM:
24455 case X86::FP64_TO_INT32_IN_MEM:
24456 case X86::FP64_TO_INT64_IN_MEM:
24457 case X86::FP80_TO_INT16_IN_MEM:
24458 case X86::FP80_TO_INT32_IN_MEM:
24459 case X86::FP80_TO_INT64_IN_MEM: {
24460 MachineFunction *F = BB->getParent();
24461 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
24462 DebugLoc DL = MI.getDebugLoc();
24464 // Change the floating point control register to use "round towards zero"
24465 // mode when truncating to an integer value.
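// Background: the rounding mode lives in bits 10-11 of the x87 control word;
// the code below saves the current word, installs one whose rounding field
// selects truncation (round toward zero), performs the store, and then
// restores the original word.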
24466 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
24467 addFrameReference(BuildMI(*BB, MI, DL,
24468 TII->get(X86::FNSTCW16m)), CWFrameIdx);
24470 // Load the old value of the control word...
24472 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
24473 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
24476 // Store a new control word that selects round-toward-zero...
24477 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
24480 // Reload the modified control word now...
24481 addFrameReference(BuildMI(*BB, MI, DL,
24482 TII->get(X86::FLDCW16m)), CWFrameIdx);
24484 // Restore the memory image of the control word to its original value.
24485 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
24486 .addReg(OldCW);
24488 // Get the X86 opcode to use.
24489 unsigned Opc;
24490 switch (MI.getOpcode()) {
24491 default: llvm_unreachable("illegal opcode!");
24492 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
24493 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
24494 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
24495 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
24496 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
24497 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
24498 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
24499 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
24500 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
24501 }
24503 X86AddressMode AM = getAddressFromInstr(&MI, 0);
24504 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
24505 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
24507 // Reload the original control word now.
24508 addFrameReference(BuildMI(*BB, MI, DL,
24509 TII->get(X86::FLDCW16m)), CWFrameIdx);
24511 MI.eraseFromParent(); // The pseudo instruction is gone now.
24512 return BB;
24513 }
24514 // String/text processing lowering.
24515 case X86::PCMPISTRM128REG:
24516 case X86::VPCMPISTRM128REG:
24517 case X86::PCMPISTRM128MEM:
24518 case X86::VPCMPISTRM128MEM:
24519 case X86::PCMPESTRM128REG:
24520 case X86::VPCMPESTRM128REG:
24521 case X86::PCMPESTRM128MEM:
24522 case X86::VPCMPESTRM128MEM:
24523 assert(Subtarget.hasSSE42() &&
24524 "Target must have SSE4.2 or AVX features enabled");
24525 return emitPCMPSTRM(MI, BB, Subtarget.getInstrInfo());
24527 // String/text processing lowering.
24528 case X86::PCMPISTRIREG:
24529 case X86::VPCMPISTRIREG:
24530 case X86::PCMPISTRIMEM:
24531 case X86::VPCMPISTRIMEM:
24532 case X86::PCMPESTRIREG:
24533 case X86::VPCMPESTRIREG:
24534 case X86::PCMPESTRIMEM:
24535 case X86::VPCMPESTRIMEM:
24536 assert(Subtarget.hasSSE42() &&
24537 "Target must have SSE4.2 or AVX features enabled");
24538 return emitPCMPSTRI(MI, BB, Subtarget.getInstrInfo());
24540 // Thread synchronization.
24541 case X86::MONITOR:
24542 return emitMonitor(MI, BB, Subtarget, X86::MONITORrrr);
24543 case X86::MONITORX:
24544 return emitMonitor(MI, BB, Subtarget, X86::MONITORXrrr);
24546 case X86::WRPKRU:
24547 return emitWRPKRU(MI, BB, Subtarget);
24548 case X86::RDPKRU:
24549 return emitRDPKRU(MI, BB, Subtarget);
24551 case X86::XBEGIN:
24552 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
24554 case X86::VASTART_SAVE_XMM_REGS:
24555 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
24557 case X86::VAARG_64:
24558 return EmitVAARG64WithCustomInserter(MI, BB);
24560 case X86::EH_SjLj_SetJmp32:
24561 case X86::EH_SjLj_SetJmp64:
24562 return emitEHSjLjSetJmp(MI, BB);
24564 case X86::EH_SjLj_LongJmp32:
24565 case X86::EH_SjLj_LongJmp64:
24566 return emitEHSjLjLongJmp(MI, BB);
24568 case X86::Int_eh_sjlj_setup_dispatch:
24569 return EmitSjLjDispatchBlock(MI, BB);
24571 case TargetOpcode::STATEPOINT:
24572 // As an implementation detail, STATEPOINT shares the STACKMAP format at
24573 // this point in the process. We diverge later.
24574 return emitPatchPoint(MI, BB);
24576 case TargetOpcode::STACKMAP:
24577 case TargetOpcode::PATCHPOINT:
24578 return emitPatchPoint(MI, BB);
24580 case X86::VFMADDPDr213r:
24581 case X86::VFMADDPSr213r:
24582 case X86::VFMADDSDr213r:
24583 case X86::VFMADDSSr213r:
24584 case X86::VFMSUBPDr213r:
24585 case X86::VFMSUBPSr213r:
24586 case X86::VFMSUBSDr213r:
24587 case X86::VFMSUBSSr213r:
24588 case X86::VFNMADDPDr213r:
24589 case X86::VFNMADDPSr213r:
24590 case X86::VFNMADDSDr213r:
24591 case X86::VFNMADDSSr213r:
24592 case X86::VFNMSUBPDr213r:
24593 case X86::VFNMSUBPSr213r:
24594 case X86::VFNMSUBSDr213r:
24595 case X86::VFNMSUBSSr213r:
24596 case X86::VFMADDSUBPDr213r:
24597 case X86::VFMADDSUBPSr213r:
24598 case X86::VFMSUBADDPDr213r:
24599 case X86::VFMSUBADDPSr213r:
24600 case X86::VFMADDPDr213rY:
24601 case X86::VFMADDPSr213rY:
24602 case X86::VFMSUBPDr213rY:
24603 case X86::VFMSUBPSr213rY:
24604 case X86::VFNMADDPDr213rY:
24605 case X86::VFNMADDPSr213rY:
24606 case X86::VFNMSUBPDr213rY:
24607 case X86::VFNMSUBPSr213rY:
24608 case X86::VFMADDSUBPDr213rY:
24609 case X86::VFMADDSUBPSr213rY:
24610 case X86::VFMSUBADDPDr213rY:
24611 case X86::VFMSUBADDPSr213rY:
24612 return emitFMA3Instr(MI, BB);
24613 case X86::LCMPXCHG8B_SAVE_EBX:
24614 case X86::LCMPXCHG16B_SAVE_RBX: {
24615 unsigned BasePtr =
24616 MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
24617 if (!BB->isLiveIn(BasePtr))
24618 BB->addLiveIn(BasePtr);
24619 return BB;
24620 }
24621 }
24622 }
24624 //===----------------------------------------------------------------------===//
24625 // X86 Optimization Hooks
24626 //===----------------------------------------------------------------------===//
24628 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
24629 APInt &KnownZero,
24630 APInt &KnownOne,
24631 const SelectionDAG &DAG,
24632 unsigned Depth) const {
24633 unsigned BitWidth = KnownZero.getBitWidth();
24634 unsigned Opc = Op.getOpcode();
24635 assert((Opc >= ISD::BUILTIN_OP_END ||
24636 Opc == ISD::INTRINSIC_WO_CHAIN ||
24637 Opc == ISD::INTRINSIC_W_CHAIN ||
24638 Opc == ISD::INTRINSIC_VOID) &&
24639 "Should use MaskedValueIsZero if you don't know whether Op"
24640 " is a target node!");
24642 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
24656 // These nodes' second result is a boolean.
24657 if (Op.getResNo() == 0)
24660 case X86ISD::SETCC:
24661 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
24663 case X86ISD::MOVMSK: {
24664 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
24665 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
24671 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
24672 SDValue Op,
24673 const SelectionDAG &,
24674 unsigned Depth) const {
24675 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
24676 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
24677 return Op.getValueType().getScalarSizeInBits();
24679 // Fallback case.
24680 return 1;
24681 }
24683 /// Returns true (and the GlobalValue and the offset) if the node is a
24684 /// GlobalAddress + offset.
24685 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
24686 const GlobalValue* &GA,
24687 int64_t &Offset) const {
24688 if (N->getOpcode() == X86ISD::Wrapper) {
24689 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
24690 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
24691 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
24692 return true;
24693 }
24694 }
24695 return TargetLowering::isGAPlusOffset(N, GA, Offset);
24696 }
24698 /// Performs shuffle combines for 256-bit vectors.
24699 /// FIXME: This could be expanded to support 512 bit vectors as well.
24700 static SDValue combineShuffle256(SDNode *N, SelectionDAG &DAG,
24701 TargetLowering::DAGCombinerInfo &DCI,
24702 const X86Subtarget &Subtarget) {
24703 SDLoc dl(N);
24704 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
24705 SDValue V1 = SVOp->getOperand(0);
24706 SDValue V2 = SVOp->getOperand(1);
24707 MVT VT = SVOp->getSimpleValueType(0);
24708 unsigned NumElems = VT.getVectorNumElements();
24710 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
24711 V2.getOpcode() == ISD::CONCAT_VECTORS) {
24715 // Recognize a shuffle of (CONCAT_VECTORS V, UNDEF) with
24717 // (CONCAT_VECTORS zero-BUILD_VECTOR, UNDEF):
24720 // RESULT: V zero extended into the wider vector type.
24722 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
24723 !V2.getOperand(1).isUndef() || !V1.getOperand(1).isUndef())
24726 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
24729 // To match the shuffle mask, the first half of the mask should
24730 // be exactly the first vector, and all the rest a splat with the
24731 // first element of the second one.
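// For example, with v4i64 (NumElems == 4) the only mask accepted here is
// <0, 1, 4, 4> (allowing undefs): the low half is the identity of V1 and the
// high half splats element 0 of V2, which is the all-zeros build_vector.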
24732 for (unsigned i = 0; i != NumElems/2; ++i)
24733 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
24734 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
24737 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
24738 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
24739 if (Ld->hasNUsesOfValue(1, 0)) {
24740 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
24741 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
24742 SDValue ResNode =
24743 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
24745 Ld->getPointerInfo(),
24746 Ld->getAlignment(),
24747 false/*isVolatile*/, true/*ReadMem*/,
24748 false/*WriteMem*/);
24750 // Make sure the newly-created LOAD is in the same position as Ld in
24751 // terms of dependency. We create a TokenFactor for Ld and ResNode,
24752 // and update uses of Ld's output chain to use the TokenFactor.
24753 if (Ld->hasAnyUseOfValue(1)) {
24754 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
24755 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
24756 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
24757 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
24758 SDValue(ResNode.getNode(), 1));
24761 return DAG.getBitcast(VT, ResNode);
24765 // Emit a zeroed vector and insert the desired subvector on its
24766 // lower half.
24767 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
24768 SDValue InsV = insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
24769 return DCI.CombineTo(N, InsV);
24775 // Attempt to match a combined shuffle mask against supported unary shuffle
24776 // instructions.
24777 // TODO: Investigate sharing more of this with shuffle lowering.
24778 static bool matchUnaryVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
24779 const X86Subtarget &Subtarget,
24780 unsigned &Shuffle, MVT &ShuffleVT) {
24781 bool FloatDomain = SrcVT.isFloatingPoint() ||
24782 (!Subtarget.hasAVX2() && SrcVT.is256BitVector());
24784 // Match a 128-bit integer vector against a VZEXT_MOVL (MOVQ) instruction.
24785 if (!FloatDomain && SrcVT.is128BitVector() &&
24786 isTargetShuffleEquivalent(Mask, {0, SM_SentinelZero})) {
24787 Shuffle = X86ISD::VZEXT_MOVL;
24788 ShuffleVT = MVT::v2i64;
24792 // Check if we have SSE3, which lets us use MOVDDUP etc. These
24793 // instructions are no slower than UNPCKLPD, but they can also fold
24794 // their input operand from memory, even an unaligned load.
24795 if (SrcVT.is128BitVector() && Subtarget.hasSSE3() && FloatDomain) {
24796 if (isTargetShuffleEquivalent(Mask, {0, 0})) {
24797 Shuffle = X86ISD::MOVDDUP;
24798 ShuffleVT = MVT::v2f64;
24801 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
24802 Shuffle = X86ISD::MOVSLDUP;
24803 ShuffleVT = MVT::v4f32;
24806 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
24807 Shuffle = X86ISD::MOVSHDUP;
24808 ShuffleVT = MVT::v4f32;
24813 if (SrcVT.is256BitVector() && FloatDomain) {
24814 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
24815 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
24816 Shuffle = X86ISD::MOVDDUP;
24817 ShuffleVT = MVT::v4f64;
24820 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
24821 Shuffle = X86ISD::MOVSLDUP;
24822 ShuffleVT = MVT::v8f32;
24825 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
24826 Shuffle = X86ISD::MOVSHDUP;
24827 ShuffleVT = MVT::v8f32;
24832 if (SrcVT.is512BitVector() && FloatDomain) {
24833 assert(Subtarget.hasAVX512() &&
24834 "AVX512 required for 512-bit vector shuffles");
24835 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
24836 Shuffle = X86ISD::MOVDDUP;
24837 ShuffleVT = MVT::v8f64;
24840 if (isTargetShuffleEquivalent(
24841 Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
24842 Shuffle = X86ISD::MOVSLDUP;
24843 ShuffleVT = MVT::v16f32;
24846 if (isTargetShuffleEquivalent(
24847 Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
24848 Shuffle = X86ISD::MOVSHDUP;
24849 ShuffleVT = MVT::v16f32;
24854 // Attempt to match against broadcast-from-vector.
24855 if (Subtarget.hasAVX2()) {
24856 unsigned NumElts = Mask.size();
24857 SmallVector<int, 64> BroadcastMask(NumElts, 0);
24858 if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
24859 unsigned EltSize = SrcVT.getSizeInBits() / NumElts;
24860 ShuffleVT = FloatDomain ? MVT::getFloatingPointVT(EltSize)
24861 : MVT::getIntegerVT(EltSize);
24862 ShuffleVT = MVT::getVectorVT(ShuffleVT, NumElts);
24863 Shuffle = X86ISD::VBROADCAST;
24871 // Attempt to match a combined shuffle mask against supported unary immediate
24872 // permute instructions.
24873 // TODO: Investigate sharing more of this with shuffle lowering.
24874 static bool matchPermuteVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
24875 const X86Subtarget &Subtarget,
24876 unsigned &Shuffle, MVT &ShuffleVT,
24877 unsigned &PermuteImm) {
24878 // Ensure the mask doesn't contain any zero elements.
24879 for (int M : Mask) {
24880 if (M == SM_SentinelZero)
24881 return false;
24882 assert(SM_SentinelUndef <= M && M < (int)Mask.size() &&
24883 "Expected unary shuffle");
24886 unsigned MaskScalarSizeInBits = SrcVT.getSizeInBits() / Mask.size();
24887 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
24889 // Handle PSHUFLW/PSHUFHW repeated patterns.
24890 if (MaskScalarSizeInBits == 16) {
24891 SmallVector<int, 4> RepeatedMask;
24892 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
24893 ArrayRef<int> LoMask(Mask.data() + 0, 4);
24894 ArrayRef<int> HiMask(Mask.data() + 4, 4);
24896 // PSHUFLW: permute lower 4 elements only.
24897 if (isUndefOrInRange(LoMask, 0, 4) &&
24898 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
24899 Shuffle = X86ISD::PSHUFLW;
24900 ShuffleVT = MVT::getVectorVT(MVT::i16, SrcVT.getSizeInBits() / 16);
24901 PermuteImm = getV4X86ShuffleImm(LoMask);
24905 // PSHUFHW: permute upper 4 elements only.
24906 if (isUndefOrInRange(HiMask, 4, 8) &&
24907 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
24908 // Offset the HiMask so that we can create the shuffle immediate.
24909 int OffsetHiMask[4];
24910 for (int i = 0; i != 4; ++i)
24911 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
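// e.g. a HiMask of <5, 4, 7, 6> becomes <1, 0, 3, 2>, the form expected by
// getV4X86ShuffleImm since PSHUFHW indexes within the high 4 words.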
24913 Shuffle = X86ISD::PSHUFHW;
24914 ShuffleVT = MVT::getVectorVT(MVT::i16, SrcVT.getSizeInBits() / 16);
24915 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
24924 // We only support permutation of 32/64 bit elements after this.
24925 if (MaskScalarSizeInBits != 32 && MaskScalarSizeInBits != 64)
24928 // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
24929 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
24930 bool FloatDomain = SrcVT.isFloatingPoint();
24931 if (FloatDomain && !Subtarget.hasAVX())
24934 // Pre-AVX2 we must use float shuffles on 256-bit vectors.
24935 if (SrcVT.is256BitVector() && !Subtarget.hasAVX2())
24936 FloatDomain = true;
24938 // Check for lane crossing permutes.
24939 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
24940 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
24941 if (Subtarget.hasAVX2() && SrcVT.is256BitVector() && Mask.size() == 4) {
24942 Shuffle = X86ISD::VPERMI;
24943 ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
24944 PermuteImm = getV4X86ShuffleImm(Mask);
24947 if (Subtarget.hasAVX512() && SrcVT.is512BitVector() && Mask.size() == 8) {
24948 SmallVector<int, 4> RepeatedMask;
24949 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
24950 Shuffle = X86ISD::VPERMI;
24951 ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
24952 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
24959 // VPERMILPD can permute with a non-repeating shuffle.
24960 if (FloatDomain && MaskScalarSizeInBits == 64) {
24961 Shuffle = X86ISD::VPERMILPI;
24962 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
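// Each immediate bit selects the low (0) or high (1) f64 within its 128-bit
// lane, so e.g. a mask of <1, 0, 3, 2> produces a VPERMILPD immediate of
// 0b0101.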
24963 PermuteImm = 0;
24964 for (int i = 0, e = Mask.size(); i != e; ++i) {
24965 int M = Mask[i];
24966 if (M == SM_SentinelUndef)
24967 continue;
24968 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
24969 PermuteImm |= (M & 1) << i;
24970 }
24971 return true;
24972 }
24974 // We need a repeating shuffle mask for VPERMILPS/PSHUFD.
24975 SmallVector<int, 4> RepeatedMask;
24976 if (!is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask))
24979 // Narrow the repeated mask for 32-bit element permutes.
24980 SmallVector<int, 4> WordMask = RepeatedMask;
24981 if (MaskScalarSizeInBits == 64)
24982 scaleShuffleMask(2, RepeatedMask, WordMask);
24984 Shuffle = (FloatDomain ? X86ISD::VPERMILPI : X86ISD::PSHUFD);
24985 ShuffleVT = (FloatDomain ? MVT::f32 : MVT::i32);
24986 ShuffleVT = MVT::getVectorVT(ShuffleVT, SrcVT.getSizeInBits() / 32);
24987 PermuteImm = getV4X86ShuffleImm(WordMask);
24991 // Attempt to match a combined unary shuffle mask against supported binary
24992 // shuffle instructions.
24993 // TODO: Investigate sharing more of this with shuffle lowering.
24994 static bool matchBinaryVectorShuffle(MVT SrcVT, ArrayRef<int> Mask,
24995 unsigned &Shuffle, MVT &ShuffleVT) {
24996 bool FloatDomain = SrcVT.isFloatingPoint();
24998 if (SrcVT.is128BitVector()) {
24999 if (isTargetShuffleEquivalent(Mask, {0, 0}) && FloatDomain) {
25000 Shuffle = X86ISD::MOVLHPS;
25001 ShuffleVT = MVT::v4f32;
25004 if (isTargetShuffleEquivalent(Mask, {1, 1}) && FloatDomain) {
25005 Shuffle = X86ISD::MOVHLPS;
25006 ShuffleVT = MVT::v4f32;
25009 if (isTargetShuffleEquivalent(Mask, {0, 0, 1, 1}) && FloatDomain) {
25010 Shuffle = X86ISD::UNPCKL;
25011 ShuffleVT = MVT::v4f32;
25014 if (isTargetShuffleEquivalent(Mask, {2, 2, 3, 3}) && FloatDomain) {
25015 Shuffle = X86ISD::UNPCKH;
25016 ShuffleVT = MVT::v4f32;
25019 if (isTargetShuffleEquivalent(Mask, {0, 0, 1, 1, 2, 2, 3, 3}) ||
25020 isTargetShuffleEquivalent(
25021 Mask, {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7})) {
25022 Shuffle = X86ISD::UNPCKL;
25023 ShuffleVT = Mask.size() == 8 ? MVT::v8i16 : MVT::v16i8;
25026 if (isTargetShuffleEquivalent(Mask, {4, 4, 5, 5, 6, 6, 7, 7}) ||
25027 isTargetShuffleEquivalent(Mask, {8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13,
25028 13, 14, 14, 15, 15})) {
25029 Shuffle = X86ISD::UNPCKH;
25030 ShuffleVT = Mask.size() == 8 ? MVT::v8i16 : MVT::v16i8;
25038 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
25039 /// possible.
25041 /// This is the leaf of the recursive combine below. When we have found some
25042 /// chain of single-use x86 shuffle instructions and accumulated the combined
25043 /// shuffle mask represented by them, this will try to pattern match that mask
25044 /// into either a single instruction if there is a special purpose instruction
25045 /// for this operation, or into a PSHUFB instruction which is a fully general
25046 /// instruction but should only be used to replace chains over a certain depth.
25047 static bool combineX86ShuffleChain(SDValue Input, SDValue Root,
25048 ArrayRef<int> BaseMask, int Depth,
25049 bool HasVariableMask, SelectionDAG &DAG,
25050 TargetLowering::DAGCombinerInfo &DCI,
25051 const X86Subtarget &Subtarget) {
25052 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
25054 // Find the operand that enters the chain. Note that multiple uses are OK
25055 // here, we're not going to remove the operand we find.
25056 Input = peekThroughBitcasts(Input);
25058 MVT VT = Input.getSimpleValueType();
25059 MVT RootVT = Root.getSimpleValueType();
25060 SDLoc DL(Root);
25062 SDValue Res;
25064 unsigned NumBaseMaskElts = BaseMask.size();
25065 if (NumBaseMaskElts == 1) {
25066 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
25067 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Input),
25068 /*AddTo*/ true);
25069 return true;
25070 }
25072 unsigned RootSizeInBits = RootVT.getSizeInBits();
25073 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
25075 // Don't combine if we are an AVX512/EVEX target and the mask element size
25076 // is different from the root element size - this would prevent writemasks
25077 // from being reused.
25078 // TODO - this currently prevents all lane shuffles from occurring.
25079 // TODO - check for writemasks usage instead of always preventing combining.
25080 // TODO - attempt to narrow Mask back to writemask size.
25081 if (RootVT.getScalarSizeInBits() != BaseMaskEltSizeInBits &&
25082 (RootSizeInBits == 512 ||
25083 (Subtarget.hasVLX() && RootSizeInBits >= 128))) {
25084 return false;
25085 }
25087 // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
25089 // Handle 128-bit lane shuffles of 256-bit vectors.
25090 if (VT.is256BitVector() && NumBaseMaskElts == 2 &&
25091 !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
25092 if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128)
25093 return false; // Nothing to do!
25094 MVT ShuffleVT = (VT.isFloatingPoint() || !Subtarget.hasAVX2() ? MVT::v4f64
25095 : MVT::v4i64);
25096 unsigned PermMask = 0;
25097 PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
25098 PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
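// Bits [1:0] and [5:4] of the immediate pick the source 128-bit lane for the
// low and high halves; bit 3 / bit 7 zero that half instead. E.g. a widened
// mask of <1, 0> (swap the lanes) gives PermMask 0x01, and <-1, 1> gives 0x18.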
25100 Res = DAG.getBitcast(ShuffleVT, Input);
25101 DCI.AddToWorklist(Res.getNode());
25102 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
25103 DAG.getUNDEF(ShuffleVT),
25104 DAG.getConstant(PermMask, DL, MVT::i8));
25105 DCI.AddToWorklist(Res.getNode());
25106 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
25111 // For masks that have been widened to 128-bit elements or more,
25112 // narrow back down to 64-bit elements.
25113 SmallVector<int, 64> Mask;
25114 if (BaseMaskEltSizeInBits > 64) {
25115 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
25116 int MaskScale = BaseMaskEltSizeInBits / 64;
25117 scaleShuffleMask(MaskScale, BaseMask, Mask);
25119 Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
25122 unsigned NumMaskElts = Mask.size();
25123 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
25125 // Determine the effective mask value type.
25126 bool FloatDomain =
25127 (VT.isFloatingPoint() || (VT.is256BitVector() && !Subtarget.hasAVX2())) &&
25128 (32 <= MaskEltSizeInBits);
25129 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
25130 : MVT::getIntegerVT(MaskEltSizeInBits);
25131 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
25133 // Attempt to match the mask against known shuffle patterns.
25134 MVT ShuffleVT;
25135 unsigned Shuffle, PermuteImm;
25137 if (matchUnaryVectorShuffle(VT, Mask, Subtarget, Shuffle, ShuffleVT)) {
25138 if (Depth == 1 && Root.getOpcode() == Shuffle)
25139 return false; // Nothing to do!
25140 Res = DAG.getBitcast(ShuffleVT, Input);
25141 DCI.AddToWorklist(Res.getNode());
25142 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
25143 DCI.AddToWorklist(Res.getNode());
25144 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
25149 if (matchPermuteVectorShuffle(VT, Mask, Subtarget, Shuffle, ShuffleVT,
25151 if (Depth == 1 && Root.getOpcode() == Shuffle)
25152 return false; // Nothing to do!
25153 Res = DAG.getBitcast(ShuffleVT, Input);
25154 DCI.AddToWorklist(Res.getNode());
25155 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
25156 DAG.getConstant(PermuteImm, DL, MVT::i8));
25157 DCI.AddToWorklist(Res.getNode());
25158 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
25163 if (matchBinaryVectorShuffle(VT, Mask, Shuffle, ShuffleVT)) {
25164 if (Depth == 1 && Root.getOpcode() == Shuffle)
25165 return false; // Nothing to do!
25166 Res = DAG.getBitcast(ShuffleVT, Input);
25167 DCI.AddToWorklist(Res.getNode());
25168 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res, Res);
25169 DCI.AddToWorklist(Res.getNode());
25170 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
25175 // Attempt to blend with zero.
25176 if (NumMaskElts <= 8 &&
25177 ((Subtarget.hasSSE41() && VT.is128BitVector()) ||
25178 (Subtarget.hasAVX() && VT.is256BitVector()))) {
25179 // Convert VT to a type compatible with X86ISD::BLENDI.
25180 // TODO - add 16i16 support (requires lane duplication).
25181 MVT ShuffleVT = MaskVT;
25182 if (Subtarget.hasAVX2()) {
25183 if (ShuffleVT == MVT::v4i64)
25184 ShuffleVT = MVT::v8i32;
25185 else if (ShuffleVT == MVT::v2i64)
25186 ShuffleVT = MVT::v4i32;
25187 } else {
25188 if (ShuffleVT == MVT::v2i64 || ShuffleVT == MVT::v4i32)
25189 ShuffleVT = MVT::v8i16;
25190 else if (ShuffleVT == MVT::v4i64)
25191 ShuffleVT = MVT::v4f64;
25192 else if (ShuffleVT == MVT::v8i32)
25193 ShuffleVT = MVT::v8f32;
25194 }
25196 if (isSequentialOrUndefOrZeroInRange(Mask, /*Pos*/ 0, /*Size*/ NumMaskElts,
25198 NumMaskElts <= ShuffleVT.getVectorNumElements()) {
25199 unsigned BlendMask = 0;
25200 unsigned ShuffleSize = ShuffleVT.getVectorNumElements();
25201 unsigned MaskRatio = ShuffleSize / NumMaskElts;
25203 if (Depth == 1 && Root.getOpcode() == X86ISD::BLENDI)
25206 for (unsigned i = 0; i != ShuffleSize; ++i)
25207 if (Mask[i / MaskRatio] < 0)
25208 BlendMask |= 1u << i;
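// E.g. a v4i32 mask of <0, Z, 2, Z> builds BlendMask 0b1010, so BLENDI takes
// elements 1 and 3 from the zero vector and the rest from the input.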
25210 SDValue Zero = getZeroVector(ShuffleVT, Subtarget, DAG, DL);
25211 Res = DAG.getBitcast(ShuffleVT, Input);
25212 DCI.AddToWorklist(Res.getNode());
25213 Res = DAG.getNode(X86ISD::BLENDI, DL, ShuffleVT, Res, Zero,
25214 DAG.getConstant(BlendMask, DL, MVT::i8));
25215 DCI.AddToWorklist(Res.getNode());
25216 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
25222 // Attempt to combine to INSERTPS.
25223 if (Subtarget.hasSSE41() && NumMaskElts == 4 &&
25224 (VT == MVT::v2f64 || VT == MVT::v4f32)) {
25225 SmallBitVector Zeroable(4, false);
25226 for (unsigned i = 0; i != NumMaskElts; ++i)
25228 Zeroable[i] = true;
25230 unsigned InsertPSMask;
25231 SDValue V1 = Input, V2 = Input;
25232 if (Zeroable.any() && matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask,
25233 Zeroable, Mask, DAG)) {
25234 if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTPS)
25235 return false; // Nothing to do!
25236 V1 = DAG.getBitcast(MVT::v4f32, V1);
25237 DCI.AddToWorklist(V1.getNode());
25238 V2 = DAG.getBitcast(MVT::v4f32, V2);
25239 DCI.AddToWorklist(V2.getNode());
25240 Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
25241 DAG.getConstant(InsertPSMask, DL, MVT::i8));
25242 DCI.AddToWorklist(Res.getNode());
25243 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
25249 // Don't try to re-form single instruction chains under any circumstances now
25250 // that we've done encoding canonicalization for them.
25251 if (Depth < 2)
25252 return false;
25254 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask))
25255 return false;
25257 bool MaskContainsZeros =
25258 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
25260 // If we have a single input shuffle with different shuffle patterns in the
25261 // 128-bit lanes, use the variable-mask form of VPERMILPS (X86ISD::VPERMILPV).
25262 // TODO: Combine other mask types at higher depths.
25263 if (HasVariableMask && !MaskContainsZeros &&
25264 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
25265 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
25266 SmallVector<SDValue, 16> VPermIdx;
25267 for (int M : Mask) {
25269 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
25270 VPermIdx.push_back(Idx);
25272 MVT VPermMaskVT = MVT::getVectorVT(MVT::i32, NumMaskElts);
25273 SDValue VPermMask = DAG.getBuildVector(VPermMaskVT, DL, VPermIdx);
25274 DCI.AddToWorklist(VPermMask.getNode());
25275 Res = DAG.getBitcast(MaskVT, Input);
25276 DCI.AddToWorklist(Res.getNode());
25277 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
25278 DCI.AddToWorklist(Res.getNode());
25279 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
25284 // If we have 3 or more shuffle instructions or a chain involving a variable
25285 // mask, we can replace them with a single PSHUFB instruction profitably.
25286 // Intel's manuals suggest only using PSHUFB if doing so replaces 5
25287 // instructions, but in practice PSHUFB tends to be *very* fast so we're
25288 // more aggressive.
25289 if ((Depth >= 3 || HasVariableMask) &&
25290 ((VT.is128BitVector() && Subtarget.hasSSSE3()) ||
25291 (VT.is256BitVector() && Subtarget.hasAVX2()) ||
25292 (VT.is512BitVector() && Subtarget.hasBWI()))) {
25293 SmallVector<SDValue, 16> PSHUFBMask;
25294 int NumBytes = VT.getSizeInBits() / 8;
25295 int Ratio = NumBytes / NumMaskElts;
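// Each mask element expands to Ratio consecutive byte indices; zeroable
// elements become 0x80-style bytes (255), which PSHUFB turns into zero. E.g.
// for a v4i32 mask on a 128-bit vector, element value 2 expands to the byte
// indices 8, 9, 10, 11.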
25296 for (int i = 0; i < NumBytes; ++i) {
25297 int M = Mask[i / Ratio];
25298 if (M == SM_SentinelUndef) {
25299 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
25302 if (M == SM_SentinelZero) {
25303 PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
25306 M = Ratio * M + i % Ratio;
25307 assert ((M / 16) == (i / 16) && "Lane crossing detected");
25308 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
25310 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
25311 Res = DAG.getBitcast(ByteVT, Input);
25312 DCI.AddToWorklist(Res.getNode());
25313 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
25314 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
25315 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
25316 DCI.AddToWorklist(Res.getNode());
25317 DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
25322 // Failed to find any combines.
25326 /// \brief Fully generic combining of x86 shuffle instructions.
25328 /// This should be the last combine run over the x86 shuffle instructions. Once
25329 /// they have been fully optimized, this will recursively consider all chains
25330 /// of single-use shuffle instructions, build a generic model of the cumulative
25331 /// shuffle operation, and check for simpler instructions which implement this
25332 /// operation. We use this primarily for two purposes:
25334 /// 1) Collapse generic shuffles to specialized single instructions when
25335 /// equivalent. In most cases, this is just an encoding size win, but
25336 /// sometimes we will collapse multiple generic shuffles into a single
25337 /// special-purpose shuffle.
25338 /// 2) Look for sequences of shuffle instructions with 3 or more total
25339 /// instructions, and replace them with the slightly more expensive SSSE3
25340 /// PSHUFB instruction if available. We do this as the last combining step
25341 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
25342 /// a suitable short sequence of other instructions. The PSHUFB will either
25343 /// use a register or have to read from memory and so is slightly (but only
25344 /// slightly) more expensive than the other shuffle instructions.
25346 /// Because this is inherently a quadratic operation (for each shuffle in
25347 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
25348 /// This should never be an issue in practice as the shuffle lowering doesn't
25349 /// produce sequences of more than 8 instructions.
25351 /// FIXME: We will currently miss some cases where the redundant shuffling
25352 /// would simplify under the threshold for PSHUFB formation because of
25353 /// combine-ordering. To fix this, we should do the redundant instruction
25354 /// combining in this recursive walk.
25355 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
25356 ArrayRef<int> RootMask,
25357 int Depth, bool HasVariableMask,
25359 TargetLowering::DAGCombinerInfo &DCI,
25360 const X86Subtarget &Subtarget) {
25361 // Bound the depth of our recursive combine because this is ultimately
25362 // quadratic in nature.
25363 if (Depth > 8)
25364 return false;
25366 // Directly rip through bitcasts to find the underlying operand.
25367 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
25368 Op = Op.getOperand(0);
25370 MVT VT = Op.getSimpleValueType();
25371 if (!VT.isVector())
25372 return false; // Bail if we hit a non-vector.
25374 assert(Root.getSimpleValueType().isVector() &&
25375 "Shuffles operate on vector types!");
25376 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
25377 "Can only combine shuffles of the same vector register size.");
25379 // Extract target shuffle mask and resolve sentinels and inputs.
25380 SDValue Input0, Input1;
25381 SmallVector<int, 16> OpMask;
25382 if (!resolveTargetShuffleInputs(Op, Input0, Input1, OpMask))
25383 return false;
25385 assert(VT.getVectorNumElements() == OpMask.size() &&
25386 "Different mask size from vector size!");
25387 assert(((RootMask.size() > OpMask.size() &&
25388 RootMask.size() % OpMask.size() == 0) ||
25389 (OpMask.size() > RootMask.size() &&
25390 OpMask.size() % RootMask.size() == 0) ||
25391 OpMask.size() == RootMask.size()) &&
25392 "The smaller number of elements must divide the larger.");
25393 int MaskWidth = std::max<int>(OpMask.size(), RootMask.size());
25394 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
25395 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
25396 assert(((RootRatio == 1 && OpRatio == 1) ||
25397 (RootRatio == 1) != (OpRatio == 1)) &&
25398 "Must not have a ratio for both incoming and op masks!");
25400 SmallVector<int, 16> Mask;
25401 Mask.reserve(MaskWidth);
25403 // Merge this shuffle operation's mask into our accumulated mask. Note that
25404 // this shuffle's mask will be the first applied to the input, followed by the
25405 // root mask to get us all the way to the root value arrangement. The reason
25406 // for this order is that we are recursing up the operation chain.
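// For example, if RootMask has 4 (64-bit) elements and OpMask has 8 (32-bit)
// elements, RootRatio is 2 and result element i becomes
// OpMask[RootMask[i / 2] * 2 + i % 2], i.e. the op mask read through the
// wider root mask.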
25407 for (int i = 0; i < MaskWidth; ++i) {
25408 int RootIdx = i / RootRatio;
25409 if (RootMask[RootIdx] < 0) {
25410 // This is a zero or undef lane, we're done.
25411 Mask.push_back(RootMask[RootIdx]);
25415 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
25416 int OpIdx = RootMaskedIdx / OpRatio;
25417 if (OpMask[OpIdx] < 0) {
25418 // The incoming lanes are zero or undef, it doesn't matter which ones we
25420 Mask.push_back(OpMask[OpIdx]);
25424 // Ok, we have non-zero lanes, map them through.
25425 Mask.push_back(OpMask[OpIdx] * OpRatio +
25426 RootMaskedIdx % OpRatio);
25429 // Handle the all undef/zero cases early.
25430 if (llvm::all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) {
25431 DCI.CombineTo(Root.getNode(), DAG.getUNDEF(Root.getValueType()));
25434 if (llvm::all_of(Mask, [](int Idx) { return Idx < 0; })) {
25435 // TODO - should we handle the mixed zero/undef case as well? Just returning
25436 // a zero mask will lose information on undef elements possibly reducing
25437 // future combine possibilities.
25438 DCI.CombineTo(Root.getNode(), getZeroVector(Root.getSimpleValueType(),
25439 Subtarget, DAG, SDLoc(Root)));
25443 int MaskSize = Mask.size();
25444 bool UseInput0 = std::any_of(Mask.begin(), Mask.end(),
25445 [MaskSize](int Idx) { return 0 <= Idx && Idx < MaskSize; });
25446 bool UseInput1 = std::any_of(Mask.begin(), Mask.end(),
25447 [MaskSize](int Idx) { return MaskSize <= Idx; });
25449 // At the moment we can only combine unary shuffle mask cases.
25450 if (UseInput0 && UseInput1)
25451 return false;
25452 else if (UseInput1) {
25453 std::swap(Input0, Input1);
25454 ShuffleVectorSDNode::commuteMask(Mask);
25457 assert(Input0 && "Shuffle with no inputs detected");
25459 HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
25461 // See if we can recurse into Input0 (if it's a target shuffle).
25462 if (Op->isOnlyUserOf(Input0.getNode()) &&
25463 combineX86ShufflesRecursively(Input0, Root, Mask, Depth + 1,
25464 HasVariableMask, DAG, DCI, Subtarget))
25465 return true;
25467 // Minor canonicalization of the accumulated shuffle mask to make it easier
25468 // to match below. All this does is detect masks with sequential pairs of
25469 // elements, and shrink them to the half-width mask. It does this in a loop
25470 // so it will reduce the size of the mask to the minimal width mask which
25471 // performs an equivalent shuffle.
25472 SmallVector<int, 16> WidenedMask;
25473 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
25474 Mask = std::move(WidenedMask);
25475 }
25477 return combineX86ShuffleChain(Input0, Root, Mask, Depth, HasVariableMask, DAG,
25478 DCI, Subtarget);
25479 }
25481 /// \brief Get the PSHUF-style mask from PSHUF node.
25483 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
25484 /// PSHUF-style masks that can be reused with such instructions.
25485 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
25486 MVT VT = N.getSimpleValueType();
25487 SmallVector<int, 4> Mask;
25488 SmallVector<SDValue, 2> Ops;
25489 bool IsUnary;
25491 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
25495 // If we have more than 128-bits, only the low 128-bits of shuffle mask
25496 // matter. Check that the upper masks are repeats and remove them.
25497 if (VT.getSizeInBits() > 128) {
25498 int LaneElts = 128 / VT.getScalarSizeInBits();
25500 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
25501 for (int j = 0; j < LaneElts; ++j)
25502 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
25503 "Mask doesn't repeat in high 128-bit lanes!");
25505 Mask.resize(LaneElts);
25508 switch (N.getOpcode()) {
25509 case X86ISD::PSHUFD:
25510 return Mask;
25511 case X86ISD::PSHUFLW:
25512 Mask.resize(4);
25513 return Mask;
25514 case X86ISD::PSHUFHW:
25515 Mask.erase(Mask.begin(), Mask.begin() + 4);
25516 for (int &M : Mask)
25517 M -= 4;
25518 return Mask;
25519 default:
25520 llvm_unreachable("No valid shuffle instruction found!");
25524 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
25526 /// We walk up the chain and look for a combinable shuffle, skipping over
25527 /// shuffles that we could hoist this shuffle's transformation past without
25528 /// altering anything.
25530 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
25532 TargetLowering::DAGCombinerInfo &DCI) {
25533 assert(N.getOpcode() == X86ISD::PSHUFD &&
25534 "Called with something other than an x86 128-bit half shuffle!");
25537 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
25538 // of the shuffles in the chain so that we can form a fresh chain to replace
25540 SmallVector<SDValue, 8> Chain;
25541 SDValue V = N.getOperand(0);
25542 for (; V.hasOneUse(); V = V.getOperand(0)) {
25543 switch (V.getOpcode()) {
25545 return SDValue(); // Nothing combined!
25547 case ISD::BITCAST:
25548 // Skip bitcasts as we always know the type for the target specific
25549 // instructions we will generate.
25550 continue;
25552 case X86ISD::PSHUFD:
25553 // Found another dword shuffle.
25556 case X86ISD::PSHUFLW:
25557 // Check that the low words (being shuffled) are the identity in the
25558 // dword shuffle, and the high words are self-contained.
25559 if (Mask[0] != 0 || Mask[1] != 1 ||
25560 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
25563 Chain.push_back(V);
25566 case X86ISD::PSHUFHW:
25567 // Check that the high words (being shuffled) are the identity in the
25568 // dword shuffle, and the low words are self-contained.
25569 if (Mask[2] != 2 || Mask[3] != 3 ||
25570 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
25573 Chain.push_back(V);
25576 case X86ISD::UNPCKL:
25577 case X86ISD::UNPCKH:
25578 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
25579 // shuffle into a preceding word shuffle.
25580 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
25581 V.getSimpleValueType().getVectorElementType() != MVT::i16)
25584 // Search for a half-shuffle which we can combine with.
25585 unsigned CombineOp =
25586 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
25587 if (V.getOperand(0) != V.getOperand(1) ||
25588 !V->isOnlyUserOf(V.getOperand(0).getNode()))
25590 Chain.push_back(V);
25591 V = V.getOperand(0);
25593 switch (V.getOpcode()) {
25595 return SDValue(); // Nothing to combine.
25597 case X86ISD::PSHUFLW:
25598 case X86ISD::PSHUFHW:
25599 if (V.getOpcode() == CombineOp)
25602 Chain.push_back(V);
25606 V = V.getOperand(0);
25610 } while (V.hasOneUse());
25613 // Break out of the loop if we break out of the switch.
25617 if (!V.hasOneUse())
25618 // We fell out of the loop without finding a viable combining instruction.
25619 return SDValue();
25621 // Merge this node's mask and our incoming mask.
25622 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
25623 for (int &M : Mask)
25624 M = VMask[M];
25625 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
25626 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
25628 // Rebuild the chain around this new shuffle.
25629 while (!Chain.empty()) {
25630 SDValue W = Chain.pop_back_val();
25632 if (V.getValueType() != W.getOperand(0).getValueType())
25633 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
25635 switch (W.getOpcode()) {
25637 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
25639 case X86ISD::UNPCKL:
25640 case X86ISD::UNPCKH:
25641 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
25644 case X86ISD::PSHUFD:
25645 case X86ISD::PSHUFLW:
25646 case X86ISD::PSHUFHW:
25647 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
25651 if (V.getValueType() != N.getValueType())
25652 V = DAG.getBitcast(N.getValueType(), V);
25654 // Return the new chain to replace N.
25658 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or
25661 /// We walk up the chain, skipping shuffles of the other half and looking
25662 /// through shuffles which switch halves trying to find a shuffle of the same
25663 /// pair of dwords.
25664 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
25666 TargetLowering::DAGCombinerInfo &DCI) {
25668 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
25669 "Called with something other than an x86 128-bit half shuffle!");
25671 unsigned CombineOpcode = N.getOpcode();
25673 // Walk up a single-use chain looking for a combinable shuffle.
25674 SDValue V = N.getOperand(0);
25675 for (; V.hasOneUse(); V = V.getOperand(0)) {
25676 switch (V.getOpcode()) {
25678 return false; // Nothing combined!
25680 case ISD::BITCAST:
25681 // Skip bitcasts as we always know the type for the target specific
25682 // instructions we will generate.
25683 continue;
25685 case X86ISD::PSHUFLW:
25686 case X86ISD::PSHUFHW:
25687 if (V.getOpcode() == CombineOpcode)
25690 // Other-half shuffles are no-ops.
25693 // Break out of the loop if we break out of the switch.
25697 if (!V.hasOneUse())
25698 // We fell out of the loop without finding a viable combining instruction.
25699 return false;
25701 // Combine away the bottom node as its shuffle will be accumulated into
25702 // a preceding shuffle.
25703 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
25705 // Record the old value.
25706 SDValue Old = V;
25708 // Merge this node's mask and our incoming mask (adjusted to account for all
25709 // the pshufd instructions encountered).
25710 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
25711 for (int &M : Mask)
25712 M = VMask[M];
25713 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
25714 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
25716 // Check that the shuffles didn't cancel each other out. If not, we need to
25717 // combine to the new one.
25718 if (Old != V)
25719 // Replace the combinable shuffle with the combined one, updating all users
25720 // so that we re-evaluate the chain here.
25721 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
25722 return true;
25723 }
25726 /// \brief Try to combine x86 target specific shuffles.
25727 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
25728 TargetLowering::DAGCombinerInfo &DCI,
25729 const X86Subtarget &Subtarget) {
25730 SDLoc DL(N);
25731 MVT VT = N.getSimpleValueType();
25732 SmallVector<int, 4> Mask;
25734 switch (N.getOpcode()) {
25735 case X86ISD::PSHUFD:
25736 case X86ISD::PSHUFLW:
25737 case X86ISD::PSHUFHW:
25738 Mask = getPSHUFShuffleMask(N);
25739 assert(Mask.size() == 4);
25740 break;
25741 case X86ISD::UNPCKL: {
25742 // Combine X86ISD::UNPCKL and ISD::VECTOR_SHUFFLE into X86ISD::UNPCKH, in
25743 // which X86ISD::UNPCKL has a ISD::UNDEF operand, and ISD::VECTOR_SHUFFLE
25744 // moves upper half elements into the lower half part. For example:
25746 // t2: v16i8 = vector_shuffle<8,9,10,11,12,13,14,15,u,u,u,u,u,u,u,u> t1,
25748 // t3: v16i8 = X86ISD::UNPCKL undef:v16i8, t2
25750 // will be combined to:
25752 // t3: v16i8 = X86ISD::UNPCKH undef:v16i8, t1
25754 // This is only for 128-bit vectors. From SSE4.1 onward this combine may not
25755 // happen due to advanced instructions.
25756 if (!VT.is128BitVector())
25759 auto Op0 = N.getOperand(0);
25760 auto Op1 = N.getOperand(1);
25761 if (Op0.isUndef() && Op1.getNode()->getOpcode() == ISD::VECTOR_SHUFFLE) {
25762 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op1.getNode())->getMask();
25764 unsigned NumElts = VT.getVectorNumElements();
25765 SmallVector<int, 8> ExpectedMask(NumElts, -1);
25766 std::iota(ExpectedMask.begin(), ExpectedMask.begin() + NumElts / 2,
25767 NumElts / 2);
25769 auto ShufOp = Op1.getOperand(0);
25770 if (isShuffleEquivalent(Op1, ShufOp, Mask, ExpectedMask))
25771 return DAG.getNode(X86ISD::UNPCKH, DL, VT, N.getOperand(0), ShufOp);
25775 case X86ISD::BLENDI: {
25776 SDValue V0 = N->getOperand(0);
25777 SDValue V1 = N->getOperand(1);
25778 assert(VT == V0.getSimpleValueType() && VT == V1.getSimpleValueType() &&
25779 "Unexpected input vector types");
25781 // Canonicalize a v2f64 blend with a mask of 2 by swapping the vector
25782 // operands and changing the mask to 1. This saves us a bunch of
25783 // pattern-matching possibilities related to scalar math ops in SSE/AVX.
25784 // x86InstrInfo knows how to commute this back after instruction selection
25785 // if it would help register allocation.
25787 // TODO: If optimizing for size or a processor that doesn't suffer from
25788 // partial register update stalls, this should be transformed into a MOVSD
25789 // instruction because a MOVSD is 1-2 bytes smaller than a BLENDPD.
25791 if (VT == MVT::v2f64)
25792 if (auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(2)))
25793 if (Mask->getZExtValue() == 2 && !isShuffleFoldableLoad(V0)) {
25794 SDValue NewMask = DAG.getConstant(1, DL, MVT::i8);
25795 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V0, NewMask);
25798 // Attempt to merge blend(insertps(x,y),zero).
25799 if (V0.getOpcode() == X86ISD::INSERTPS ||
25800 V1.getOpcode() == X86ISD::INSERTPS) {
25801 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
25803 // Determine which elements are known to be zero.
25804 SmallVector<int, 8> TargetMask;
25805 SmallVector<SDValue, 2> BlendOps;
25806 if (!setTargetShuffleZeroElements(N, TargetMask, BlendOps))
25809 // Helper function to take inner insertps node and attempt to
25810 // merge the blend with zero into its zero mask.
25811 auto MergeInsertPSAndBlend = [&](SDValue V, int Offset) {
25812 if (V.getOpcode() != X86ISD::INSERTPS)
25814 SDValue Op0 = V.getOperand(0);
25815 SDValue Op1 = V.getOperand(1);
25816 SDValue Op2 = V.getOperand(2);
25817 unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
25819 // Check each element of the blend node's target mask - must either
25820 // be zeroable (and update the zero mask) or selects the element from
25821 // the inner insertps node.
25822 for (int i = 0; i != 4; ++i)
25823 if (TargetMask[i] < 0)
25824 InsertPSMask |= (1u << i);
25825 else if (TargetMask[i] != (i + Offset))
25827 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, Op0, Op1,
25828 DAG.getConstant(InsertPSMask, DL, MVT::i8));
25831 if (SDValue V = MergeInsertPSAndBlend(V0, 0))
25832 return V;
25833 if (SDValue V = MergeInsertPSAndBlend(V1, 4))
25834 return V;
25838 case X86ISD::INSERTPS: {
25839 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
25840 SDValue Op0 = N.getOperand(0);
25841 SDValue Op1 = N.getOperand(1);
25842 SDValue Op2 = N.getOperand(2);
25843 unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
25844 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
25845 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
25846 unsigned ZeroMask = InsertPSMask & 0xF;
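// INSERTPS immediate layout: bits [7:6] select the source element of Op1,
// bits [5:4] select the destination slot in Op0, and bits [3:0] are a mask of
// result elements forced to zero.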
25848 // If we zero out all elements from Op0 then we don't need to reference it.
25849 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
25850 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
25851 DAG.getConstant(InsertPSMask, DL, MVT::i8));
25853 // If we zero out the element from Op1 then we don't need to reference it.
25854 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
25855 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
25856 DAG.getConstant(InsertPSMask, DL, MVT::i8));
25858 // Attempt to merge insertps Op1 with an inner target shuffle node.
25859 SmallVector<int, 8> TargetMask1;
25860 SmallVector<SDValue, 2> Ops1;
25861 if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) {
25862 int M = TargetMask1[SrcIdx];
25863 if (isUndefOrZero(M)) {
25864 // Zero/UNDEF insertion - zero out element and remove dependency.
25865 InsertPSMask |= (1u << DstIdx);
25866 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
25867 DAG.getConstant(InsertPSMask, DL, MVT::i8));
25869 // Update insertps mask srcidx and reference the source input directly.
25870 assert(0 <= M && M < 8 && "Shuffle index out of range");
25871 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
25872 Op1 = Ops1[M < 4 ? 0 : 1];
25873 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
25874 DAG.getConstant(InsertPSMask, DL, MVT::i8));
25877 // Attempt to merge insertps Op0 with an inner target shuffle node.
25878 SmallVector<int, 8> TargetMask0;
25879 SmallVector<SDValue, 2> Ops0;
25880 if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0))
25883 bool Updated = false;
25884 bool UseInput00 = false;
25885 bool UseInput01 = false;
25886 for (int i = 0; i != 4; ++i) {
25887 int M = TargetMask0[i];
25888 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
25889 // No change if element is already zero or the inserted element.
25891 } else if (isUndefOrZero(M)) {
25892 // If the target mask is undef/zero then we must zero the element.
25893 InsertPSMask |= (1u << i);
25898 // The input vector element must be inline.
25899 if (M != i && M != (i + 4))
25902 // Determine which inputs of the target shuffle we're using.
25903 UseInput00 |= (0 <= M && M < 4);
25904 UseInput01 |= (4 <= M);
25907 // If we're not using both inputs of the target shuffle then use the
25908 // referenced input directly.
25909 if (UseInput00 && !UseInput01) {
25910 Updated = true;
25911 Op0 = Ops0[0];
25912 } else if (!UseInput00 && UseInput01) {
25913 Updated = true;
25914 Op0 = Ops0[1];
25915 }
25917 if (Updated)
25918 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
25919 DAG.getConstant(InsertPSMask, DL, MVT::i8));
25927 // Nuke no-op shuffles that show up after combining.
25928 if (isNoopShuffleMask(Mask))
25929 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
25931 // Look for simplifications involving one or two shuffle instructions.
25932 SDValue V = N.getOperand(0);
25933 switch (N.getOpcode()) {
25936 case X86ISD::PSHUFLW:
25937 case X86ISD::PSHUFHW:
25938 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
25940 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
25941 return SDValue(); // We combined away this shuffle, so we're done.
25943 // See if this reduces to a PSHUFD which is no more expensive and can
25944 // combine with more operations. Note that it has to at least flip the
25945 // dwords as otherwise it would have been removed as a no-op.
25946 if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
25947 int DMask[] = {0, 1, 2, 3};
25948 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
25949 DMask[DOffset + 0] = DOffset + 1;
25950 DMask[DOffset + 1] = DOffset + 0;
25951 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
25952 V = DAG.getBitcast(DVT, V);
25953 DCI.AddToWorklist(V.getNode());
25954 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
25955 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
25956 DCI.AddToWorklist(V.getNode());
25957 return DAG.getBitcast(VT, V);
25960 // Look for shuffle patterns which can be implemented as a single unpack.
25961 // FIXME: This doesn't handle the location of the PSHUFD generically, and
25962 // only works when we have a PSHUFD followed by two half-shuffles.
25963 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
25964 (V.getOpcode() == X86ISD::PSHUFLW ||
25965 V.getOpcode() == X86ISD::PSHUFHW) &&
25966 V.getOpcode() != N.getOpcode() &&
25967 V.hasOneUse()) {
25968 SDValue D = V.getOperand(0);
25969 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
25970 D = D.getOperand(0);
25971 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
25972 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
25973 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
25974 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
25975 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
25976 int WordMask[8];
25977 for (int i = 0; i < 4; ++i) {
25978 WordMask[i + NOffset] = Mask[i] + NOffset;
25979 WordMask[i + VOffset] = VMask[i] + VOffset;
25981 // Map the word mask through the DWord mask.
25982 int MappedMask[8];
25983 for (int i = 0; i < 8; ++i)
25984 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
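// E.g. with DMask <2, 3, 0, 1>, word 5 (word 1 of dword 2) maps to
// 2 * DMask[2] + 1 == 1, tracing each word back through the dword shuffle.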
25985 if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
25986 makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
25987 // We can replace all three shuffles with an unpack.
25988 V = DAG.getBitcast(VT, D.getOperand(0));
25989 DCI.AddToWorklist(V.getNode());
25990 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
25991 : X86ISD::UNPCKH,
25992 DL, VT, V, V);
25999 case X86ISD::PSHUFD:
26000 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
26001 return NewN;
26009 /// \brief Try to combine a shuffle into a target-specific add-sub node.
26011 /// We combine this directly on the abstract vector shuffle nodes so it is
26012 /// easier to generically match. We also insert dummy vector shuffle nodes for
26013 /// the operands which explicitly discard the lanes which are unused by this
26014 /// operation to try to flow through the rest of the combiner the fact that
26015 /// they're unused.
26016 static SDValue combineShuffleToAddSub(SDNode *N, const X86Subtarget &Subtarget,
26017 SelectionDAG &DAG) {
26019 EVT VT = N->getValueType(0);
26020 if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
26021 (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
26024 // We only handle target-independent shuffles.
26025 // FIXME: It would be easy and harmless to use the target shuffle mask
26026 // extraction tool to support more.
26027 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
26030 auto *SVN = cast<ShuffleVectorSDNode>(N);
26031 SmallVector<int, 8> Mask;
26032 for (int M : SVN->getMask())
26033 Mask.push_back(M);
26035 SDValue V1 = N->getOperand(0);
26036 SDValue V2 = N->getOperand(1);
26038 // We require the first shuffle operand to be the FSUB node, and the second to
26039 // be the FADD node.
26040 if (V1.getOpcode() == ISD::FADD && V2.getOpcode() == ISD::FSUB) {
26041 ShuffleVectorSDNode::commuteMask(Mask);
26042 std::swap(V1, V2);
26043 } else if (V1.getOpcode() != ISD::FSUB || V2.getOpcode() != ISD::FADD)
26044 return SDValue();
26046 // If there are other uses of these operations we can't fold them.
26047 if (!V1->hasOneUse() || !V2->hasOneUse())
26050 // Ensure that both operations have the same operands. Note that we can
26051 // commute the FADD operands.
26052 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
26053 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
26054 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
26057 // We're looking for blends between FADD and FSUB nodes. We insist on these
26058 // nodes being lined up in a specific expected pattern.
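// ADDSUBPS/ADDSUBPD subtract in the even lanes and add in the odd lanes, so
// the masks checked below select exactly that: even elements from the FSUB
// result (V1) and odd elements from the FADD result (V2).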
26059 if (!(isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
26060 isShuffleEquivalent(V1, V2, Mask, {0, 5, 2, 7}) ||
26061 isShuffleEquivalent(V1, V2, Mask, {0, 9, 2, 11, 4, 13, 6, 15})))
26064 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
26067 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
26068 TargetLowering::DAGCombinerInfo &DCI,
26069 const X86Subtarget &Subtarget) {
26070 SDLoc dl(N);
26071 EVT VT = N->getValueType(0);
26073 // Don't create instructions with illegal types after legalize types has run.
26074 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26075 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
26078 // If we have legalized the vector types, look for blends of FADD and FSUB
26079 // nodes that we can fuse into an ADDSUB node.
26080 if (TLI.isTypeLegal(VT))
26081 if (SDValue AddSub = combineShuffleToAddSub(N, Subtarget, DAG))
26082 return AddSub;
26084 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
26085 if (TLI.isTypeLegal(VT) && Subtarget.hasFp256() && VT.is256BitVector() &&
26086 N->getOpcode() == ISD::VECTOR_SHUFFLE)
26087 return combineShuffle256(N, DAG, DCI, Subtarget);
26089 // During Type Legalization, when promoting illegal vector types,
26090 // the backend might introduce new shuffle dag nodes and bitcasts.
26092 // This code performs the following transformation:
26093 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
26094 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
26096 // We do this only if both the bitcast and the BINOP dag nodes have
26097 // one use. Also, perform this transformation only if the new binary
26098 // operation is legal. This is to avoid introducing dag nodes that
26099 // potentially need to be further expanded (or custom lowered) into a
26100 // less optimal sequence of dag nodes.
26101 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
26102 N->getOpcode() == ISD::VECTOR_SHUFFLE &&
26103 N->getOperand(0).getOpcode() == ISD::BITCAST &&
26104 N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) {
26105 SDValue N0 = N->getOperand(0);
26106 SDValue N1 = N->getOperand(1);
26108 SDValue BC0 = N0.getOperand(0);
26109 EVT SVT = BC0.getValueType();
26110 unsigned Opcode = BC0.getOpcode();
26111 unsigned NumElts = VT.getVectorNumElements();
26113 if (BC0.hasOneUse() && SVT.isVector() &&
26114 SVT.getVectorNumElements() * 2 == NumElts &&
26115 TLI.isOperationLegal(Opcode, VT)) {
26116 bool CanFold = false;
26128 unsigned SVTNumElts = SVT.getVectorNumElements();
26129 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
26130 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
26131 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
26132 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
26133 CanFold = SVOp->getMaskElt(i) < 0;
26136 SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
26137 SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
26138 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
26139 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask());
26144 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
26145 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
26146 // consecutive, non-overlapping, and in the right order.
26147 SmallVector<SDValue, 16> Elts;
26148 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
26149 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
26151 if (SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true))
26154 if (isTargetShuffle(N->getOpcode())) {
26155 if (SDValue Shuffle =
26156 combineTargetShuffle(SDValue(N, 0), DAG, DCI, Subtarget))
26159 // Try recursively combining arbitrary sequences of x86 shuffle
26160 // instructions into higher-order shuffles. We do this after combining
26161 // specific PSHUF instruction sequences into their minimal form so that we
26162 // can evaluate how many specialized shuffle instructions are involved in
26163 // a particular chain.
26164 SmallVector<int, 1> NonceMask; // Just a placeholder.
26165 NonceMask.push_back(0);
26166 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
26167 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
26169 return SDValue(); // This routine will use CombineTo to replace N.
26175 /// Check if a vector extract from a target-specific shuffle of a load can be
26176 /// folded into a single element load.
26177 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
26178 /// shuffles have been custom lowered so we need to handle those here.
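/// For example (illustrative), (extract_elt (X86 shuffle (load Ptr)), Idx) is
/// rewritten back into a generic shuffle + extract so that DAGCombiner can
/// then turn it into a scalar load of the selected element.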
26179 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
26180 TargetLowering::DAGCombinerInfo &DCI) {
26181 if (DCI.isBeforeLegalizeOps())
26184 SDValue InVec = N->getOperand(0);
26185 SDValue EltNo = N->getOperand(1);
26186 EVT EltVT = N->getValueType(0);
26188 if (!isa<ConstantSDNode>(EltNo))
26191 EVT OriginalVT = InVec.getValueType();
26193 if (InVec.getOpcode() == ISD::BITCAST) {
26194 // Don't duplicate a load with other uses.
26195 if (!InVec.hasOneUse())
26197 EVT BCVT = InVec.getOperand(0).getValueType();
26198 if (!BCVT.isVector() ||
26199 BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
26201 InVec = InVec.getOperand(0);
26204 EVT CurrentVT = InVec.getValueType();
26206 if (!isTargetShuffle(InVec.getOpcode()))
26209 // Don't duplicate a load with other uses.
26210 if (!InVec.hasOneUse())
26213 SmallVector<int, 16> ShuffleMask;
26214 SmallVector<SDValue, 2> ShuffleOps;
26216 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
26217 ShuffleOps, ShuffleMask, UnaryShuffle))
26220 // Select the input vector, guarding against an out-of-range extract index.
26221 unsigned NumElems = CurrentVT.getVectorNumElements();
26222 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
26223 int Idx = (Elt > (int)NumElems) ? SM_SentinelUndef : ShuffleMask[Elt];
26225 if (Idx == SM_SentinelZero)
26226 return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
26227 : DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
26228 if (Idx == SM_SentinelUndef)
26229 return DAG.getUNDEF(EltVT);
26231 assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range");
26232 SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0]
26235 // If inputs to shuffle are the same for both ops, then allow 2 uses
26236 unsigned AllowedUses =
26237 (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
26239 if (LdNode.getOpcode() == ISD::BITCAST) {
26240 // Don't duplicate a load with other uses.
26241 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
26244 AllowedUses = 1; // only allow 1 load use if we have a bitcast
26245 LdNode = LdNode.getOperand(0);
26248 if (!ISD::isNormalLoad(LdNode.getNode()))
26251 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
26253 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
26256 // If there's a bitcast before the shuffle, check if the load type and
26257 // alignment are valid.
26258 unsigned Align = LN0->getAlignment();
26259 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26260 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
26261 EltVT.getTypeForEVT(*DAG.getContext()));
26263 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
26266 // All checks match so transform back to vector_shuffle so that DAG combiner
26267 // can finish the job
26270 // Create a shuffle node, taking into account the case that it's a unary shuffle.
26271 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1];
26272 Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle,
26274 Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
26275 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
26279 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
26280 const X86Subtarget &Subtarget) {
26281 SDValue N0 = N->getOperand(0);
26282 EVT VT = N->getValueType(0);
26284 // Detect bitcasts from i32 to the x86mmx low word. Since MMX types are
26285 // special and don't usually play with other vector types, it's better to
26286 // handle them early to be sure we emit efficient code by avoiding
26287 // store-load conversions.
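// For example: (x86mmx (bitcast (v2i32 build_vector X:i32, 0))) is matched
// below and becomes (MMX_MOVW2D X).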
26288 if (VT == MVT::x86mmx && N0.getOpcode() == ISD::BUILD_VECTOR &&
26289 N0.getValueType() == MVT::v2i32 &&
26290 isNullConstant(N0.getOperand(1))) {
26291 SDValue N00 = N0->getOperand(0);
26292 if (N00.getValueType() == MVT::i32)
26293 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(N00), VT, N00);
26296 // Convert a bitcasted integer logic operation that has one bitcasted
26297 // floating-point operand and one constant operand into a floating-point
26298 // logic operation. This may create a load of the constant, but that is
26299 // cheaper than materializing the constant in an integer register and
26300 // transferring it to an SSE register or transferring the SSE operand to
26301 // integer register and back.
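// For example, with SSE1 and f32 (illustrative):
//   (f32 (bitcast (and (bitcast f32:X to i32), C))) -> (FAND X, (bitcast C))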
26303 switch (N0.getOpcode()) {
26304 case ISD::AND: FPOpcode = X86ISD::FAND; break;
26305 case ISD::OR: FPOpcode = X86ISD::FOR; break;
26306 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
26307 default: return SDValue();
26309 if (((Subtarget.hasSSE1() && VT == MVT::f32) ||
26310 (Subtarget.hasSSE2() && VT == MVT::f64)) &&
26311 isa<ConstantSDNode>(N0.getOperand(1)) &&
26312 N0.getOperand(0).getOpcode() == ISD::BITCAST &&
26313 N0.getOperand(0).getOperand(0).getValueType() == VT) {
26314 SDValue N000 = N0.getOperand(0).getOperand(0);
26315 SDValue FPConst = DAG.getBitcast(VT, N0.getOperand(1));
26316 return DAG.getNode(FPOpcode, SDLoc(N0), VT, N000, FPConst);
26322 /// Detect vector gather/scatter index generation and convert it from being a
26323 /// bunch of shuffles and extracts into a somewhat faster sequence.
26324 /// For i686, the best sequence is apparently storing the value and loading
26325 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
26326 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
26327 TargetLowering::DAGCombinerInfo &DCI) {
26328 if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
26331 SDValue InputVector = N->getOperand(0);
26332 SDLoc dl(InputVector);
26333 // Detect mmx to i32 conversion through a v2i32 elt extract.
26334 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
26335 N->getValueType(0) == MVT::i32 &&
26336 InputVector.getValueType() == MVT::v2i32 &&
26337 isa<ConstantSDNode>(N->getOperand(1)) &&
26338 N->getConstantOperandVal(1) == 0) {
26339 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
26341 // The bitcast source is a direct mmx result.
26342 if (MMXSrc.getValueType() == MVT::x86mmx)
26343 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
26346 EVT VT = N->getValueType(0);
26348 if (VT == MVT::i1 && isa<ConstantSDNode>(N->getOperand(1)) &&
26349 InputVector.getOpcode() == ISD::BITCAST &&
26350 isa<ConstantSDNode>(InputVector.getOperand(0))) {
26351 uint64_t ExtractedElt =
26352 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
26353 uint64_t InputValue =
26354 cast<ConstantSDNode>(InputVector.getOperand(0))->getZExtValue();
26355 uint64_t Res = (InputValue >> ExtractedElt) & 1;
26356 return DAG.getConstant(Res, dl, MVT::i1);
26358 // Only operate on vectors of 4 elements, where the alternative shuffling
26359 // gets to be more expensive.
26360 if (InputVector.getValueType() != MVT::v4i32)
26363 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
26364 // single use which is a sign-extend or zero-extend, and all elements are used.
26366 SmallVector<SDNode *, 4> Uses;
26367 unsigned ExtractedElements = 0;
26368 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
26369 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
26370 if (UI.getUse().getResNo() != InputVector.getResNo())
26373 SDNode *Extract = *UI;
26374 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
26377 if (Extract->getValueType(0) != MVT::i32)
26379 if (!Extract->hasOneUse())
26381 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
26382 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
26384 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
26387 // Record which element was extracted.
26388 ExtractedElements |=
26389 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
26391 Uses.push_back(Extract);
26394 // If not all the elements were used, this may not be worthwhile.
26395 if (ExtractedElements != 15)
26398 // Ok, we've now decided to do the transformation.
26399 // If 64-bit shifts are legal, use the extract-shift sequence,
26400 // otherwise bounce the vector off the cache.
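// Sketch of the extract-shift sequence used below for a v4i32 input V:
//   Lo = extract_elt (bitcast V to v2i64), 0;  Hi = extract_elt ..., 1
//   elt0 = trunc Lo; elt1 = trunc (Lo >> 32); elt2 = trunc Hi; elt3 = trunc (Hi >> 32)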
26401 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26404 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
26405 SDValue Cst = DAG.getBitcast(MVT::v2i64, InputVector);
26406 auto &DL = DAG.getDataLayout();
26407 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy(DL);
26408 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
26409 DAG.getConstant(0, dl, VecIdxTy));
26410 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
26411 DAG.getConstant(1, dl, VecIdxTy));
26413 SDValue ShAmt = DAG.getConstant(
26414 32, dl, DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64, DL));
26415 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
26416 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
26417 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
26418 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
26419 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
26420 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
26422 // Store the value to a temporary stack slot.
26423 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
26424 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
26425 MachinePointerInfo());
26427 EVT ElementType = InputVector.getValueType().getVectorElementType();
26428 unsigned EltSize = ElementType.getSizeInBits() / 8;
26430 // Replace each use (extract) with a load of the appropriate element.
26431 for (unsigned i = 0; i < 4; ++i) {
26432 uint64_t Offset = EltSize * i;
26433 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
26434 SDValue OffsetVal = DAG.getConstant(Offset, dl, PtrVT);
26436 SDValue ScalarAddr =
26437 DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, OffsetVal);
26439 // Load the scalar.
26441 DAG.getLoad(ElementType, dl, Ch, ScalarAddr, MachinePointerInfo());
26445 // Replace the extracts
26446 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
26447 UE = Uses.end(); UI != UE; ++UI) {
26448 SDNode *Extract = *UI;
26450 SDValue Idx = Extract->getOperand(1);
26451 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
26452 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
26455 // The replacement was made in place; don't return anything.
26459 /// Do target-specific dag combines on SELECT and VSELECT nodes.
26460 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
26461 TargetLowering::DAGCombinerInfo &DCI,
26462 const X86Subtarget &Subtarget) {
26464 SDValue Cond = N->getOperand(0);
26465 // Get the LHS/RHS of the select.
26466 SDValue LHS = N->getOperand(1);
26467 SDValue RHS = N->getOperand(2);
26468 EVT VT = LHS.getValueType();
26469 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26471 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
26472 // instructions match the semantics of the common C idiom x<y?x:y but not
26473 // x<=y?x:y, because of how they handle negative zero (which can be
26474 // ignored in unsafe-math mode).
26475 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
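// Roughly (a sketch, subject to the NaN / signed-zero checks below):
//   (select (setcc x, y, olt), x, y) -> (FMIN x, y)
//   (select (setcc x, y, ogt), x, y) -> (FMAX x, y)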
26476 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
26477 VT != MVT::f80 && VT != MVT::f128 &&
26478 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
26479 (Subtarget.hasSSE2() ||
26480 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
26481 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
26483 unsigned Opcode = 0;
26484 // Check for x CC y ? x : y.
26485 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
26486 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
26490 // Converting this to a min would handle NaNs incorrectly, and swapping
26491 // the operands would cause it to handle comparisons between positive
26492 // and negative zero incorrectly.
26493 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
26494 if (!DAG.getTarget().Options.UnsafeFPMath &&
26495 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
26497 std::swap(LHS, RHS);
26499 Opcode = X86ISD::FMIN;
26502 // Converting this to a min would handle comparisons between positive
26503 // and negative zero incorrectly.
26504 if (!DAG.getTarget().Options.UnsafeFPMath &&
26505 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
26507 Opcode = X86ISD::FMIN;
26510 // Converting this to a min would handle both negative zeros and NaNs
26511 // incorrectly, but we can swap the operands to fix both.
26512 std::swap(LHS, RHS);
26516 Opcode = X86ISD::FMIN;
26520 // Converting this to a max would handle comparisons between positive
26521 // and negative zero incorrectly.
26522 if (!DAG.getTarget().Options.UnsafeFPMath &&
26523 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
26525 Opcode = X86ISD::FMAX;
26528 // Converting this to a max would handle NaNs incorrectly, and swapping
26529 // the operands would cause it to handle comparisons between positive
26530 // and negative zero incorrectly.
26531 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
26532 if (!DAG.getTarget().Options.UnsafeFPMath &&
26533 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
26535 std::swap(LHS, RHS);
26537 Opcode = X86ISD::FMAX;
26540 // Converting this to a max would handle both negative zeros and NaNs
26541 // incorrectly, but we can swap the operands to fix both.
26542 std::swap(LHS, RHS);
26546 Opcode = X86ISD::FMAX;
26549 // Check for x CC y ? y : x -- a min/max with reversed arms.
26550 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
26551 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
26555 // Converting this to a min would handle comparisons between positive
26556 // and negative zero incorrectly, and swapping the operands would
26557 // cause it to handle NaNs incorrectly.
26558 if (!DAG.getTarget().Options.UnsafeFPMath &&
26559 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
26560 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
26562 std::swap(LHS, RHS);
26564 Opcode = X86ISD::FMIN;
26567 // Converting this to a min would handle NaNs incorrectly.
26568 if (!DAG.getTarget().Options.UnsafeFPMath &&
26569 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
26571 Opcode = X86ISD::FMIN;
26574 // Converting this to a min would handle both negative zeros and NaNs
26575 // incorrectly, but we can swap the operands to fix both.
26576 std::swap(LHS, RHS);
26580 Opcode = X86ISD::FMIN;
26584 // Converting this to a max would handle NaNs incorrectly.
26585 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
26587 Opcode = X86ISD::FMAX;
26590 // Converting this to a max would handle comparisons between positive
26591 // and negative zero incorrectly, and swapping the operands would
26592 // cause it to handle NaNs incorrectly.
26593 if (!DAG.getTarget().Options.UnsafeFPMath &&
26594 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
26595 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
26597 std::swap(LHS, RHS);
26599 Opcode = X86ISD::FMAX;
26602 // Converting this to a max would handle both negative zeros and NaNs
26603 // incorrectly, but we can swap the operands to fix both.
26604 std::swap(LHS, RHS);
26608 Opcode = X86ISD::FMAX;
26614 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
26617 EVT CondVT = Cond.getValueType();
26618 if (Subtarget.hasAVX512() && VT.isVector() && CondVT.isVector() &&
26619 CondVT.getVectorElementType() == MVT::i1) {
26620 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
26621 // lowering on KNL. In this case we convert it to
26622 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
26623 // The same applies to all 128- and 256-bit vectors of i8 and i16.
26624 // Starting with SKX these selects have a proper lowering.
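// e.g. (v16i8 vselect (v16i1 M), A, B) becomes
//      (v16i8 vselect (v16i8 sign_extend M), A, B), which lowers to an AVX blend.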
26625 EVT OpVT = LHS.getValueType();
26626 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
26627 (OpVT.getVectorElementType() == MVT::i8 ||
26628 OpVT.getVectorElementType() == MVT::i16) &&
26629 !(Subtarget.hasBWI() && Subtarget.hasVLX())) {
26630 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
26631 DCI.AddToWorklist(Cond.getNode());
26632 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
26635 // If this is a select between two integer constants, try to do some
26637 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
26638 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
26639 // Don't do this for crazy integer types.
26640 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
26641 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
26642 // so that TrueC (the true value) is larger than FalseC.
26643 bool NeedsCondInvert = false;
26645 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
26646 // Efficiently invertible.
26647 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
26648 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
26649 isa<ConstantSDNode>(Cond.getOperand(1))))) {
26650 NeedsCondInvert = true;
26651 std::swap(TrueC, FalseC);
26654 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
26655 if (FalseC->getAPIntValue() == 0 &&
26656 TrueC->getAPIntValue().isPowerOf2()) {
26657 if (NeedsCondInvert) // Invert the condition if needed.
26658 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
26659 DAG.getConstant(1, DL, Cond.getValueType()));
26661 // Zero extend the condition if needed.
26662 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
26664 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
26665 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
26666 DAG.getConstant(ShAmt, DL, MVT::i8));
26669 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
26670 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
26671 if (NeedsCondInvert) // Invert the condition if needed.
26672 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
26673 DAG.getConstant(1, DL, Cond.getValueType()));
26675 // Zero extend the condition if needed.
26676 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
26677 FalseC->getValueType(0), Cond);
26678 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
26679 SDValue(FalseC, 0));
26682 // Optimize cases that will turn into an LEA instruction. This requires
26683 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
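// For example (no condition inversion needed): (select Cond, 5, 2) has
// Diff = 3, so it becomes zext(Cond) * 3 + 2, which folds into a single LEA.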
26684 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
26685 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
26686 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
26688 bool isFastMultiplier = false;
26690 switch ((unsigned char)Diff) {
26692 case 1: // result = add base, cond
26693 case 2: // result = lea base( , cond*2)
26694 case 3: // result = lea base(cond, cond*2)
26695 case 4: // result = lea base( , cond*4)
26696 case 5: // result = lea base(cond, cond*4)
26697 case 8: // result = lea base( , cond*8)
26698 case 9: // result = lea base(cond, cond*8)
26699 isFastMultiplier = true;
26704 if (isFastMultiplier) {
26705 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
26706 if (NeedsCondInvert) // Invert the condition if needed.
26707 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
26708 DAG.getConstant(1, DL, Cond.getValueType()));
26710 // Zero extend the condition if needed.
26711 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
26713 // Scale the condition by the difference.
26715 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
26716 DAG.getConstant(Diff, DL,
26717 Cond.getValueType()));
26719 // Add the base if non-zero.
26720 if (FalseC->getAPIntValue() != 0)
26721 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
26722 SDValue(FalseC, 0));
26729 // Canonicalize max and min:
26730 // (x > y) ? x : y -> (x >= y) ? x : y
26731 // (x < y) ? x : y -> (x <= y) ? x : y
26732 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
26733 // the need for an extra compare
26734 // against zero. e.g.
26735 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
26737 // testl %edi, %edi
26739 // cmovgl %edi, %eax
26743 // cmovsl %eax, %edi
26744 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
26745 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
26746 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
26747 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
26752 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
26753 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
26754 Cond.getOperand(0), Cond.getOperand(1), NewCC);
26755 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
26760 // Early exit check
26761 if (!TLI.isTypeLegal(VT))
26764 // Match VSELECTs into subs with unsigned saturation.
26765 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
26766 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
26767 ((Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
26768 (Subtarget.hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
26769 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
26771 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
26772 // left side invert the predicate to simplify logic below.
26774 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
26776 CC = ISD::getSetCCInverse(CC, true);
26777 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
26781 if (Other.getNode() && Other->getNumOperands() == 2 &&
26782 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
26783 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
26784 SDValue CondRHS = Cond->getOperand(1);
26786 // Look for a general sub with unsigned saturation first.
26787 // x >= y ? x-y : 0 --> subus x, y
26788 // x > y ? x-y : 0 --> subus x, y
26789 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
26790 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
26791 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
26793 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
26794 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
26795 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
26796 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
26797 // If the RHS is a constant we have to reverse the const
26798 // canonicalization.
26799 // x > C-1 ? x+-C : 0 --> subus x, C
26800 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
26801 CondRHSConst->getAPIntValue() ==
26802 (-OpRHSConst->getAPIntValue() - 1))
26803 return DAG.getNode(
26804 X86ISD::SUBUS, DL, VT, OpLHS,
26805 DAG.getConstant(-OpRHSConst->getAPIntValue(), DL, VT));
26807 // Another special case: If C was a sign bit, the sub has been
26808 // canonicalized into a xor.
26809 // FIXME: Would it be better to use computeKnownBits to determine
26810 // whether it's safe to decanonicalize the xor?
26811 // x s< 0 ? x^C : 0 --> subus x, C
26812 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
26813 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
26814 OpRHSConst->getAPIntValue().isSignBit())
26815 // Note that we have to rebuild the RHS constant here to ensure we
26816 // don't rely on particular values of undef lanes.
26817 return DAG.getNode(
26818 X86ISD::SUBUS, DL, VT, OpLHS,
26819 DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT));
26824 // Simplify vector selection if the condition value type matches the vselect operand type.
26826 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
26827 assert(Cond.getValueType().isVector() &&
26828 "vector select expects a vector selector!");
26830 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
26831 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
26833 // Try inverting the condition if the true value is not all 1s and the false value is not all 0s.
26835 if (!TValIsAllOnes && !FValIsAllZeros &&
26836 // Check if the selector will be produced by CMPP*/PCMP*
26837 Cond.getOpcode() == ISD::SETCC &&
26838 // Check if SETCC has already been promoted
26839 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
26841 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
26842 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
26844 if (TValIsAllZeros || FValIsAllOnes) {
26845 SDValue CC = Cond.getOperand(2);
26846 ISD::CondCode NewCC =
26847 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
26848 Cond.getOperand(0).getValueType().isInteger());
26849 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
26850 std::swap(LHS, RHS);
26851 TValIsAllOnes = FValIsAllOnes;
26852 FValIsAllZeros = TValIsAllZeros;
26856 if (TValIsAllOnes || FValIsAllZeros) {
26859 if (TValIsAllOnes && FValIsAllZeros)
26861 else if (TValIsAllOnes)
26863 DAG.getNode(ISD::OR, DL, CondVT, Cond, DAG.getBitcast(CondVT, RHS));
26864 else if (FValIsAllZeros)
26865 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
26866 DAG.getBitcast(CondVT, LHS));
26868 return DAG.getBitcast(VT, Ret);
26872 // If this is a *dynamic* select (non-constant condition) and we can match
26873 // this node with one of the variable blend instructions, restructure the
26874 // condition so that the blends can use the high bit of each element and use
26875 // SimplifyDemandedBits to simplify the condition operand.
26876 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
26877 !DCI.isBeforeLegalize() &&
26878 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
26879 unsigned BitWidth = Cond.getValueType().getScalarSizeInBits();
26881 // Don't optimize vector selects that map to mask-registers.
26885 // We can only handle the cases where VSELECT is directly legal on the
26886 // subtarget. We custom lower VSELECT nodes with constant conditions and
26887 // this makes it hard to see whether a dynamic VSELECT will correctly
26888 // lower, so we both check the operation's status and explicitly handle the
26889 // cases where a *dynamic* blend will fail even though a constant-condition
26890 // blend could be custom lowered.
26891 // FIXME: We should find a better way to handle this class of problems.
26892 // Potentially, we should combine constant-condition vselect nodes
26893 // pre-legalization into shuffles and not mark as many types as custom
26895 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
26897 // FIXME: We don't support i16-element blends currently. We could and
26898 // should support them by making *all* the bits in the condition be set
26899 // rather than just the high bit and using an i8-element blend.
26900 if (VT.getVectorElementType() == MVT::i16)
26902 // Dynamic blending was only available from SSE4.1 onward.
26903 if (VT.is128BitVector() && !Subtarget.hasSSE41())
26905 // Byte blends are only available in AVX2
26906 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
26909 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
26910 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
26912 APInt KnownZero, KnownOne;
26913 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
26914 DCI.isBeforeLegalizeOps());
26915 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
26916 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
26918 // If we changed the computation somewhere in the DAG, this change
26919 // will affect all users of Cond.
26920 // Make sure it is fine and update all the nodes so that we do not
26921 // use the generic VSELECT anymore. Otherwise, we may perform
26922 // wrong optimizations as we messed up with the actual expectation
26923 // for the vector boolean values.
26924 if (Cond != TLO.Old) {
26925 // Check all uses of that condition operand to check whether it will be
26926 // consumed by non-BLEND instructions, which may depend on all bits being set properly.
26928 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
26930 if (I->getOpcode() != ISD::VSELECT)
26931 // TODO: Add other opcodes eventually lowered into BLEND.
26934 // Update all the users of the condition, before committing the change,
26935 // so that the VSELECT optimizations that expect the correct vector
26936 // boolean value will not be triggered.
26937 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
26939 DAG.ReplaceAllUsesOfValueWith(
26941 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
26942 Cond, I->getOperand(1), I->getOperand(2)));
26943 DCI.CommitTargetLoweringOpt(TLO);
26946 // At this point, only Cond is changed. Change the condition
26947 // just for N to keep the opportunity to optimize all other
26948 // users their own way.
26949 DAG.ReplaceAllUsesOfValueWith(
26951 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
26952 TLO.New, N->getOperand(1), N->getOperand(2)));
26961 /// Combine (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
26963 /// into (brcond/cmov/setcc .., (LADD x, 1), COND_LE),
26964 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
26965 /// Note that this is only legal for some op/cc combinations.
26966 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
26967 SelectionDAG &DAG) {
26968 // This combine only operates on CMP-like nodes.
26969 if (!(Cmp.getOpcode() == X86ISD::CMP ||
26970 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
26973 // This only applies to variations of the common case:
26974 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
26975 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
26976 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
26977 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
26978 // Using the proper condcodes (see below), overflow is checked for.
26980 // FIXME: We can generalize both constraints:
26981 // - XOR/OR/AND (if they were made to survive AtomicExpand)
26983 // if the result is compared.
26985 SDValue CmpLHS = Cmp.getOperand(0);
26986 SDValue CmpRHS = Cmp.getOperand(1);
26988 if (!CmpLHS.hasOneUse())
26991 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
26992 if (!CmpRHSC || CmpRHSC->getZExtValue() != 0)
26995 const unsigned Opc = CmpLHS.getOpcode();
26997 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
27000 SDValue OpRHS = CmpLHS.getOperand(2);
27001 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
27005 APInt Addend = OpRHSC->getAPIntValue();
27006 if (Opc == ISD::ATOMIC_LOAD_SUB)
27009 if (CC == X86::COND_S && Addend == 1)
27011 else if (CC == X86::COND_NS && Addend == 1)
27013 else if (CC == X86::COND_G && Addend == -1)
27015 else if (CC == X86::COND_LE && Addend == -1)
27020 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG);
27021 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
27022 DAG.getUNDEF(CmpLHS.getValueType()));
27023 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
27027 // Check whether a boolean test is testing a boolean value generated by
27028 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition flag.
27031 // Simplify the following patterns:
27032 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
27033 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
27034 // to (Op EFLAGS Cond)
27036 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
27037 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
27038 // to (Op EFLAGS !Cond)
27040 // where Op could be BRCOND or CMOV.
27042 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
27043 // This combine only operates on CMP-like nodes.
27044 if (!(Cmp.getOpcode() == X86ISD::CMP ||
27045 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
27048 // Quit if not used as a boolean value.
27049 if (CC != X86::COND_E && CC != X86::COND_NE)
27052 // Check CMP operands. One of them should be 0 or 1 and the other should be
27053 // a SetCC or extended from it.
27054 SDValue Op1 = Cmp.getOperand(0);
27055 SDValue Op2 = Cmp.getOperand(1);
27058 const ConstantSDNode* C = nullptr;
27059 bool needOppositeCond = (CC == X86::COND_E);
27060 bool checkAgainstTrue = false; // Is it a comparison against 1?
27062 if ((C = dyn_cast<ConstantSDNode>(Op1)))
27064 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
27066 else // Quit if all operands are not constants.
27069 if (C->getZExtValue() == 1) {
27070 needOppositeCond = !needOppositeCond;
27071 checkAgainstTrue = true;
27072 } else if (C->getZExtValue() != 0)
27073 // Quit if the constant is neither 0 nor 1.
27076 bool truncatedToBoolWithAnd = false;
27077 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
27078 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
27079 SetCC.getOpcode() == ISD::TRUNCATE ||
27080 SetCC.getOpcode() == ISD::AssertZext ||
27081 SetCC.getOpcode() == ISD::AND) {
27082 if (SetCC.getOpcode() == ISD::AND) {
27084 if (isOneConstant(SetCC.getOperand(0)))
27086 if (isOneConstant(SetCC.getOperand(1)))
27090 SetCC = SetCC.getOperand(OpIdx);
27091 truncatedToBoolWithAnd = true;
27093 SetCC = SetCC.getOperand(0);
27096 switch (SetCC.getOpcode()) {
27097 case X86ISD::SETCC_CARRY:
27098 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
27099 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
27100 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
27101 // truncated to i1 using 'and'.
27102 if (checkAgainstTrue && !truncatedToBoolWithAnd)
27104 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
27105 "Invalid use of SETCC_CARRY!");
27107 case X86ISD::SETCC:
27108 // Set the condition code or opposite one if necessary.
27109 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
27110 if (needOppositeCond)
27111 CC = X86::GetOppositeBranchCondition(CC);
27112 return SetCC.getOperand(1);
27113 case X86ISD::CMOV: {
27114 // Check whether false/true value has canonical one, i.e. 0 or 1.
27115 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
27116 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
27117 // Quit if true value is not a constant.
27120 // Quit if false value is not a constant.
27122 SDValue Op = SetCC.getOperand(0);
27123 // Skip 'zext' or 'trunc' node.
27124 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
27125 Op.getOpcode() == ISD::TRUNCATE)
27126 Op = Op.getOperand(0);
27127 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
27129 if ((Op.getOpcode() != X86ISD::RDRAND &&
27130 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
27133 // Quit if false value is not the constant 0 or 1.
27134 bool FValIsFalse = true;
27135 if (FVal && FVal->getZExtValue() != 0) {
27136 if (FVal->getZExtValue() != 1)
27138 // If FVal is 1, opposite cond is needed.
27139 needOppositeCond = !needOppositeCond;
27140 FValIsFalse = false;
27142 // Quit if TVal is not the constant opposite of FVal.
27143 if (FValIsFalse && TVal->getZExtValue() != 1)
27145 if (!FValIsFalse && TVal->getZExtValue() != 0)
27147 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
27148 if (needOppositeCond)
27149 CC = X86::GetOppositeBranchCondition(CC);
27150 return SetCC.getOperand(3);
27157 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS. Matches either:
27159 /// (X86or (X86setcc) (X86setcc))
27160 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
27161 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
27162 X86::CondCode &CC1, SDValue &Flags,
27164 if (Cond->getOpcode() == X86ISD::CMP) {
27165 if (!isNullConstant(Cond->getOperand(1)))
27168 Cond = Cond->getOperand(0);
27173 SDValue SetCC0, SetCC1;
27174 switch (Cond->getOpcode()) {
27175 default: return false;
27182 SetCC0 = Cond->getOperand(0);
27183 SetCC1 = Cond->getOperand(1);
27187 // Make sure we have SETCC nodes, using the same flags value.
27188 if (SetCC0.getOpcode() != X86ISD::SETCC ||
27189 SetCC1.getOpcode() != X86ISD::SETCC ||
27190 SetCC0->getOperand(1) != SetCC1->getOperand(1))
27193 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
27194 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
27195 Flags = SetCC0->getOperand(1);
27199 /// Optimize an EFLAGS definition used according to the condition code \p CC
27200 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
27201 /// uses of chain values.
27202 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
27203 SelectionDAG &DAG) {
27204 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
27206 return combineSetCCAtomicArith(EFLAGS, CC, DAG);
27209 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
27210 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
27211 TargetLowering::DAGCombinerInfo &DCI,
27212 const X86Subtarget &Subtarget) {
27215 // If the flag operand isn't dead, don't touch this CMOV.
27216 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
27219 SDValue FalseOp = N->getOperand(0);
27220 SDValue TrueOp = N->getOperand(1);
27221 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
27222 SDValue Cond = N->getOperand(3);
27224 if (CC == X86::COND_E || CC == X86::COND_NE) {
27225 switch (Cond.getOpcode()) {
27229 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
27230 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
27231 return (CC == X86::COND_E) ? FalseOp : TrueOp;
27235 // Try to simplify the EFLAGS and condition code operands.
27236 // We can't always do this as FCMOV only supports a subset of X86 cond.
27237 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG)) {
27238 if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
27239 SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
27241 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
27245 // If this is a select between two integer constants, try to do some
27246 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
27248 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
27249 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
27250 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
27251 // larger than FalseC (the false value).
27252 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
27253 CC = X86::GetOppositeBranchCondition(CC);
27254 std::swap(TrueC, FalseC);
27255 std::swap(TrueOp, FalseOp);
27258 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
27259 // This is efficient for any integer data type (including i8/i16) and shift amount.
27261 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
27262 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
27263 DAG.getConstant(CC, DL, MVT::i8), Cond);
27265 // Zero extend the condition if needed.
27266 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
27268 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
27269 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
27270 DAG.getConstant(ShAmt, DL, MVT::i8));
27271 if (N->getNumValues() == 2) // Dead flag value?
27272 return DCI.CombineTo(N, Cond, SDValue());
27276 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
27277 // for any integer data type, including i8/i16.
27278 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
27279 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
27280 DAG.getConstant(CC, DL, MVT::i8), Cond);
27282 // Zero extend the condition if needed.
27283 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
27284 FalseC->getValueType(0), Cond);
27285 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
27286 SDValue(FalseC, 0));
27288 if (N->getNumValues() == 2) // Dead flag value?
27289 return DCI.CombineTo(N, Cond, SDValue());
27293 // Optimize cases that will turn into an LEA instruction. This requires
27294 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
27295 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
27296 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
27297 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
27299 bool isFastMultiplier = false;
27301 switch ((unsigned char)Diff) {
27303 case 1: // result = add base, cond
27304 case 2: // result = lea base( , cond*2)
27305 case 3: // result = lea base(cond, cond*2)
27306 case 4: // result = lea base( , cond*4)
27307 case 5: // result = lea base(cond, cond*4)
27308 case 8: // result = lea base( , cond*8)
27309 case 9: // result = lea base(cond, cond*8)
27310 isFastMultiplier = true;
27315 if (isFastMultiplier) {
27316 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
27317 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
27318 DAG.getConstant(CC, DL, MVT::i8), Cond);
27319 // Zero extend the condition if needed.
27320 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
27322 // Scale the condition by the difference.
27324 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
27325 DAG.getConstant(Diff, DL, Cond.getValueType()));
27327 // Add the base if non-zero.
27328 if (FalseC->getAPIntValue() != 0)
27329 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
27330 SDValue(FalseC, 0));
27331 if (N->getNumValues() == 2) // Dead flag value?
27332 return DCI.CombineTo(N, Cond, SDValue());
27339 // Handle these cases:
27340 // (select (x != c), e, c) -> select (x != c), e, x),
27341 // (select (x == c), c, e) -> select (x == c), x, e)
27342 // where the c is an integer constant, and the "select" is the combination
27343 // of CMOV and CMP.
27345 // The rationale for this change is that the conditional-move from a constant
27346 // needs two instructions, however, conditional-move from a register needs
27347 // only one instruction.
27349 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
27350 // some instruction-combining opportunities. This opt needs to be
27351 // postponed as late as possible.
27353 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
27354 // the DCI.xxxx conditions are provided to postpone the optimization as
27355 // late as possible.
27357 ConstantSDNode *CmpAgainst = nullptr;
27358 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
27359 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
27360 !isa<ConstantSDNode>(Cond.getOperand(0))) {
27362 if (CC == X86::COND_NE &&
27363 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
27364 CC = X86::GetOppositeBranchCondition(CC);
27365 std::swap(TrueOp, FalseOp);
27368 if (CC == X86::COND_E &&
27369 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
27370 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
27371 DAG.getConstant(CC, DL, MVT::i8), Cond };
27372 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
27377 // Fold and/or of setcc's to double CMOV:
27378 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
27379 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
27381 // This combine lets us generate:
27382 // cmovcc1 (jcc1 if we don't have CMOV) followed by cmovcc2, instead of a
27388 // setcc/and-or/test sequence ending in cmovne (jne if we don't have CMOV).
27389 // When we can't use the CMOV instruction, it might increase branch mispredicts.
27391 // When we can use CMOV, or when there is no mispredict, this improves
27392 // throughput and reduces register pressure.
27394 if (CC == X86::COND_NE) {
27396 X86::CondCode CC0, CC1;
27398 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
27400 std::swap(FalseOp, TrueOp);
27401 CC0 = X86::GetOppositeBranchCondition(CC0);
27402 CC1 = X86::GetOppositeBranchCondition(CC1);
27405 SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
27407 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), LOps);
27408 SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
27409 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
27410 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(CMOV.getNode(), 1));
27418 /// Different mul shrinking modes.
27419 enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
27421 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
27422 EVT VT = N->getOperand(0).getValueType();
27423 if (VT.getScalarSizeInBits() != 32)
27426 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
27427 unsigned SignBits[2] = {1, 1};
27428 bool IsPositive[2] = {false, false};
27429 for (unsigned i = 0; i < 2; i++) {
27430 SDValue Opd = N->getOperand(i);
27432 // DAG.ComputeNumSignBits returns 1 for ISD::ANY_EXTEND, so we need to
27433 // compute signbits for it separately.
27434 if (Opd.getOpcode() == ISD::ANY_EXTEND) {
27435 // For anyextend, it is safe to assume an appropriate number of leading sign/zero bits.
27437 if (Opd.getOperand(0).getValueType().getVectorElementType() == MVT::i8)
27439 else if (Opd.getOperand(0).getValueType().getVectorElementType() ==
27444 IsPositive[i] = true;
27445 } else if (Opd.getOpcode() == ISD::BUILD_VECTOR) {
27446 // All the operands of BUILD_VECTOR need to be int constant.
27447 // Find the smallest value range which all the operands belong to.
27449 IsPositive[i] = true;
27450 for (const SDValue &SubOp : Opd.getNode()->op_values()) {
27451 if (SubOp.isUndef())
27453 auto *CN = dyn_cast<ConstantSDNode>(SubOp);
27456 APInt IntVal = CN->getAPIntValue();
27457 if (IntVal.isNegative())
27458 IsPositive[i] = false;
27459 SignBits[i] = std::min(SignBits[i], IntVal.getNumSignBits());
27462 SignBits[i] = DAG.ComputeNumSignBits(Opd);
27463 if (Opd.getOpcode() == ISD::ZERO_EXTEND)
27464 IsPositive[i] = true;
27468 bool AllPositive = IsPositive[0] && IsPositive[1];
27469 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
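// For reference: a 32-bit element whose value fits in a signed i8 has at
// least 32 - 8 + 1 = 25 sign bits, and one fitting in a signed i16 has at
// least 17, which is what the thresholds below check.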
27470 // When ranges are from -128 ~ 127, use MULS8 mode.
27471 if (MinSignBits >= 25)
27473 // When ranges are from 0 ~ 255, use MULU8 mode.
27474 else if (AllPositive && MinSignBits >= 24)
27476 // When ranges are from -32768 ~ 32767, use MULS16 mode.
27477 else if (MinSignBits >= 17)
27479 // When ranges are from 0 ~ 65535, use MULU16 mode.
27480 else if (AllPositive && MinSignBits >= 16)
27487 /// When the operands of vector mul are extended from smaller size values,
27488 /// like i8 and i16, the type of mul may be shrunk to generate more
27489 /// efficient code. Two typical patterns are handled:
27491 /// %2 = sext/zext <N x i8> %1 to <N x i32>
27492 /// %4 = sext/zext <N x i8> %3 to <N x i32>
27493 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
27494 /// %5 = mul <N x i32> %2, %4
27497 /// %2 = zext/sext <N x i16> %1 to <N x i32>
27498 /// %4 = zext/sext <N x i16> %3 to <N x i32>
27499 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
27500 /// %5 = mul <N x i32> %2, %4
27502 /// There are four mul shrinking modes:
27503 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
27504 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
27505 /// generate pmullw+sext32 for it (MULS8 mode).
27506 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
27507 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
27508 /// generate pmullw+zext32 for it (MULU8 mode).
27509 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
27510 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
27511 /// generate pmullw+pmulhw for it (MULS16 mode).
27512 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
27513 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
27514 /// generate pmullw+pmulhuw for it (MULU16 mode).
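/// For example (MULU16, illustrative): a v8i32 multiply whose operands are
/// zero-extended from v8i16 is shrunk to a v8i16 pmullw plus pmulhuw, and the
/// two halves are interleaved back (punpcklwd/punpckhwd) into the v8i32 result.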
27515 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
27516 const X86Subtarget &Subtarget) {
27517 // pmulld is supported since SSE41. It is better to use pmulld
27518 // instead of pmullw+pmulhw.
27519 // pmullw/pmulhw are not supported by SSE.
27520 if (Subtarget.hasSSE41() || !Subtarget.hasSSE2())
27524 if (!canReduceVMulWidth(N, DAG, Mode))
27528 SDValue N0 = N->getOperand(0);
27529 SDValue N1 = N->getOperand(1);
27530 EVT VT = N->getOperand(0).getValueType();
27531 unsigned RegSize = 128;
27532 MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
27534 EVT::getVectorVT(*DAG.getContext(), MVT::i16, VT.getVectorNumElements());
27535 // Shrink the operands of mul.
27536 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
27537 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
27539 if (VT.getVectorNumElements() >= OpsVT.getVectorNumElements()) {
27540 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
27541 // lower part is needed.
27542 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
27543 if (Mode == MULU8 || Mode == MULS8) {
27544 return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
27547 MVT ResVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
27548 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
27549 // the higher part is also needed.
27550 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
27551 ReducedVT, NewN0, NewN1);
27553 // Repack the lower part and higher part result of mul into a wider result.
27555 // Generate shuffle functioning as punpcklwd.
27556 SmallVector<int, 16> ShuffleMask(VT.getVectorNumElements());
27557 for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) {
27558 ShuffleMask[2 * i] = i;
27559 ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements();
27562 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
27563 ResLo = DAG.getNode(ISD::BITCAST, DL, ResVT, ResLo);
27564 // Generate shuffle functioning as punpckhwd.
27565 for (unsigned i = 0; i < VT.getVectorNumElements() / 2; i++) {
27566 ShuffleMask[2 * i] = i + VT.getVectorNumElements() / 2;
27567 ShuffleMask[2 * i + 1] = i + VT.getVectorNumElements() * 3 / 2;
27570 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
27571 ResHi = DAG.getNode(ISD::BITCAST, DL, ResVT, ResHi);
27572 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
27575 // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
27576 // to legalize the mul explicitly because implicit legalization for type
27577 // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
27578 // instructions which will not exist when we explicitly legalize it by
27579 // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
27580 // <4 x i16> undef).
27582 // Legalize the operands of mul.
27583 SmallVector<SDValue, 16> Ops(RegSize / ReducedVT.getSizeInBits(),
27584 DAG.getUNDEF(ReducedVT));
27586 NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
27588 NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
27590 if (Mode == MULU8 || Mode == MULS8) {
27591 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the lower part is needed.
27593 SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
27595 // convert the type of mul result to VT.
27596 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
27597 SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
27598 : ISD::SIGN_EXTEND_VECTOR_INREG,
27600 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
27601 DAG.getIntPtrConstant(0, DL));
27603 // Generate the lower part (pmullw) and higher part (pmulhw/pmulhuw) of the mul. For
27604 // MULU16/MULS16, both parts are needed.
27605 SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
27606 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
27607 OpsVT, NewN0, NewN1);
27609 // Repack the lower part and higher part result of mul into a wider
27610 // result. Make sure the type of mul result is VT.
27611 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
27612 SDValue Res = DAG.getNode(X86ISD::UNPCKL, DL, OpsVT, MulLo, MulHi);
27613 Res = DAG.getNode(ISD::BITCAST, DL, ResVT, Res);
27614 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
27615 DAG.getIntPtrConstant(0, DL));
27620 /// Optimize a single multiply with constant into two operations in order to
27621 /// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
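/// For example, x * 45 = (x * 9) * 5 can be emitted as two LEAs, and
/// x * 40 = (x * 5) << 3 as an LEA plus a SHL (order chosen below).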
27622 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
27623 TargetLowering::DAGCombinerInfo &DCI,
27624 const X86Subtarget &Subtarget) {
27625 EVT VT = N->getValueType(0);
27626 if (DCI.isBeforeLegalize() && VT.isVector())
27627 return reduceVMULWidth(N, DAG, Subtarget);
27629 // An imul is usually smaller than the alternative sequence.
27630 if (DAG.getMachineFunction().getFunction()->optForMinSize())
27633 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
27636 if (VT != MVT::i64 && VT != MVT::i32)
27639 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
27642 uint64_t MulAmt = C->getZExtValue();
27643 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
27646 uint64_t MulAmt1 = 0;
27647 uint64_t MulAmt2 = 0;
27648 if ((MulAmt % 9) == 0) {
27650 MulAmt2 = MulAmt / 9;
27651 } else if ((MulAmt % 5) == 0) {
27653 MulAmt2 = MulAmt / 5;
27654 } else if ((MulAmt % 3) == 0) {
27656 MulAmt2 = MulAmt / 3;
27662 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
27664 if (isPowerOf2_64(MulAmt2) &&
27665 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
27666 // If the second multiplier is pow2, issue it first. We want the multiply by
27667 // 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
27669 std::swap(MulAmt1, MulAmt2);
27671 if (isPowerOf2_64(MulAmt1))
27672 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
27673 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
27675 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
27676 DAG.getConstant(MulAmt1, DL, VT));
27678 if (isPowerOf2_64(MulAmt2))
27679 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
27680 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
27682 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
27683 DAG.getConstant(MulAmt2, DL, VT));
27687 assert(MulAmt != 0 && MulAmt != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX)
27688 && "Both cases that could cause potential overflows should have "
27689 "already been handled.");
27690 if (isPowerOf2_64(MulAmt - 1))
27691 // (mul x, 2^N + 1) => (add (shl x, N), x)
27692 NewMul = DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
27693 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
27694 DAG.getConstant(Log2_64(MulAmt - 1), DL,
27697 else if (isPowerOf2_64(MulAmt + 1))
27698 // (mul x, 2^N - 1) => (sub (shl x, N), x)
27699 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getNode(ISD::SHL, DL, VT,
27701 DAG.getConstant(Log2_64(MulAmt + 1),
27702 DL, MVT::i8)), N->getOperand(0));
27706 // Do not add new nodes to DAG combiner worklist.
27707 DCI.CombineTo(N, NewMul, false);
27712 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
27713 SDValue N0 = N->getOperand(0);
27714 SDValue N1 = N->getOperand(1);
27715 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
27716 EVT VT = N0.getValueType();
27718 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
27719 // since the result of setcc_c is all zero's or all ones.
27720 if (VT.isInteger() && !VT.isVector() &&
27721 N1C && N0.getOpcode() == ISD::AND &&
27722 N0.getOperand(1).getOpcode() == ISD::Constant) {
27723 SDValue N00 = N0.getOperand(0);
27724 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
27725 const APInt &ShAmt = N1C->getAPIntValue();
27726 Mask = Mask.shl(ShAmt);
27727 bool MaskOK = false;
27728 // We can handle cases concerning bit-widening nodes containing setcc_c if
27729     // we carefully interrogate the mask to make sure we are semantics
27730     // preserving.
27731 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
27732 // of the underlying setcc_c operation if the setcc_c was zero extended.
27733 // Consider the following example:
27734 // zext(setcc_c) -> i32 0x0000FFFF
27735 // c1 -> i32 0x0000FFFF
27736 // c2 -> i32 0x00000001
27737 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
27738 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
27739 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
27741 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
27742 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
27744 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
27745 N00.getOpcode() == ISD::ANY_EXTEND) &&
27746 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
27747 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
27749 if (MaskOK && Mask != 0) {
27751 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
27755 // Hardware support for vector shifts is sparse which makes us scalarize the
27756   // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
27757   // SHL.
27758 // (shl V, 1) -> add V,V
27759 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
27760 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
27761 assert(N0.getValueType().isVector() && "Invalid vector shift type");
27762 // We shift all of the values by one. In many cases we do not have
27763       // hardware support for this operation. This is better expressed as an ADD
27764       // of two values.
27765 if (N1SplatC->getAPIntValue() == 1)
27766 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
27772 static SDValue combineShiftRightAlgebraic(SDNode *N, SelectionDAG &DAG) {
27773 SDValue N0 = N->getOperand(0);
27774 SDValue N1 = N->getOperand(1);
27775 EVT VT = N0.getValueType();
27776 unsigned Size = VT.getSizeInBits();
27778 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
27779 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
27780 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
27781 // depending on sign of (SarConst - [56,48,32,24,16])
27783 // sexts in X86 are MOVs. The MOVs have the same code size
27784   // as above SHIFTs (only a SHIFT by 1 has lower code size).
27785   // However the MOVs have 2 advantages over a SHIFT:
27786 // 1. MOVs can write to a register that differs from source
27787 // 2. MOVs accept memory operands
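  // Illustrative example (i32): (sra (shl x, 24), 25) becomes
  // (sra (sext_inreg x, i8), 1), and (sra (shl x, 24), 23) becomes
  // (shl (sext_inreg x, i8), 1); the shl/sra pair that sign-extends the low
  // byte is expressed as a MOV-style sign extension plus a small shift.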
27789 if (!VT.isInteger() || VT.isVector() || N1.getOpcode() != ISD::Constant ||
27790 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
27791 N0.getOperand(1).getOpcode() != ISD::Constant)
27794 SDValue N00 = N0.getOperand(0);
27795 SDValue N01 = N0.getOperand(1);
27796 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
27797 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
27798 EVT CVT = N1.getValueType();
27800 if (SarConst.isNegative())
27803 for (MVT SVT : MVT::integer_valuetypes()) {
27804 unsigned ShiftSize = SVT.getSizeInBits();
27805     // Skip types without a corresponding sext/zext and
27806     // ShlConst values that are not one of [56,48,32,24,16].
27807 if (ShiftSize < 8 || ShiftSize > 64 || ShlConst != Size - ShiftSize)
27811 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
27812 SarConst = SarConst - (Size - ShiftSize);
27815 else if (SarConst.isNegative())
27816 return DAG.getNode(ISD::SHL, DL, VT, NN,
27817 DAG.getConstant(-SarConst, DL, CVT));
27819 return DAG.getNode(ISD::SRA, DL, VT, NN,
27820 DAG.getConstant(SarConst, DL, CVT));
27825 /// \brief Returns a vector of 0s if the input node is a vector logical
27826 /// shift by a constant amount that is known to be greater than or equal
27827 /// to the vector element size in bits.
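/// For example (illustrative): (srl v4i32:X, splat(32)) and
/// (shl v8i16:X, splat(16)) both fold to the all-zeros vector, since the
/// splatted shift amount is at least the element width.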
27828 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
27829 const X86Subtarget &Subtarget) {
27830 EVT VT = N->getValueType(0);
27832 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
27833 (!Subtarget.hasInt256() ||
27834 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
27837 SDValue Amt = N->getOperand(1);
27839 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
27840 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
27841 const APInt &ShiftAmt = AmtSplat->getAPIntValue();
27842 unsigned MaxAmount =
27843 VT.getSimpleVT().getVectorElementType().getSizeInBits();
27845 // SSE2/AVX2 logical shifts always return a vector of 0s
27846 // if the shift amount is bigger than or equal to
27847 // the element size. The constant shift amount will be
27848     // encoded as an 8-bit immediate.
27849 if (ShiftAmt.trunc(8).uge(MaxAmount))
27850 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, DL);
27856 static SDValue combineShift(SDNode* N, SelectionDAG &DAG,
27857 TargetLowering::DAGCombinerInfo &DCI,
27858 const X86Subtarget &Subtarget) {
27859 if (N->getOpcode() == ISD::SHL)
27860 if (SDValue V = combineShiftLeft(N, DAG))
27863 if (N->getOpcode() == ISD::SRA)
27864 if (SDValue V = combineShiftRightAlgebraic(N, DAG))
27867 // Try to fold this logical shift into a zero vector.
27868 if (N->getOpcode() != ISD::SRA)
27869 if (SDValue V = performShiftToAllZeros(N, DAG, Subtarget))
27875 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
27876 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
27877 /// OR -> CMPNEQSS.
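/// For example (illustrative): for scalar floats a and b,
///   (and (setcc E (cmp a, b)), (setcc NP (cmp a, b)))
/// tests "a == b and the compare is ordered", which CMPEQSS expresses in one
/// instruction; the resulting all-ones/all-zeros mask is then reduced to a
/// single bit.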
27878 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
27879 TargetLowering::DAGCombinerInfo &DCI,
27880 const X86Subtarget &Subtarget) {
27883 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
27884 // we're requiring SSE2 for both.
27885 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
27886 SDValue N0 = N->getOperand(0);
27887 SDValue N1 = N->getOperand(1);
27888 SDValue CMP0 = N0->getOperand(1);
27889 SDValue CMP1 = N1->getOperand(1);
27892 // The SETCCs should both refer to the same CMP.
27893 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
27896 SDValue CMP00 = CMP0->getOperand(0);
27897 SDValue CMP01 = CMP0->getOperand(1);
27898 EVT VT = CMP00.getValueType();
27900 if (VT == MVT::f32 || VT == MVT::f64) {
27901 bool ExpectingFlags = false;
27902 // Check for any users that want flags:
27903 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
27904 !ExpectingFlags && UI != UE; ++UI)
27905 switch (UI->getOpcode()) {
27910 ExpectingFlags = true;
27912 case ISD::CopyToReg:
27913 case ISD::SIGN_EXTEND:
27914 case ISD::ZERO_EXTEND:
27915 case ISD::ANY_EXTEND:
27919 if (!ExpectingFlags) {
27920 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
27921 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
27923 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
27924 X86::CondCode tmp = cc0;
27929 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
27930 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
27931 // FIXME: need symbolic constants for these magic numbers.
27932 // See X86ATTInstPrinter.cpp:printSSECC().
27933 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
27934 if (Subtarget.hasAVX512()) {
27935 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
27937 DAG.getConstant(x86cc, DL, MVT::i8));
27938 if (N->getValueType(0) != MVT::i1)
27939 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
27943 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
27944 CMP00.getValueType(), CMP00, CMP01,
27945 DAG.getConstant(x86cc, DL,
27948 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
27949 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
27951 if (is64BitFP && !Subtarget.is64Bit()) {
27952 // On a 32-bit target, we cannot bitcast the 64-bit float to a
27953 // 64-bit integer, since that's not a legal type. Since
27954           // OnesOrZeroesF is all ones or all zeroes, we don't need all the
27955 // bits, but can do this little dance to extract the lowest 32 bits
27956 // and work with those going forward.
27957 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
27959 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
27960 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
27961 Vector32, DAG.getIntPtrConstant(0, DL));
27965 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
27966 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
27967 DAG.getConstant(1, DL, IntVT));
27968 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
27970 return OneBitOfTruth;
27978 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
27979 static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
27980 assert(N->getOpcode() == ISD::AND);
27982 EVT VT = N->getValueType(0);
27983 SDValue N0 = N->getOperand(0);
27984 SDValue N1 = N->getOperand(1);
27987 if (VT != MVT::v2i64 && VT != MVT::v4i64 &&
27988 VT != MVT::v8i64 && VT != MVT::v16i32 &&
27989 VT != MVT::v4i32 && VT != MVT::v8i32) // Legal with VLX
27992 // Canonicalize XOR to the left.
27993 if (N1.getOpcode() == ISD::XOR)
27996 if (N0.getOpcode() != ISD::XOR)
27999 SDValue N00 = N0->getOperand(0);
28000 SDValue N01 = N0->getOperand(1);
28002 N01 = peekThroughBitcasts(N01);
28004 // Either match a direct AllOnes for 128, 256, and 512-bit vectors, or an
28005 // insert_subvector building a 256-bit AllOnes vector.
28006 if (!ISD::isBuildVectorAllOnes(N01.getNode())) {
28007 if (!VT.is256BitVector() || N01->getOpcode() != ISD::INSERT_SUBVECTOR)
28010 SDValue V1 = N01->getOperand(0);
28011 SDValue V2 = N01->getOperand(1);
28012 if (V1.getOpcode() != ISD::INSERT_SUBVECTOR ||
28013 !V1.getOperand(0).isUndef() ||
28014 !ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) ||
28015 !ISD::isBuildVectorAllOnes(V2.getNode()))
28018 return DAG.getNode(X86ISD::ANDNP, DL, VT, N00, N1);
28021 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
28022 // register. In most cases we actually compare or select YMM-sized registers
28023 // and mixing the two types creates horrible code. This method optimizes
28024 // some of the transition sequences.
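// For example (illustrative, assuming AVX2 so the wide op is legal): with
// A and B of type v8i32,
//   (sign_extend (xor (trunc A), (trunc B)) : v8i16) : v8i32
// can be rewritten as (sign_extend_inreg (xor A, B), v8i16), keeping the
// logic op in a YMM-sized register instead of an XMM-sized mask type.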
28025 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
28026 TargetLowering::DAGCombinerInfo &DCI,
28027 const X86Subtarget &Subtarget) {
28028 EVT VT = N->getValueType(0);
28029 if (!VT.is256BitVector())
28032 assert((N->getOpcode() == ISD::ANY_EXTEND ||
28033 N->getOpcode() == ISD::ZERO_EXTEND ||
28034 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
28036 SDValue Narrow = N->getOperand(0);
28037 EVT NarrowVT = Narrow->getValueType(0);
28038 if (!NarrowVT.is128BitVector())
28041 if (Narrow->getOpcode() != ISD::XOR &&
28042 Narrow->getOpcode() != ISD::AND &&
28043 Narrow->getOpcode() != ISD::OR)
28046 SDValue N0 = Narrow->getOperand(0);
28047 SDValue N1 = Narrow->getOperand(1);
28050 // The Left side has to be a trunc.
28051 if (N0.getOpcode() != ISD::TRUNCATE)
28054 // The type of the truncated inputs.
28055 EVT WideVT = N0->getOperand(0)->getValueType(0);
28059 // The right side has to be a 'trunc' or a constant vector.
28060 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
28061 ConstantSDNode *RHSConstSplat = nullptr;
28062 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
28063 RHSConstSplat = RHSBV->getConstantSplatNode();
28064 if (!RHSTrunc && !RHSConstSplat)
28067 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28069 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
28072 // Set N0 and N1 to hold the inputs to the new wide operation.
28073 N0 = N0->getOperand(0);
28074 if (RHSConstSplat) {
28075 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getVectorElementType(),
28076 SDValue(RHSConstSplat, 0));
28077 N1 = DAG.getSplatBuildVector(WideVT, DL, N1);
28078 } else if (RHSTrunc) {
28079 N1 = N1->getOperand(0);
28082 // Generate the wide operation.
28083 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
28084 unsigned Opcode = N->getOpcode();
28086 case ISD::ANY_EXTEND:
28088 case ISD::ZERO_EXTEND: {
28089 unsigned InBits = NarrowVT.getScalarSizeInBits();
28090 APInt Mask = APInt::getAllOnesValue(InBits);
28091 Mask = Mask.zext(VT.getScalarSizeInBits());
28092 return DAG.getNode(ISD::AND, DL, VT,
28093 Op, DAG.getConstant(Mask, DL, VT));
28095 case ISD::SIGN_EXTEND:
28096 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
28097 Op, DAG.getValueType(NarrowVT));
28099 llvm_unreachable("Unexpected opcode");
28103 static SDValue combineVectorZext(SDNode *N, SelectionDAG &DAG,
28104 TargetLowering::DAGCombinerInfo &DCI,
28105 const X86Subtarget &Subtarget) {
28106 SDValue N0 = N->getOperand(0);
28107 SDValue N1 = N->getOperand(1);
28110 // A vector zext_in_reg may be represented as a shuffle,
28111 // feeding into a bitcast (this represents anyext) feeding into
28112 // an and with a mask.
28113 // We'd like to try to combine that into a shuffle with zero
28114 // plus a bitcast, removing the and.
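  // For example (illustrative): zero-extending the low bytes of a v16i8 value
  // into v4i32 lanes may appear as
  //   (and (bitcast (shuffle V, undef, <0,u,u,u,1,u,u,u,...>)), splat(0xFF))
  // and can be rewritten as
  //   (bitcast (shuffle V, zero, <0,16,16,16,1,16,16,16,...>))
  // where mask index 16 selects elements from the zero vector.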
28115 if (N0.getOpcode() != ISD::BITCAST ||
28116 N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
28119   // The other side of the AND should be a splat of 2^C - 1 (an all-ones
28120   // mask), where C is the number of bits in the source element type.
28121 N1 = peekThroughBitcasts(N1);
28122 if (N1.getOpcode() != ISD::BUILD_VECTOR)
28124 BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
28126 ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
28127 EVT SrcType = Shuffle->getValueType(0);
28129 // We expect a single-source shuffle
28130 if (!Shuffle->getOperand(1)->isUndef())
28133 unsigned SrcSize = SrcType.getScalarSizeInBits();
28134 unsigned NumElems = SrcType.getVectorNumElements();
28136 APInt SplatValue, SplatUndef;
28137 unsigned SplatBitSize;
28139 if (!Vector->isConstantSplat(SplatValue, SplatUndef,
28140 SplatBitSize, HasAnyUndefs))
28143 unsigned ResSize = N1.getValueType().getScalarSizeInBits();
28144 // Make sure the splat matches the mask we expect
28145 if (SplatBitSize > ResSize ||
28146 (SplatValue + 1).exactLogBase2() != (int)SrcSize)
28149   // Make sure the input and output sizes make sense.
28150 if (SrcSize >= ResSize || ResSize % SrcSize)
28153 // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>
28154 // The number of u's between each two values depends on the ratio between
28155 // the source and dest type.
28156 unsigned ZextRatio = ResSize / SrcSize;
28157 bool IsZext = true;
28158 for (unsigned i = 0; i != NumElems; ++i) {
28159 if (i % ZextRatio) {
28160 if (Shuffle->getMaskElt(i) > 0) {
28166 if (Shuffle->getMaskElt(i) != (int)(i / ZextRatio)) {
28167 // Expected element number
28177 // Ok, perform the transformation - replace the shuffle with
28178 // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
28179 // (instead of undef) where the k elements come from the zero vector.
28180 SmallVector<int, 8> Mask;
28181 for (unsigned i = 0; i != NumElems; ++i)
28183 Mask.push_back(NumElems);
28185 Mask.push_back(i / ZextRatio);
28187 SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
28188 Shuffle->getOperand(0), DAG.getConstant(0, DL, SrcType), Mask);
28189 return DAG.getBitcast(N0.getValueType(), NewShuffle);
28192 /// If both input operands of a logic op are being cast from floating point
28193 /// types, try to convert this into a floating point logic node to avoid
28194 /// unnecessary moves from SSE to integer registers.
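/// For example (illustrative, with SSE1): for f32 values a and b,
///   (and (bitcast a : i32), (bitcast b : i32))
/// becomes (bitcast (X86ISD::FAND a, b) : i32), so the value can stay in an
/// SSE register instead of bouncing through a GPR.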
28195 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
28196 const X86Subtarget &Subtarget) {
28197 unsigned FPOpcode = ISD::DELETED_NODE;
28198 if (N->getOpcode() == ISD::AND)
28199 FPOpcode = X86ISD::FAND;
28200 else if (N->getOpcode() == ISD::OR)
28201 FPOpcode = X86ISD::FOR;
28202 else if (N->getOpcode() == ISD::XOR)
28203 FPOpcode = X86ISD::FXOR;
28205 assert(FPOpcode != ISD::DELETED_NODE &&
28206 "Unexpected input node for FP logic conversion");
28208 EVT VT = N->getValueType(0);
28209 SDValue N0 = N->getOperand(0);
28210 SDValue N1 = N->getOperand(1);
28212 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
28213 ((Subtarget.hasSSE1() && VT == MVT::i32) ||
28214 (Subtarget.hasSSE2() && VT == MVT::i64))) {
28215 SDValue N00 = N0.getOperand(0);
28216 SDValue N10 = N1.getOperand(0);
28217 EVT N00Type = N00.getValueType();
28218 EVT N10Type = N10.getValueType();
28219 if (N00Type.isFloatingPoint() && N10Type.isFloatingPoint()) {
28220 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
28221 return DAG.getBitcast(VT, FPLogic);
28227 /// If this is a PCMPEQ or PCMPGT result that is bitwise-anded with 1 (this is
28228 /// the x86 lowering of a SETCC + ZEXT), replace the 'and' with a shift-right to
28229 /// eliminate loading the vector constant mask value. This relies on the fact
28230 /// that a PCMP always creates an all-ones or all-zeros bitmask per element.
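/// For example (illustrative): for v4i32 operands,
///   (and (pcmpgt X, Y), splat(1))  ->  (X86ISD::VSRLI (pcmpgt X, Y), 31)
/// i.e. shift each lane's sign bit down instead of loading a constant 1 mask.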
28231 static SDValue combinePCMPAnd1(SDNode *N, SelectionDAG &DAG) {
28232 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
28233 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
28235 // TODO: Use AssertSext to mark any nodes that have the property of producing
28236 // all-ones or all-zeros. Then check for that node rather than particular
28237 // opcodes.
28238 if (Op0.getOpcode() != X86ISD::PCMPEQ && Op0.getOpcode() != X86ISD::PCMPGT)
28241 // The existence of the PCMP node guarantees that we have the required SSE2 or
28242 // AVX2 for a shift of this vector type, but there is no vector shift by
28243 // immediate for a vector with byte elements (PSRLB). 512-bit vectors use the
28244 // masked compare nodes, so they should not make it here.
28245 EVT VT0 = Op0.getValueType();
28246 EVT VT1 = Op1.getValueType();
28247 unsigned EltBitWidth = VT0.getScalarType().getSizeInBits();
28248 if (VT0 != VT1 || EltBitWidth == 8)
28251 assert(VT0.getSizeInBits() == 128 || VT0.getSizeInBits() == 256);
28254 if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) || SplatVal != 1)
28258 SDValue ShAmt = DAG.getConstant(EltBitWidth - 1, DL, MVT::i8);
28259 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
28260 return DAG.getBitcast(N->getValueType(0), Shift);
28263 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
28264 TargetLowering::DAGCombinerInfo &DCI,
28265 const X86Subtarget &Subtarget) {
28266 if (DCI.isBeforeLegalizeOps())
28269 if (SDValue Zext = combineVectorZext(N, DAG, DCI, Subtarget))
28272 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
28275 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
28278 if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
28281 if (SDValue ShiftRight = combinePCMPAnd1(N, DAG))
28284 EVT VT = N->getValueType(0);
28285 SDValue N0 = N->getOperand(0);
28286 SDValue N1 = N->getOperand(1);
28289 // Create BEXTR instructions
28290 // BEXTR is ((X >> imm) & (2**size-1))
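  // For example (illustrative, with BMI): ((x >> 4) & 0xff) becomes
  // BEXTR x, 0x0804, where the control value packs the start bit (4) into bits
  // 7:0 and the field length (8) into bits 15:8, i.e. Shift | (MaskSize << 8).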
28291 if (VT != MVT::i32 && VT != MVT::i64)
28294 if (!Subtarget.hasBMI() && !Subtarget.hasTBM())
28296 if (N0.getOpcode() != ISD::SRA && N0.getOpcode() != ISD::SRL)
28299 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
28300 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
28301 if (MaskNode && ShiftNode) {
28302 uint64_t Mask = MaskNode->getZExtValue();
28303 uint64_t Shift = ShiftNode->getZExtValue();
28304 if (isMask_64(Mask)) {
28305 uint64_t MaskSize = countPopulation(Mask);
28306 if (Shift + MaskSize <= VT.getSizeInBits())
28307 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
28308 DAG.getConstant(Shift | (MaskSize << 8), DL,
28316 // (or (and (m, y), (pandn m, x)))
28318 // (vselect m, x, y)
28319 // As a special case, try to fold:
28320 // (or (and (m, (sub 0, x)), (pandn m, x)))
28322 // (sub (xor X, M), M)
28323 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
28324 const X86Subtarget &Subtarget) {
28325 assert(N->getOpcode() == ISD::OR);
28327 SDValue N0 = N->getOperand(0);
28328 SDValue N1 = N->getOperand(1);
28329 EVT VT = N->getValueType(0);
28331 if (!((VT == MVT::v2i64) || (VT == MVT::v4i64 && Subtarget.hasInt256())))
28333 assert(Subtarget.hasSSE2() && "Unexpected i64 vector without SSE2!");
28335 // Canonicalize pandn to RHS
28336 if (N0.getOpcode() == X86ISD::ANDNP)
28339 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
28342 SDValue Mask = N1.getOperand(0);
28343 SDValue X = N1.getOperand(1);
28345 if (N0.getOperand(0) == Mask)
28346 Y = N0.getOperand(1);
28347 if (N0.getOperand(1) == Mask)
28348 Y = N0.getOperand(0);
28350 // Check to see if the mask appeared in both the AND and ANDNP.
28354 // Validate that X, Y, and Mask are bitcasts, and see through them.
28355 Mask = peekThroughBitcasts(Mask);
28356 X = peekThroughBitcasts(X);
28357 Y = peekThroughBitcasts(Y);
28359 EVT MaskVT = Mask.getValueType();
28361 // Validate that the Mask operand is a vector sra node.
28362 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
28363 // there is no psrai.b
28364 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
28365 unsigned SraAmt = ~0;
28366 if (Mask.getOpcode() == ISD::SRA) {
28367 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
28368 if (auto *AmtConst = AmtBV->getConstantSplatNode())
28369 SraAmt = AmtConst->getZExtValue();
28370 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
28371 SDValue SraC = Mask.getOperand(1);
28372 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
28374 if ((SraAmt + 1) != EltBits)
28380 // (or (and (M, (sub 0, X)), (pandn M, X)))
28381 // which is a special case of vselect:
28382 // (vselect M, (sub 0, X), X)
28384 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
28385 // We know that, if fNegate is 0 or 1:
28386 // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
28388 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
28389 // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
28390 // ( M ? -X : X) == ((X ^ M ) + (M & 1))
28391 // This lets us transform our vselect to:
28392 // (add (xor X, M), (and M, 1))
28394 // (sub (xor X, M), M)
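  // Quick sanity check of the identity (illustrative): with M == -1 (all ones)
  // the RHS is (X ^ -1) + 1 == -X, and with M == 0 it is (X ^ 0) + 0 == X,
  // matching (M ? -X : X). Folding the final "+ (M & 1)" into "- M" works
  // because (M & 1) == -M when M is all-ones or all-zeros.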
28395 if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
28396 auto IsNegV = [](SDNode *N, SDValue V) {
28397 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
28398 ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
28401 if (IsNegV(Y.getNode(), X))
28403 else if (IsNegV(X.getNode(), Y))
28407 assert(EltBits == 8 || EltBits == 16 || EltBits == 32);
28408 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
28409 SDValue SubOp2 = Mask;
28411 // If the negate was on the false side of the select, then
28412 // the operands of the SUB need to be swapped. PR 27251.
28413 // This is because the pattern being matched above is
28414 // (vselect M, (sub (0, X), X) -> (sub (xor X, M), M)
28415 // but if the pattern matched was
28416 // (vselect M, X, (sub (0, X))), that is really negation of the pattern
28417 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
28418 // pattern also needs to be a negation of the replacement pattern above.
28419 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
28420 // sub accomplishes the negation of the replacement pattern.
28422 std::swap(SubOp1, SubOp2);
28424 return DAG.getBitcast(VT,
28425 DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2));
28429 // PBLENDVB is only available on SSE 4.1.
28430 if (!Subtarget.hasSSE41())
28433 MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
28435 X = DAG.getBitcast(BlendVT, X);
28436 Y = DAG.getBitcast(BlendVT, Y);
28437 Mask = DAG.getBitcast(BlendVT, Mask);
28438 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
28439 return DAG.getBitcast(VT, Mask);
28442 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
28443 TargetLowering::DAGCombinerInfo &DCI,
28444 const X86Subtarget &Subtarget) {
28445 if (DCI.isBeforeLegalizeOps())
28448 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
28451 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
28454 if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
28457 SDValue N0 = N->getOperand(0);
28458 SDValue N1 = N->getOperand(1);
28459 EVT VT = N->getValueType(0);
28461 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
28464 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
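  // For example (illustrative, i64): (or (shl x, c), (srl y, (sub 64, c)))
  // becomes (shld x, y, c); when the (64 - c) amount feeds the left shift
  // instead, the operands are swapped and SHRD is used.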
28465 bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
28467 // SHLD/SHRD instructions have lower register pressure, but on some
28468 // platforms they have higher latency than the equivalent
28469 // series of shifts/or that would otherwise be generated.
28470 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
28471 // have higher latencies and we are not optimizing for size.
28472 if (!OptForSize && Subtarget.isSHLDSlow())
28475 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
28477 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
28479 if (!N0.hasOneUse() || !N1.hasOneUse())
28482 SDValue ShAmt0 = N0.getOperand(1);
28483 if (ShAmt0.getValueType() != MVT::i8)
28485 SDValue ShAmt1 = N1.getOperand(1);
28486 if (ShAmt1.getValueType() != MVT::i8)
28488 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
28489 ShAmt0 = ShAmt0.getOperand(0);
28490 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
28491 ShAmt1 = ShAmt1.getOperand(0);
28494 unsigned Opc = X86ISD::SHLD;
28495 SDValue Op0 = N0.getOperand(0);
28496 SDValue Op1 = N1.getOperand(0);
28497 if (ShAmt0.getOpcode() == ISD::SUB) {
28498 Opc = X86ISD::SHRD;
28499 std::swap(Op0, Op1);
28500 std::swap(ShAmt0, ShAmt1);
28503 unsigned Bits = VT.getSizeInBits();
28504 if (ShAmt1.getOpcode() == ISD::SUB) {
28505 SDValue Sum = ShAmt1.getOperand(0);
28506 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
28507 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
28508 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
28509 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
28510 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
28511 return DAG.getNode(Opc, DL, VT,
28513 DAG.getNode(ISD::TRUNCATE, DL,
28516 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
28517 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
28519 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
28520 return DAG.getNode(Opc, DL, VT,
28521 N0.getOperand(0), N1.getOperand(0),
28522 DAG.getNode(ISD::TRUNCATE, DL,
28529 // Generate NEG and CMOV for integer abs.
28530 static SDValue combineIntegerAbs(SDNode *N, SelectionDAG &DAG) {
28531 EVT VT = N->getValueType(0);
28533 // Since X86 does not have CMOV for 8-bit integer, we don't convert
28534 // 8-bit integer abs to NEG and CMOV.
28535 if (VT.isInteger() && VT.getSizeInBits() == 8)
28538 SDValue N0 = N->getOperand(0);
28539 SDValue N1 = N->getOperand(1);
28542 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
28543 // and change it to SUB and CMOV.
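  // This is the classic branchless abs idiom: for i32, Y = (sra X, 31) is
  // either 0 or -1, and (X + Y) ^ Y yields X when X >= 0 and -X when X < 0.
  // The combine instead emits SUB (0 - X) and a CMOV on the resulting flags.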
28544 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
28545 N0.getOpcode() == ISD::ADD &&
28546 N0.getOperand(1) == N1 &&
28547 N1.getOpcode() == ISD::SRA &&
28548 N1.getOperand(0) == N0.getOperand(0))
28549 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
28550 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
28551 // Generate SUB & CMOV.
28552 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
28553 DAG.getConstant(0, DL, VT), N0.getOperand(0));
28555 SDValue Ops[] = { N0.getOperand(0), Neg,
28556 DAG.getConstant(X86::COND_GE, DL, MVT::i8),
28557 SDValue(Neg.getNode(), 1) };
28558 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
28563 /// Try to turn tests against the signbit in the form of:
28564 /// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
28565 /// into:
28566 ///   SETGT(X, -1)
28567 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
28568 // This is only worth doing if the output type is i8 or i1.
28569 EVT ResultType = N->getValueType(0);
28570 if (ResultType != MVT::i8 && ResultType != MVT::i1)
28573 SDValue N0 = N->getOperand(0);
28574 SDValue N1 = N->getOperand(1);
28576 // We should be performing an xor against a truncated shift.
28577 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
28580 // Make sure we are performing an xor against one.
28581 if (!isOneConstant(N1))
28584 // SetCC on x86 zero extends so only act on this if it's a logical shift.
28585 SDValue Shift = N0.getOperand(0);
28586 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
28589 // Make sure we are truncating from one of i16, i32 or i64.
28590 EVT ShiftTy = Shift.getValueType();
28591 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
28594 // Make sure the shift amount extracts the sign bit.
28595 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
28596 Shift.getConstantOperandVal(1) != ShiftTy.getSizeInBits() - 1)
28599 // Create a greater-than comparison against -1.
28600 // N.B. Using SETGE against 0 works but we want a canonical looking
28601   // comparison, and using SETGT matches up with what TranslateX86CC does.
28603 SDValue ShiftOp = Shift.getOperand(0);
28604 EVT ShiftOpTy = ShiftOp.getValueType();
28605 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28606 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
28607 *DAG.getContext(), ResultType);
28608 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
28609 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
28610 if (SetCCResultType != ResultType)
28611 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
28615 /// Turn vector tests of the signbit in the form of:
28616 /// xor (sra X, elt_size(X)-1), -1
28617 /// into:
28618 ///   pcmpgt X, -1
28620 /// This should be called before type legalization because the pattern may not
28621 /// persist after that.
28622 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
28623 const X86Subtarget &Subtarget) {
28624 EVT VT = N->getValueType(0);
28625 if (!VT.isSimple())
28628 switch (VT.getSimpleVT().SimpleTy) {
28629 default: return SDValue();
28632 case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
28633 case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
28637 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
28640 // There must be a shift right algebraic before the xor, and the xor must be a
28641 // 'not' operation.
28642 SDValue Shift = N->getOperand(0);
28643 SDValue Ones = N->getOperand(1);
28644 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
28645 !ISD::isBuildVectorAllOnes(Ones.getNode()))
28648 // The shift should be smearing the sign bit across each vector element.
28649 auto *ShiftBV = dyn_cast<BuildVectorSDNode>(Shift.getOperand(1));
28653 EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
28654 auto *ShiftAmt = ShiftBV->getConstantSplatNode();
28655 if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
28658 // Create a greater-than comparison against -1. We don't use the more obvious
28659 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
28660 return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
28663 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
28664 TargetLowering::DAGCombinerInfo &DCI,
28665 const X86Subtarget &Subtarget) {
28666 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
28669 if (DCI.isBeforeLegalizeOps())
28672 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
28675 if (Subtarget.hasCMov())
28676 if (SDValue RV = combineIntegerAbs(N, DAG))
28679 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
28685 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
28686 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
28687 /// X86ISD::AVG instruction.
28688 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
28689 const X86Subtarget &Subtarget,
28691 if (!VT.isVector() || !VT.isSimple())
28693 EVT InVT = In.getValueType();
28694 unsigned NumElems = VT.getVectorNumElements();
28696 EVT ScalarVT = VT.getVectorElementType();
28697 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
28698 isPowerOf2_32(NumElems)))
28702   // InScalarVT is the intermediate type in the AVG pattern and it should be wider
28702 // than the original input type (i8/i16).
28703 EVT InScalarVT = InVT.getVectorElementType();
28704 if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
28707 if (!Subtarget.hasSSE2())
28709 if (Subtarget.hasAVX512()) {
28710 if (VT.getSizeInBits() > 512)
28712 } else if (Subtarget.hasAVX2()) {
28713 if (VT.getSizeInBits() > 256)
28716 if (VT.getSizeInBits() > 128)
28720 // Detect the following pattern:
28722 // %1 = zext <N x i8> %a to <N x i32>
28723 // %2 = zext <N x i8> %b to <N x i32>
28724 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
28725 // %4 = add nuw nsw <N x i32> %3, %2
28726 //   %5 = lshr <N x i32> %4, <i32 1 x N>
28727 // %6 = trunc <N x i32> %5 to <N x i8>
28729 // In AVX512, the last instruction can also be a trunc store.
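  // That is, the IR computes trunc((zext(a) + zext(b) + 1) >> 1), the unsigned
  // rounding average that PAVGB/PAVGW produce directly without widening.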
28731 if (In.getOpcode() != ISD::SRL)
28734 // A lambda checking the given SDValue is a constant vector and each element
28735 // is in the range [Min, Max].
28736 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
28737 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
28738 if (!BV || !BV->isConstant())
28740 for (unsigned i = 0, e = V.getNumOperands(); i < e; i++) {
28741 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(i));
28744 uint64_t Val = C->getZExtValue();
28745 if (Val < Min || Val > Max)
28751   // Check if each element of the vector is logically shifted right by one.
28752 auto LHS = In.getOperand(0);
28753 auto RHS = In.getOperand(1);
28754 if (!IsConstVectorInRange(RHS, 1, 1))
28756 if (LHS.getOpcode() != ISD::ADD)
28759 // Detect a pattern of a + b + 1 where the order doesn't matter.
28760 SDValue Operands[3];
28761 Operands[0] = LHS.getOperand(0);
28762 Operands[1] = LHS.getOperand(1);
28764 // Take care of the case when one of the operands is a constant vector whose
28765 // element is in the range [1, 256].
28766 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
28767 Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
28768 Operands[0].getOperand(0).getValueType() == VT) {
28769 // The pattern is detected. Subtract one from the constant vector, then
28770 // demote it and emit X86ISD::AVG instruction.
28771 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
28772 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
28773 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
28774 return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0),
28778 if (Operands[0].getOpcode() == ISD::ADD)
28779 std::swap(Operands[0], Operands[1]);
28780 else if (Operands[1].getOpcode() != ISD::ADD)
28782 Operands[2] = Operands[1].getOperand(0);
28783 Operands[1] = Operands[1].getOperand(1);
28785 // Now we have three operands of two additions. Check that one of them is a
28786 // constant vector with ones, and the other two are promoted from i8/i16.
28787 for (int i = 0; i < 3; ++i) {
28788 if (!IsConstVectorInRange(Operands[i], 1, 1))
28790 std::swap(Operands[i], Operands[2]);
28792 // Check if Operands[0] and Operands[1] are results of type promotion.
28793 for (int j = 0; j < 2; ++j)
28794 if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
28795 Operands[j].getOperand(0).getValueType() != VT)
28798 // The pattern is detected, emit X86ISD::AVG instruction.
28799 return DAG.getNode(X86ISD::AVG, DL, VT, Operands[0].getOperand(0),
28800 Operands[1].getOperand(0));
28806 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
28807 TargetLowering::DAGCombinerInfo &DCI,
28808 const X86Subtarget &Subtarget) {
28809 LoadSDNode *Ld = cast<LoadSDNode>(N);
28810 EVT RegVT = Ld->getValueType(0);
28811 EVT MemVT = Ld->getMemoryVT();
28813 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28815 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
28816 // into two 16-byte operations.
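  // For example (illustrative): an unaligned 32-byte vector load on such a
  // chip becomes two 16-byte loads at offsets 0 and 16 that are re-inserted
  // into a single 256-bit value, with a TokenFactor joining the two load
  // chains.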
28817 ISD::LoadExtType Ext = Ld->getExtensionType();
28819 unsigned AddressSpace = Ld->getAddressSpace();
28820 unsigned Alignment = Ld->getAlignment();
28821 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
28822 Ext == ISD::NON_EXTLOAD &&
28823 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
28824 AddressSpace, Alignment, &Fast) && !Fast) {
28825 unsigned NumElems = RegVT.getVectorNumElements();
28829 SDValue Ptr = Ld->getBasePtr();
28831 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
28834 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
28835 Alignment, Ld->getMemOperand()->getFlags());
28837 Ptr = DAG.getMemBasePlusOffset(Ptr, 16, dl);
28839 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
28840 std::min(16U, Alignment), Ld->getMemOperand()->getFlags());
28841 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
28843 Load2.getValue(1));
28845 SDValue NewVec = DAG.getUNDEF(RegVT);
28846 NewVec = insert128BitVector(NewVec, Load1, 0, DAG, dl);
28847 NewVec = insert128BitVector(NewVec, Load2, NumElems / 2, DAG, dl);
28848 return DCI.CombineTo(N, NewVec, TF, true);
28854 /// If V is a build vector of boolean constants and exactly one of those
28855 /// constants is true, return the operand index of that true element.
28856 /// Otherwise, return -1.
28857 static int getOneTrueElt(SDValue V) {
28858 // This needs to be a build vector of booleans.
28859 // TODO: Checking for the i1 type matches the IR definition for the mask,
28860 // but the mask check could be loosened to i8 or other types. That might
28861 // also require checking more than 'allOnesValue'; eg, the x86 HW
28862 // instructions only require that the MSB is set for each mask element.
28863   // The ISD::MSTORE comments/definition do not specify how the mask operand
28864   // is interpreted.
28865 auto *BV = dyn_cast<BuildVectorSDNode>(V);
28866 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
28869 int TrueIndex = -1;
28870 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
28871 for (unsigned i = 0; i < NumElts; ++i) {
28872 const SDValue &Op = BV->getOperand(i);
28875 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
28878 if (ConstNode->getAPIntValue().isAllOnesValue()) {
28879 // If we already found a one, this is too many.
28880 if (TrueIndex >= 0)
28888 /// Given a masked memory load/store operation, return true if it has one mask
28889 /// bit set. If it has one mask bit set, then also return the memory address of
28890 /// the scalar element to load/store, the vector index to insert/extract that
28891 /// scalar element, and the alignment for the scalar memory access.
28892 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
28893 SelectionDAG &DAG, SDValue &Addr,
28894 SDValue &Index, unsigned &Alignment) {
28895 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
28896 if (TrueMaskElt < 0)
28899 // Get the address of the one scalar element that is specified by the mask
28900 // using the appropriate offset from the base pointer.
28901 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
28902 Addr = MaskedOp->getBasePtr();
28903 if (TrueMaskElt != 0) {
28904 unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
28905 Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
28908 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
28909 Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
28913 /// If exactly one element of the mask is set for a non-extending masked load,
28914 /// reduce it to a scalar load and a vector insert.
28915 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
28916 /// mask have already been optimized in IR, so we don't bother with those here.
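/// For example (illustrative): a masked load of v4f32 with mask <0,0,1,0>
/// becomes a scalar f32 load at offset 8 from the base pointer, inserted into
/// the pass-through vector at index 2.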
28918 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
28919 TargetLowering::DAGCombinerInfo &DCI) {
28920 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
28921 // However, some target hooks may need to be added to know when the transform
28922 // is profitable. Endianness would also have to be considered.
28924 SDValue Addr, VecIndex;
28925 unsigned Alignment;
28926 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
28929 // Load the one scalar element that is specified by the mask using the
28930 // appropriate offset from the base pointer.
28932 EVT VT = ML->getValueType(0);
28933 EVT EltVT = VT.getVectorElementType();
28935 DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
28936 Alignment, ML->getMemOperand()->getFlags());
28938 // Insert the loaded element into the appropriate place in the vector.
28939 SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, ML->getSrc0(),
28941 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
28945 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
28946 TargetLowering::DAGCombinerInfo &DCI) {
28947 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
28951 EVT VT = ML->getValueType(0);
28953 // If we are loading the first and last elements of a vector, it is safe and
28954 // always faster to load the whole vector. Replace the masked load with a
28955 // vector load and select.
28956 unsigned NumElts = VT.getVectorNumElements();
28957 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
28958 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
28959 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
28960 if (LoadFirstElt && LoadLastElt) {
28961 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
28962 ML->getMemOperand());
28963 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd, ML->getSrc0());
28964 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
28967 // Convert a masked load with a constant mask into a masked load and a select.
28968 // This allows the select operation to use a faster kind of select instruction
28969 // (for example, vblendvps -> vblendps).
28971 // Don't try this if the pass-through operand is already undefined. That would
28972 // cause an infinite loop because that's what we're about to create.
28973 if (ML->getSrc0().isUndef())
28976 // The new masked load has an undef pass-through operand. The select uses the
28977 // original pass-through operand.
28978 SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
28979 ML->getMask(), DAG.getUNDEF(VT),
28980 ML->getMemoryVT(), ML->getMemOperand(),
28981 ML->getExtensionType());
28982 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML, ML->getSrc0());
28984 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
28987 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
28988 TargetLowering::DAGCombinerInfo &DCI,
28989 const X86Subtarget &Subtarget) {
28990 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
28991 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
28992 if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
28994 // TODO: Do some AVX512 subsets benefit from this transform?
28995 if (!Subtarget.hasAVX512())
28996 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
29000 if (Mld->getExtensionType() != ISD::SEXTLOAD)
29003 // Resolve extending loads.
29004 EVT VT = Mld->getValueType(0);
29005 unsigned NumElems = VT.getVectorNumElements();
29006 EVT LdVT = Mld->getMemoryVT();
29009 assert(LdVT != VT && "Cannot extend to the same type");
29010 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
29011 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
29012 // From/To sizes and ElemCount must be pow of two.
29013 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
29014 "Unexpected size for extending masked load");
29016 unsigned SizeRatio = ToSz / FromSz;
29017 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
29019 // Create a type on which we perform the shuffle.
29020 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
29021 LdVT.getScalarType(), NumElems*SizeRatio);
29022 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
29024 // Convert Src0 value.
29025 SDValue WideSrc0 = DAG.getBitcast(WideVecVT, Mld->getSrc0());
29026 if (!Mld->getSrc0().isUndef()) {
29027 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
29028 for (unsigned i = 0; i != NumElems; ++i)
29029 ShuffleVec[i] = i * SizeRatio;
29031 // Can't shuffle using an illegal type.
29032 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
29033 "WideVecVT should be legal");
29034 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
29035 DAG.getUNDEF(WideVecVT), ShuffleVec);
29037 // Prepare the new mask.
29039 SDValue Mask = Mld->getMask();
29040 if (Mask.getValueType() == VT) {
29041 // Mask and original value have the same type.
29042 NewMask = DAG.getBitcast(WideVecVT, Mask);
29043 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
29044 for (unsigned i = 0; i != NumElems; ++i)
29045 ShuffleVec[i] = i * SizeRatio;
29046 for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i)
29047 ShuffleVec[i] = NumElems * SizeRatio;
29048 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
29049 DAG.getConstant(0, dl, WideVecVT),
29052 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
29053 unsigned WidenNumElts = NumElems*SizeRatio;
29054 unsigned MaskNumElts = VT.getVectorNumElements();
29055 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
29058 unsigned NumConcat = WidenNumElts / MaskNumElts;
29059 SmallVector<SDValue, 16> Ops(NumConcat);
29060 SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
29062 for (unsigned i = 1; i != NumConcat; ++i)
29065 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
29068 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
29069 Mld->getBasePtr(), NewMask, WideSrc0,
29070 Mld->getMemoryVT(), Mld->getMemOperand(),
29072 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
29073 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
29076 /// If exactly one element of the mask is set for a non-truncating masked store,
29077 /// reduce it to a vector extract and a scalar store.
29078 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
29079 /// mask have already been optimized in IR, so we don't bother with those here.
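/// For example (illustrative): a masked store of v4i32 with mask <0,1,0,0>
/// becomes an extract of element 1 and a plain i32 store at offset 4 from the
/// base pointer.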
29080 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
29081 SelectionDAG &DAG) {
29082 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
29083 // However, some target hooks may need to be added to know when the transform
29084 // is profitable. Endianness would also have to be considered.
29086 SDValue Addr, VecIndex;
29087 unsigned Alignment;
29088 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
29091 // Extract the one scalar element that is actually being stored.
29093 EVT VT = MS->getValue().getValueType();
29094 EVT EltVT = VT.getVectorElementType();
29095 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
29096 MS->getValue(), VecIndex);
29098 // Store that element at the appropriate offset from the base pointer.
29099 return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
29100 Alignment, MS->getMemOperand()->getFlags());
29103 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
29104 const X86Subtarget &Subtarget) {
29105 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
29106 if (!Mst->isTruncatingStore())
29107 return reduceMaskedStoreToScalarStore(Mst, DAG);
29109 // Resolve truncating stores.
29110 EVT VT = Mst->getValue().getValueType();
29111 unsigned NumElems = VT.getVectorNumElements();
29112 EVT StVT = Mst->getMemoryVT();
29115 assert(StVT != VT && "Cannot truncate to the same type");
29116 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
29117 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
29119 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29121 // The truncating store is legal in some cases. For example
29122 // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
29123   // are dedicated truncating-store instructions.
29124 // In this case we don't need any further transformations.
29125 if (TLI.isTruncStoreLegal(VT, StVT))
29128 // From/To sizes and ElemCount must be pow of two.
29129 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
29130 "Unexpected size for truncating masked store");
29131 // We are going to use the original vector elt for storing.
29132 // Accumulated smaller vector elements must be a multiple of the store size.
29133 assert (((NumElems * FromSz) % ToSz) == 0 &&
29134 "Unexpected ratio for truncating masked store");
29136 unsigned SizeRatio = FromSz / ToSz;
29137 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
29139 // Create a type on which we perform the shuffle.
29140 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
29141 StVT.getScalarType(), NumElems*SizeRatio);
29143 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
29145 SDValue WideVec = DAG.getBitcast(WideVecVT, Mst->getValue());
29146 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
29147 for (unsigned i = 0; i != NumElems; ++i)
29148 ShuffleVec[i] = i * SizeRatio;
29150 // Can't shuffle using an illegal type.
29151 assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
29152 "WideVecVT should be legal");
29154 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
29155 DAG.getUNDEF(WideVecVT),
29159 SDValue Mask = Mst->getMask();
29160 if (Mask.getValueType() == VT) {
29161 // Mask and original value have the same type.
29162 NewMask = DAG.getBitcast(WideVecVT, Mask);
29163 for (unsigned i = 0; i != NumElems; ++i)
29164 ShuffleVec[i] = i * SizeRatio;
29165 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
29166 ShuffleVec[i] = NumElems*SizeRatio;
29167 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
29168 DAG.getConstant(0, dl, WideVecVT),
29171 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
29172 unsigned WidenNumElts = NumElems*SizeRatio;
29173 unsigned MaskNumElts = VT.getVectorNumElements();
29174 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
29177 unsigned NumConcat = WidenNumElts / MaskNumElts;
29178 SmallVector<SDValue, 16> Ops(NumConcat);
29179 SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
29181 for (unsigned i = 1; i != NumConcat; ++i)
29184 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
29187 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
29188 Mst->getBasePtr(), NewMask, StVT,
29189 Mst->getMemOperand(), false);
29192 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
29193 const X86Subtarget &Subtarget) {
29194 StoreSDNode *St = cast<StoreSDNode>(N);
29195 EVT VT = St->getValue().getValueType();
29196 EVT StVT = St->getMemoryVT();
29198 SDValue StoredVal = St->getOperand(1);
29199 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29201 // If we are saving a concatenation of two XMM registers and 32-byte stores
29202 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
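  // For example (illustrative): a 32-byte store of a v8f32 value on such a
  // target is split into two 16-byte stores of its low and high halves at
  // offsets 0 and 16, joined by a TokenFactor.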
29204 unsigned AddressSpace = St->getAddressSpace();
29205 unsigned Alignment = St->getAlignment();
29206 if (VT.is256BitVector() && StVT == VT &&
29207 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
29208 AddressSpace, Alignment, &Fast) &&
29210 unsigned NumElems = VT.getVectorNumElements();
29214 SDValue Value0 = extract128BitVector(StoredVal, 0, DAG, dl);
29215 SDValue Value1 = extract128BitVector(StoredVal, NumElems / 2, DAG, dl);
29217 SDValue Ptr0 = St->getBasePtr();
29218 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 16, dl);
29221 DAG.getStore(St->getChain(), dl, Value0, Ptr0, St->getPointerInfo(),
29222 Alignment, St->getMemOperand()->getFlags());
29224 DAG.getStore(St->getChain(), dl, Value1, Ptr1, St->getPointerInfo(),
29225 std::min(16U, Alignment), St->getMemOperand()->getFlags());
29226 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
29229 // Optimize trunc store (of multiple scalars) to shuffle and store.
29230 // First, pack all of the elements in one place. Next, store to memory
29231 // in fewer chunks.
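  // For example (illustrative): a truncating store of v8i32 as v8i16 bitcasts
  // the value to v16i16, shuffles the eight live lanes into the low half, and
  // then emits that 128-bit half as one or two wide integer (or f64) stores
  // instead of eight narrow element stores.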
29232 if (St->isTruncatingStore() && VT.isVector()) {
29233 // Check if we can detect an AVG pattern from the truncation. If yes,
29234 // replace the trunc store by a normal store with the result of X86ISD::AVG
29236 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
29238 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
29239 St->getPointerInfo(), St->getAlignment(),
29240 St->getMemOperand()->getFlags());
29242 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29243 unsigned NumElems = VT.getVectorNumElements();
29244 assert(StVT != VT && "Cannot truncate to the same type");
29245 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
29246 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
29248 // The truncating store is legal in some cases. For example
29249 // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
29250     // are dedicated truncating-store instructions.
29251 // In this case we don't need any further transformations.
29252 if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
29255 // From, To sizes and ElemCount must be pow of two
29256 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
29257 // We are going to use the original vector elt for storing.
29258 // Accumulated smaller vector elements must be a multiple of the store size.
29259 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
29261 unsigned SizeRatio = FromSz / ToSz;
29263 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
29265 // Create a type on which we perform the shuffle
29266 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
29267 StVT.getScalarType(), NumElems*SizeRatio);
29269 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
29271 SDValue WideVec = DAG.getBitcast(WideVecVT, St->getValue());
29272 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
29273 for (unsigned i = 0; i != NumElems; ++i)
29274 ShuffleVec[i] = i * SizeRatio;
29276 // Can't shuffle using an illegal type.
29277 if (!TLI.isTypeLegal(WideVecVT))
29280 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
29281 DAG.getUNDEF(WideVecVT),
29283 // At this point all of the data is stored at the bottom of the
29284 // register. We now need to save it to mem.
29286 // Find the largest store unit
29287 MVT StoreType = MVT::i8;
29288 for (MVT Tp : MVT::integer_valuetypes()) {
29289 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
29293     // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
29294 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
29295 (64 <= NumElems * ToSz))
29296 StoreType = MVT::f64;
29298 // Bitcast the original vector into a vector of store-size units
29299 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
29300 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
29301 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
29302 SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
29303 SmallVector<SDValue, 8> Chains;
29304 SDValue Ptr = St->getBasePtr();
29306 // Perform one or more big stores into memory.
29307 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
29308 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
29309 StoreType, ShuffWide,
29310 DAG.getIntPtrConstant(i, dl));
29312 DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
29313 St->getAlignment(), St->getMemOperand()->getFlags());
29314 Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
29315 Chains.push_back(Ch);
29318 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
29321 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
29322 // the FP state in cases where an emms may be missing.
29323 // A preferable solution to the general problem is to figure out the right
29324 // places to insert EMMS. This qualifies as a quick hack.
29326 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
29327 if (VT.getSizeInBits() != 64)
29330 const Function *F = DAG.getMachineFunction().getFunction();
29331 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
29333 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
29334 if ((VT.isVector() ||
29335 (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
29336 isa<LoadSDNode>(St->getValue()) &&
29337 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
29338 St->getChain().hasOneUse() && !St->isVolatile()) {
29339 SDNode* LdVal = St->getValue().getNode();
29340 LoadSDNode *Ld = nullptr;
29341 int TokenFactorIndex = -1;
29342 SmallVector<SDValue, 8> Ops;
29343 SDNode* ChainVal = St->getChain().getNode();
29344 // Must be a store of a load. We currently handle two cases: the load
29345 // is a direct child, and it's under an intervening TokenFactor. It is
29346 // possible to dig deeper under nested TokenFactors.
29347 if (ChainVal == LdVal)
29348 Ld = cast<LoadSDNode>(St->getChain());
29349 else if (St->getValue().hasOneUse() &&
29350 ChainVal->getOpcode() == ISD::TokenFactor) {
29351 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
29352 if (ChainVal->getOperand(i).getNode() == LdVal) {
29353 TokenFactorIndex = i;
29354 Ld = cast<LoadSDNode>(St->getValue());
29356 Ops.push_back(ChainVal->getOperand(i));
29360 if (!Ld || !ISD::isNormalLoad(Ld))
29363 // If this is not the MMX case, i.e. we are just turning i64 load/store
29364 // into f64 load/store, avoid the transformation if there are multiple
29365 // uses of the loaded value.
29366 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
29371 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
29372 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
29374 if (Subtarget.is64Bit() || F64IsLegal) {
29375 MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
29376 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
29377 Ld->getPointerInfo(), Ld->getAlignment(),
29378 Ld->getMemOperand()->getFlags());
29379 SDValue NewChain = NewLd.getValue(1);
29380 if (TokenFactorIndex >= 0) {
29381 Ops.push_back(NewChain);
29382 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
29384 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
29385 St->getPointerInfo(), St->getAlignment(),
29386 St->getMemOperand()->getFlags());
29389 // Otherwise, lower to two pairs of 32-bit loads / stores.
29390 SDValue LoAddr = Ld->getBasePtr();
29391 SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
29393 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
29394 Ld->getPointerInfo(), Ld->getAlignment(),
29395 Ld->getMemOperand()->getFlags());
29396 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
29397 Ld->getPointerInfo().getWithOffset(4),
29398 MinAlign(Ld->getAlignment(), 4),
29399 Ld->getMemOperand()->getFlags());
29401 SDValue NewChain = LoLd.getValue(1);
29402 if (TokenFactorIndex >= 0) {
29403 Ops.push_back(LoLd);
29404 Ops.push_back(HiLd);
29405 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
29408 LoAddr = St->getBasePtr();
29409 HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
29412 DAG.getStore(NewChain, StDL, LoLd, LoAddr, St->getPointerInfo(),
29413 St->getAlignment(), St->getMemOperand()->getFlags());
29414 SDValue HiSt = DAG.getStore(
29415 NewChain, StDL, HiLd, HiAddr, St->getPointerInfo().getWithOffset(4),
29416 MinAlign(St->getAlignment(), 4), St->getMemOperand()->getFlags());
29417 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
29420 // This is similar to the above case, but here we handle a scalar 64-bit
29421 // integer store that is extracted from a vector on a 32-bit target.
29422 // If we have SSE2, then we can treat it like a floating-point double
29423 // to get past legalization. The execution dependencies fixup pass will
29424 // choose the optimal machine instruction for the store if this really is
29425 // an integer or v2f32 rather than an f64.
29426 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
29427 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
29428 SDValue OldExtract = St->getOperand(1);
29429 SDValue ExtOp0 = OldExtract.getOperand(0);
29430 unsigned VecSize = ExtOp0.getValueSizeInBits();
29431 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
29432 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
29433 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
29434 BitCast, OldExtract.getOperand(1));
29435 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
29436 St->getPointerInfo(), St->getAlignment(),
29437 St->getMemOperand()->getFlags());
29443 /// Return 'true' if this vector operation is "horizontal"
29444 /// and return the operands for the horizontal operation in LHS and RHS. A
29445 /// horizontal operation performs the binary operation on successive elements
29446 /// of its first operand, then on successive elements of its second operand,
29447 /// returning the resulting values in a vector. For example, if
29448 /// A = < float a0, float a1, float a2, float a3 >
29450 /// B = < float b0, float b1, float b2, float b3 >
29451 /// then the result of doing a horizontal operation on A and B is
29452 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
29453 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
29454 /// A horizontal-op B, for some already available A and B, and if so then LHS is
29455 /// set to A, RHS to B, and the routine returns 'true'.
29456 /// Note that the binary operation should have the property that if one of the
29457 /// operands is UNDEF then the result is UNDEF.
29458 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
29459 // Look for the following pattern: if
29460 // A = < float a0, float a1, float a2, float a3 >
29461 // B = < float b0, float b1, float b2, float b3 >
29463 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
29464 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
29465 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
29466 // which is A horizontal-op B.
29468 // At least one of the operands should be a vector shuffle.
29469 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
29470 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
29473 MVT VT = LHS.getSimpleValueType();
29475 assert((VT.is128BitVector() || VT.is256BitVector()) &&
29476 "Unsupported vector type for horizontal add/sub");
29478 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
29479 // operate independently on 128-bit lanes.
29480 unsigned NumElts = VT.getVectorNumElements();
29481 unsigned NumLanes = VT.getSizeInBits()/128;
29482 unsigned NumLaneElts = NumElts / NumLanes;
29483 assert((NumLaneElts % 2 == 0) &&
29484 "Vector type should have an even number of elements in each lane");
29485 unsigned HalfLaneElts = NumLaneElts/2;
29487 // View LHS in the form
29488 // LHS = VECTOR_SHUFFLE A, B, LMask
29489 // If LHS is not a shuffle then pretend it is the shuffle
29490 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
29491 // NOTE: in what follows a default initialized SDValue represents an UNDEF of type VT.
29494 SmallVector<int, 16> LMask(NumElts);
29495 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
29496 if (!LHS.getOperand(0).isUndef())
29497 A = LHS.getOperand(0);
29498 if (!LHS.getOperand(1).isUndef())
29499 B = LHS.getOperand(1);
29500 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
29501 std::copy(Mask.begin(), Mask.end(), LMask.begin());
29503 if (!LHS.isUndef())
29505 for (unsigned i = 0; i != NumElts; ++i)
29509 // Likewise, view RHS in the form
29510 // RHS = VECTOR_SHUFFLE C, D, RMask
29512 SmallVector<int, 16> RMask(NumElts);
29513 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
29514 if (!RHS.getOperand(0).isUndef())
29515 C = RHS.getOperand(0);
29516 if (!RHS.getOperand(1).isUndef())
29517 D = RHS.getOperand(1);
29518 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
29519 std::copy(Mask.begin(), Mask.end(), RMask.begin());
29521 if (!RHS.isUndef())
29523 for (unsigned i = 0; i != NumElts; ++i)
29527 // Check that the shuffles are both shuffling the same vectors.
29528 if (!(A == C && B == D) && !(A == D && B == C))
29531 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
29532 if (!A.getNode() && !B.getNode())
29535 // If A and B occur in reverse order in RHS, then "swap" them (which means
29536 // rewriting the mask).
29538 ShuffleVectorSDNode::commuteMask(RMask);
29540 // At this point LHS and RHS are equivalent to
29541 // LHS = VECTOR_SHUFFLE A, B, LMask
29542 // RHS = VECTOR_SHUFFLE A, B, RMask
29543 // Check that the masks correspond to performing a horizontal operation.
29544 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
29545 for (unsigned i = 0; i != NumLaneElts; ++i) {
29546 int LIdx = LMask[i+l], RIdx = RMask[i+l];
29548 // Ignore any UNDEF components.
29549 if (LIdx < 0 || RIdx < 0 ||
29550 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
29551 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
29554 // Check that successive elements are being operated on. If not, this is
29555 // not a horizontal operation.
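// Worked example: for v8i16 (NumLaneElts = 8, HalfLaneElts = 4, l = 0),
// i = 0..3 expect index pairs (0,1),(2,3),(4,5),(6,7), i.e. successive
// elements of A, and i = 4..7 expect (8,9),(10,11),(12,13),(14,15), i.e.
// successive elements of B.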
29556 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
29557 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
29558 if (!(LIdx == Index && RIdx == Index + 1) &&
29559 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
29564 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
29565 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
29569 /// Do target-specific dag combines on floating-point adds/subs.
29570 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
29571 const X86Subtarget &Subtarget) {
29572 EVT VT = N->getValueType(0);
29573 SDValue LHS = N->getOperand(0);
29574 SDValue RHS = N->getOperand(1);
29575 bool IsFadd = N->getOpcode() == ISD::FADD;
29576 assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
29578 // Try to synthesize horizontal add/sub from adds/subs of shuffles.
29579 if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
29580 (Subtarget.hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
29581 isHorizontalBinOp(LHS, RHS, IsFadd)) {
29582 auto NewOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
29583 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
29588 /// Truncate a group of v4i32 into v16i8/v8i16 using X86ISD::PACKUS.
29590 combineVectorTruncationWithPACKUS(SDNode *N, SelectionDAG &DAG,
29591 SmallVector<SDValue, 8> &Regs) {
29592 assert(Regs.size() > 0 && (Regs[0].getValueType() == MVT::v4i32 ||
29593 Regs[0].getValueType() == MVT::v2i64));
29594 EVT OutVT = N->getValueType(0);
29595 EVT OutSVT = OutVT.getVectorElementType();
29596 EVT InVT = Regs[0].getValueType();
29597 EVT InSVT = InVT.getVectorElementType();
29600 // First, use mask to unset all bits that won't appear in the result.
29601 assert((OutSVT == MVT::i8 || OutSVT == MVT::i16) &&
29602 "OutSVT can only be either i8 or i16.");
29604 APInt::getLowBitsSet(InSVT.getSizeInBits(), OutSVT.getSizeInBits());
29605 SDValue MaskVal = DAG.getConstant(Mask, DL, InVT);
29606 for (auto &Reg : Regs)
29607 Reg = DAG.getNode(ISD::AND, DL, InVT, MaskVal, Reg);
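// For example, when truncating i32 elements to i8 the mask is 0x000000FF;
// clearing the high bits up front guarantees that the unsigned-saturating
// PACKUS steps below can never clamp a value, so the stored low bits survive
// unchanged.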
29609 MVT UnpackedVT, PackedVT;
29610 if (OutSVT == MVT::i8) {
29611 UnpackedVT = MVT::v8i16;
29612 PackedVT = MVT::v16i8;
29614 UnpackedVT = MVT::v4i32;
29615 PackedVT = MVT::v8i16;
29618 // In each iteration, truncate the type by a half size.
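// e.g. with SSE4.1, a v8i32 -> v8i16 truncation arrives here as two v4i32
// registers and needs a single iteration: one PACKUS (PACKUSDW) merges them
// into the final v8i16.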
29619 auto RegNum = Regs.size();
29620 for (unsigned j = 1, e = InSVT.getSizeInBits() / OutSVT.getSizeInBits();
29621 j < e; j *= 2, RegNum /= 2) {
29622 for (unsigned i = 0; i < RegNum; i++)
29623 Regs[i] = DAG.getBitcast(UnpackedVT, Regs[i]);
29624 for (unsigned i = 0; i < RegNum / 2; i++)
29625 Regs[i] = DAG.getNode(X86ISD::PACKUS, DL, PackedVT, Regs[i * 2],
29629 // If the type of the result is v8i8, we need to do one more X86ISD::PACKUS, and
29630 // then extract a subvector as the result since v8i8 is not a legal type.
29631 if (OutVT == MVT::v8i8) {
29632 Regs[0] = DAG.getNode(X86ISD::PACKUS, DL, PackedVT, Regs[0], Regs[0]);
29633 Regs[0] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, Regs[0],
29634 DAG.getIntPtrConstant(0, DL));
29636 } else if (RegNum > 1) {
29637 Regs.resize(RegNum);
29638 return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Regs);
29643 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
29645 combineVectorTruncationWithPACKSS(SDNode *N, SelectionDAG &DAG,
29646 SmallVector<SDValue, 8> &Regs) {
29647 assert(Regs.size() > 0 && Regs[0].getValueType() == MVT::v4i32);
29648 EVT OutVT = N->getValueType(0);
29651 // Shift left by 16 bits, then arithmetic-shift right by 16 bits.
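// Shifting each i32 left and then arithmetically right by 16 sign-extends
// bit 15 across the upper half, so every lane holds a sign-extended i16.
// PACKSS's signed saturation then returns exactly the low 16 bits of each
// element.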
29652 SDValue ShAmt = DAG.getConstant(16, DL, MVT::i32);
29653 for (auto &Reg : Regs) {
29654 Reg = getTargetVShiftNode(X86ISD::VSHLI, DL, MVT::v4i32, Reg, ShAmt, DAG);
29655 Reg = getTargetVShiftNode(X86ISD::VSRAI, DL, MVT::v4i32, Reg, ShAmt, DAG);
29658 for (unsigned i = 0, e = Regs.size() / 2; i < e; i++)
29659 Regs[i] = DAG.getNode(X86ISD::PACKSS, DL, MVT::v8i16, Regs[i * 2],
29662 if (Regs.size() > 2) {
29663 Regs.resize(Regs.size() / 2);
29664 return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Regs);
29669 /// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
29670 /// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
29671 /// legalization the truncation will be translated into a BUILD_VECTOR whose
29672 /// elements are each extracted from a vector and then truncated, and it is
29673 /// difficult to do this optimization on that form.
29674 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
29675 const X86Subtarget &Subtarget) {
29676 EVT OutVT = N->getValueType(0);
29677 if (!OutVT.isVector())
29680 SDValue In = N->getOperand(0);
29681 if (!In.getValueType().isSimple())
29684 EVT InVT = In.getValueType();
29685 unsigned NumElems = OutVT.getVectorNumElements();
29687 // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
29688 // SSE2, and we need to take care of it specially.
29689 // AVX512 provides vpmovdb.
29690 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
29693 EVT OutSVT = OutVT.getVectorElementType();
29694 EVT InSVT = InVT.getVectorElementType();
29695 if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
29696 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
29700 // SSSE3's pshufb results in fewer instructions in the cases below.
29701 if (Subtarget.hasSSSE3() && NumElems == 8 &&
29702 ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
29703 (InSVT == MVT::i32 && OutSVT == MVT::i16)))
29708 // Split a long vector into vectors of legal type.
29709 unsigned RegNum = InVT.getSizeInBits() / 128;
29710 SmallVector<SDValue, 8> SubVec(RegNum);
29711 unsigned NumSubRegElts = 128 / InSVT.getSizeInBits();
29712 EVT SubRegVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubRegElts);
29714 for (unsigned i = 0; i < RegNum; i++)
29715 SubVec[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubRegVT, In,
29716 DAG.getIntPtrConstant(i * NumSubRegElts, DL));
29718 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
29719 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
29720 // truncate 2 x v4i32 to v8i16.
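// For example, a v8i32 -> v8i16 truncation takes the PACKUS path (PACKUSDW)
// when SSE4.1 is available and otherwise falls back to the PACKSS-based
// helper above, which works on plain SSE2.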
29721 if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
29722 return combineVectorTruncationWithPACKUS(N, DAG, SubVec);
29723 else if (InSVT == MVT::i32)
29724 return combineVectorTruncationWithPACKSS(N, DAG, SubVec);
29729 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
29730 const X86Subtarget &Subtarget) {
29731 EVT VT = N->getValueType(0);
29732 SDValue Src = N->getOperand(0);
29735 // Try to detect AVG pattern first.
29736 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
29739 // Detect bitcasts from x86mmx to i32 where the bitcast source is a direct
29740 // MMX result.
29741 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
29742 SDValue BCSrc = Src.getOperand(0);
29743 if (BCSrc.getValueType() == MVT::x86mmx)
29744 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
29747 return combineVectorTruncation(N, DAG, Subtarget);
29750 /// Do target-specific dag combines on floating point negations.
29751 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
29752 const X86Subtarget &Subtarget) {
29753 EVT VT = N->getValueType(0);
29754 EVT SVT = VT.getScalarType();
29755 SDValue Arg = N->getOperand(0);
29758 // Let legalize expand this if it isn't a legal type yet.
29759 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
29762 // If we're negating a FMUL node on a target with FMA, then we can avoid the
29763 // use of a constant by performing (-0 - A*B) instead.
29764 // FIXME: Check rounding control flags as well once it becomes available.
29765 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
29766 Arg->getFlags()->hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
29767 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
29768 return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
29769 Arg.getOperand(1), Zero);
29772 // If we're negating a FMA node, then we can adjust the
29773 // instruction to include the extra negation.
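// For example, FNEG(FMADD(a, b, c)) = -(a*b + c) = -(a*b) - c, which is
// exactly FNMSUB(a, b, c); the other three cases below follow the same
// sign-flipping rule.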
29774 if (Arg.hasOneUse()) {
29775 switch (Arg.getOpcode()) {
29776 case X86ISD::FMADD:
29777 return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
29778 Arg.getOperand(1), Arg.getOperand(2));
29779 case X86ISD::FMSUB:
29780 return DAG.getNode(X86ISD::FNMADD, DL, VT, Arg.getOperand(0),
29781 Arg.getOperand(1), Arg.getOperand(2));
29782 case X86ISD::FNMADD:
29783 return DAG.getNode(X86ISD::FMSUB, DL, VT, Arg.getOperand(0),
29784 Arg.getOperand(1), Arg.getOperand(2));
29785 case X86ISD::FNMSUB:
29786 return DAG.getNode(X86ISD::FMADD, DL, VT, Arg.getOperand(0),
29787 Arg.getOperand(1), Arg.getOperand(2));
29793 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
29794 const X86Subtarget &Subtarget) {
29795 EVT VT = N->getValueType(0);
29796 if (VT.is512BitVector() && !Subtarget.hasDQI()) {
29797 // VXORPS, VORPS, VANDPS, and VANDNPS are supported only under the DQ extension.
29798 // These logic operations may be executed in the integer domain.
29800 MVT IntScalar = MVT::getIntegerVT(VT.getScalarSizeInBits());
29801 MVT IntVT = MVT::getVectorVT(IntScalar, VT.getVectorNumElements());
29803 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
29804 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
29805 unsigned IntOpcode = 0;
29806 switch (N->getOpcode()) {
29807 default: llvm_unreachable("Unexpected FP logic op");
29808 case X86ISD::FOR: IntOpcode = ISD::OR; break;
29809 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
29810 case X86ISD::FAND: IntOpcode = ISD::AND; break;
29811 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
29813 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
29814 return DAG.getBitcast(VT, IntOp);
29818 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
29819 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
29820 const X86Subtarget &Subtarget) {
29821 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
29823 // F[X]OR(0.0, x) -> x
29824 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
29825 if (C->getValueAPF().isPosZero())
29826 return N->getOperand(1);
29828 // F[X]OR(x, 0.0) -> x
29829 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
29830 if (C->getValueAPF().isPosZero())
29831 return N->getOperand(0);
29833 return lowerX86FPLogicOp(N, DAG, Subtarget);
29836 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
29837 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
29838 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
29840 // Only perform optimizations if UnsafeMath is used.
29841 if (!DAG.getTarget().Options.UnsafeFPMath)
29844 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
29845 // into FMAXC and FMINC, which are commutative operations.
29846 unsigned NewOp = 0;
29847 switch (N->getOpcode()) {
29848 default: llvm_unreachable("unknown opcode");
29849 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
29850 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
29853 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
29854 N->getOperand(0), N->getOperand(1));
29857 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
29858 const X86Subtarget &Subtarget) {
29859 if (Subtarget.useSoftFloat())
29862 // TODO: Check for global or instruction-level "nnan". In that case, we
29863 // should be able to lower to FMAX/FMIN alone.
29864 // TODO: If an operand is already known to be a NaN or not a NaN, this
29865 // should be an optional swap and FMAX/FMIN.
29867 EVT VT = N->getValueType(0);
29868 if (!((Subtarget.hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
29869 (Subtarget.hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
29870 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
29873 // This takes at least 3 instructions, so favor a library call when operating
29874 // on a scalar and minimizing code size.
29875 if (!VT.isVector() && DAG.getMachineFunction().getFunction()->optForMinSize())
29878 SDValue Op0 = N->getOperand(0);
29879 SDValue Op1 = N->getOperand(1);
29881 EVT SetCCType = DAG.getTargetLoweringInfo().getSetCCResultType(
29882 DAG.getDataLayout(), *DAG.getContext(), VT);
29884 // There are 4 possibilities involving NaN inputs, and these are the required
29885 // outputs:
29886 //                   Op1
29887 //               Num     NaN
29888 //            ----------------
29889 //     Num    |  Max  |  Op0 |
29890 // Op0        ----------------
29891 //     NaN    |  Op1  |  NaN |
29892 //            ----------------
29893 //
29894 // The SSE FP max/min instructions were not designed for this case, but rather
29895 // to implement:
29896 //   Min = Op1 < Op0 ? Op1 : Op0
29897 //   Max = Op1 > Op0 ? Op1 : Op0
29899 // So they always return Op0 if either input is a NaN. However, we can still
29900 // use those instructions for fmaxnum by selecting away a NaN input.
29902 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
29903 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
29904 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
29905 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType , Op0, Op0, ISD::SETUO);
29907 // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
29908 // are NaN, the NaN value of Op1 is the result.
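// e.g. for fmaxnum(Op0 = NaN, Op1 = 2.0): FMAX(Op1, Op0) returns Op0 = NaN,
// but IsOp0Nan is true, so the select below yields Op1 = 2.0. With the
// operands the other way around (Op0 = 2.0, Op1 = NaN), FMAX returns 2.0 and
// Op0 is not NaN, so 2.0 is again the result.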
29909 auto SelectOpcode = VT.isVector() ? ISD::VSELECT : ISD::SELECT;
29910 return DAG.getNode(SelectOpcode, DL, VT, IsOp0Nan, Op1, MinOrMax);
29913 /// Do target-specific dag combines on X86ISD::FAND nodes.
29914 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
29915 const X86Subtarget &Subtarget) {
29916 // FAND(0.0, x) -> 0.0
29917 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
29918 if (C->getValueAPF().isPosZero())
29919 return N->getOperand(0);
29921 // FAND(x, 0.0) -> 0.0
29922 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
29923 if (C->getValueAPF().isPosZero())
29924 return N->getOperand(1);
29926 return lowerX86FPLogicOp(N, DAG, Subtarget);
29929 /// Do target-specific dag combines on X86ISD::FANDN nodes
29930 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
29931 const X86Subtarget &Subtarget) {
29932 // FANDN(0.0, x) -> x
29933 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
29934 if (C->getValueAPF().isPosZero())
29935 return N->getOperand(1);
29937 // FANDN(x, 0.0) -> 0.0
29938 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
29939 if (C->getValueAPF().isPosZero())
29940 return N->getOperand(1);
29942 return lowerX86FPLogicOp(N, DAG, Subtarget);
29945 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
29946 TargetLowering::DAGCombinerInfo &DCI) {
29947 // BT ignores high bits in the bit index operand.
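// For example, for a 32-bit BT only the low 5 bits of the index are used, so
// something like (and %idx, 31) feeding the index can be simplified away by
// the demanded-bits logic below.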
29948 SDValue Op1 = N->getOperand(1);
29949 if (Op1.hasOneUse()) {
29950 unsigned BitWidth = Op1.getValueSizeInBits();
29951 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
29952 APInt KnownZero, KnownOne;
29953 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
29954 !DCI.isBeforeLegalizeOps());
29955 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29956 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
29957 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
29958 DCI.CommitTargetLoweringOpt(TLO);
29963 static SDValue combineVZextMovl(SDNode *N, SelectionDAG &DAG) {
29964 SDValue Op = peekThroughBitcasts(N->getOperand(0));
29965 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
29966 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
29967 VT.getVectorElementType().getSizeInBits() ==
29968 OpVT.getVectorElementType().getSizeInBits()) {
29969 return DAG.getBitcast(VT, Op);
29974 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
29975 const X86Subtarget &Subtarget) {
29976 EVT VT = N->getValueType(0);
29977 if (!VT.isVector())
29980 SDValue N0 = N->getOperand(0);
29981 SDValue N1 = N->getOperand(1);
29982 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
29985 // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
29986 // AVX2, since there is no sign-extended shift-right operation on vectors
29987 // with 64-bit elements.
29988 // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
29989 //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
29990 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
29991 N0.getOpcode() == ISD::SIGN_EXTEND)) {
29992 SDValue N00 = N0.getOperand(0);
29994 // On AVX2, an EXTLOAD has a better lowering: it may be replaced with an
29995 // X86ISD::VSEXT node.
29996 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
29997 if (!ISD::isNormalLoad(N00.getNode()))
30000 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
30001 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
30003 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
30009 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
30010 /// Promoting a sign extension ahead of an 'add nsw' exposes opportunities
30011 /// to combine math ops, use an LEA, or use a complex addressing mode. This can
30012 /// eliminate extend, add, and shift instructions.
30013 static SDValue promoteSextBeforeAddNSW(SDNode *Sext, SelectionDAG &DAG,
30014 const X86Subtarget &Subtarget) {
30015 // TODO: This should be valid for other integer types.
30016 EVT VT = Sext->getValueType(0);
30017 if (VT != MVT::i64)
30020 // We need an 'add nsw' feeding into the 'sext'.
30021 SDValue Add = Sext->getOperand(0);
30022 if (Add.getOpcode() != ISD::ADD || !Add->getFlags()->hasNoSignedWrap())
30025 // Having a constant operand to the 'add' ensures that we are not increasing
30026 // the instruction count because the constant is extended for free below.
30027 // A constant operand can also become the displacement field of an LEA.
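// For example, (i64 sext (i32 add nsw %x, 42)) becomes
// (i64 add nsw (sext %x), 42); if the sext result also feeds an add or shl,
// the combined expression can then fold into a single LEA addressing mode.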
30028 auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
30032 // Don't make the 'add' bigger if there's no hope of combining it with some
30033 // other 'add' or 'shl' instruction.
30034 // TODO: It may be profitable to generate simpler LEA instructions in place
30035 // of single 'add' instructions, but the cost model for selecting an LEA
30036 // currently has a high threshold.
30037 bool HasLEAPotential = false;
30038 for (auto *User : Sext->uses()) {
30039 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
30040 HasLEAPotential = true;
30044 if (!HasLEAPotential)
30047 // Everything looks good, so pull the 'sext' ahead of the 'add'.
30048 int64_t AddConstant = AddOp1->getSExtValue();
30049 SDValue AddOp0 = Add.getOperand(0);
30050 SDValue NewSext = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(Sext), VT, AddOp0);
30051 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
30053 // The wider add is guaranteed to not wrap because both operands are sign-extended.
30056 Flags.setNoSignedWrap(true);
30057 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewSext, NewConstant, &Flags);
30060 /// (i8,i32 {s/z}ext ({s/u}divrem (i8 x, i8 y)) ->
30061 /// (i8,i32 ({s/u}divrem_sext_hreg (i8 x, i8 y)
30062 /// This exposes the {s/z}ext to the sdivrem lowering, so that it directly
30063 /// extends from AH (which we otherwise need to do contortions to access).
30064 static SDValue getDivRem8(SDNode *N, SelectionDAG &DAG) {
30065 SDValue N0 = N->getOperand(0);
30066 auto OpcodeN = N->getOpcode();
30067 auto OpcodeN0 = N0.getOpcode();
30068 if (!((OpcodeN == ISD::SIGN_EXTEND && OpcodeN0 == ISD::SDIVREM) ||
30069 (OpcodeN == ISD::ZERO_EXTEND && OpcodeN0 == ISD::UDIVREM)))
30072 EVT VT = N->getValueType(0);
30073 EVT InVT = N0.getValueType();
30074 if (N0.getResNo() != 1 || InVT != MVT::i8 || VT != MVT::i32)
30077 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
30078 auto DivRemOpcode = OpcodeN0 == ISD::SDIVREM ? X86ISD::SDIVREM8_SEXT_HREG
30079 : X86ISD::UDIVREM8_ZEXT_HREG;
30080 SDValue R = DAG.getNode(DivRemOpcode, SDLoc(N), NodeTys, N0.getOperand(0),
30082 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
30083 return R.getValue(1);
30086 /// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
30087 /// ZERO_EXTEND_VECTOR_INREG. This requires splitting the input (or
30088 /// concatenating it with UNDEFs) into vectors of the same size as the target
30089 /// type, which are then extended from their lowest elements.
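/// For example, (v4i32 zext (v4i8 x)) is handled by concatenating x with
/// UNDEFs up to v16i8 and emitting a ZERO_EXTEND_VECTOR_INREG of the low four
/// elements (typically a PMOVZX on SSE4.1 and later).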
30090 static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
30091 TargetLowering::DAGCombinerInfo &DCI,
30092 const X86Subtarget &Subtarget) {
30093 unsigned Opcode = N->getOpcode();
30094 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
30096 if (!DCI.isBeforeLegalizeOps())
30098 if (!Subtarget.hasSSE2())
30101 SDValue N0 = N->getOperand(0);
30102 EVT VT = N->getValueType(0);
30103 EVT SVT = VT.getScalarType();
30104 EVT InVT = N0.getValueType();
30105 EVT InSVT = InVT.getScalarType();
30107 // Input type must be a vector and we must be extending legal integer types.
30108 if (!VT.isVector())
30110 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
30112 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
30115 // On AVX2+ targets, if the input/output types are both legal then we will be
30116 // able to use SIGN_EXTEND/ZERO_EXTEND directly.
30117 if (Subtarget.hasInt256() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
30118 DAG.getTargetLoweringInfo().isTypeLegal(InVT))
30123 auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) {
30124 EVT InVT = N.getValueType();
30125 EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
30126 Size / InVT.getScalarSizeInBits());
30127 SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
30128 DAG.getUNDEF(InVT));
30130 return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
30133 // If the target size is less than 128 bits, widen the input to a type that
30134 // would extend to 128 bits, extend that, and extract the original target vector.
30135 if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) {
30136 unsigned Scale = 128 / VT.getSizeInBits();
30138 EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
30139 SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
30140 SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
30141 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
30142 DAG.getIntPtrConstant(0, DL));
30145 // If target-size is 128-bits (or 256-bits on AVX2 target), then convert to
30146 // ISD::*_EXTEND_VECTOR_INREG which ensures lowering to X86ISD::V*EXT.
30147 // Also use this if we don't have SSE41, to allow the legalizer to do its job.
30148 if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
30149 (VT.is256BitVector() && Subtarget.hasInt256())) {
30150 SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
30151 return Opcode == ISD::SIGN_EXTEND
30152 ? DAG.getSignExtendVectorInReg(ExOp, DL, VT)
30153 : DAG.getZeroExtendVectorInReg(ExOp, DL, VT);
30156 // On pre-AVX2 targets, split into 128-bit nodes of
30157 // ISD::*_EXTEND_VECTOR_INREG.
30158 if (!Subtarget.hasInt256() && !(VT.getSizeInBits() % 128)) {
30159 unsigned NumVecs = VT.getSizeInBits() / 128;
30160 unsigned NumSubElts = 128 / SVT.getSizeInBits();
30161 EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
30162 EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
30164 SmallVector<SDValue, 8> Opnds;
30165 for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) {
30166 SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
30167 DAG.getIntPtrConstant(Offset, DL));
30168 SrcVec = ExtendVecSize(DL, SrcVec, 128);
30169 SrcVec = Opcode == ISD::SIGN_EXTEND
30170 ? DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT)
30171 : DAG.getZeroExtendVectorInReg(SrcVec, DL, SubVT);
30172 Opnds.push_back(SrcVec);
30174 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
30180 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
30181 TargetLowering::DAGCombinerInfo &DCI,
30182 const X86Subtarget &Subtarget) {
30183 SDValue N0 = N->getOperand(0);
30184 EVT VT = N->getValueType(0);
30185 EVT InVT = N0.getValueType();
30188 if (SDValue DivRem8 = getDivRem8(N, DAG))
30191 if (!DCI.isBeforeLegalizeOps()) {
30192 if (InVT == MVT::i1) {
30193 SDValue Zero = DAG.getConstant(0, DL, VT);
30195 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
30196 return DAG.getNode(ISD::SELECT, DL, VT, N0, AllOnes, Zero);
30201 if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
30204 if (Subtarget.hasAVX() && VT.is256BitVector())
30205 if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
30208 if (SDValue NewAdd = promoteSextBeforeAddNSW(N, DAG, Subtarget))
30214 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
30215 const X86Subtarget &Subtarget) {
30217 EVT VT = N->getValueType(0);
30219 // Let legalize expand this if it isn't a legal type yet.
30220 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
30223 EVT ScalarVT = VT.getScalarType();
30224 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
30227 SDValue A = N->getOperand(0);
30228 SDValue B = N->getOperand(1);
30229 SDValue C = N->getOperand(2);
30231 bool NegA = (A.getOpcode() == ISD::FNEG);
30232 bool NegB = (B.getOpcode() == ISD::FNEG);
30233 bool NegC = (C.getOpcode() == ISD::FNEG);
30235 // The multiplication is negated when exactly one of NegA and NegB is set (NegA xor NegB).
30236 bool NegMul = (NegA != NegB);
30238 A = A.getOperand(0);
30240 B = B.getOperand(0);
30242 C = C.getOperand(0);
30246 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
30248 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
30250 return DAG.getNode(Opcode, dl, VT, A, B, C);
30253 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
30254 TargetLowering::DAGCombinerInfo &DCI,
30255 const X86Subtarget &Subtarget) {
30256 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
30257 // (and (i32 x86isd::setcc_carry), 1)
30258 // This eliminates the zext. This transformation is necessary because
30259 // ISD::SETCC is always legalized to i8.
30261 SDValue N0 = N->getOperand(0);
30262 EVT VT = N->getValueType(0);
30264 if (N0.getOpcode() == ISD::AND &&
30266 N0.getOperand(0).hasOneUse()) {
30267 SDValue N00 = N0.getOperand(0);
30268 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
30269 if (!isOneConstant(N0.getOperand(1)))
30271 return DAG.getNode(ISD::AND, dl, VT,
30272 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
30273 N00.getOperand(0), N00.getOperand(1)),
30274 DAG.getConstant(1, dl, VT));
30278 if (N0.getOpcode() == ISD::TRUNCATE &&
30280 N0.getOperand(0).hasOneUse()) {
30281 SDValue N00 = N0.getOperand(0);
30282 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
30283 return DAG.getNode(ISD::AND, dl, VT,
30284 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
30285 N00.getOperand(0), N00.getOperand(1)),
30286 DAG.getConstant(1, dl, VT));
30290 if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
30293 if (VT.is256BitVector())
30294 if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
30297 if (SDValue DivRem8 = getDivRem8(N, DAG))
30303 /// Optimize x == -y --> x+y == 0
30304 /// x != -y --> x+y != 0
30305 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
30306 const X86Subtarget &Subtarget) {
30307 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
30308 SDValue LHS = N->getOperand(0);
30309 SDValue RHS = N->getOperand(1);
30310 EVT VT = N->getValueType(0);
30313 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
30314 if (isNullConstant(LHS.getOperand(0)) && LHS.hasOneUse()) {
30315 SDValue addV = DAG.getNode(ISD::ADD, DL, LHS.getValueType(), RHS,
30316 LHS.getOperand(1));
30317 return DAG.getSetCC(DL, N->getValueType(0), addV,
30318 DAG.getConstant(0, DL, addV.getValueType()), CC);
30320 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
30321 if (isNullConstant(RHS.getOperand(0)) && RHS.hasOneUse()) {
30322 SDValue addV = DAG.getNode(ISD::ADD, DL, RHS.getValueType(), LHS,
30323 RHS.getOperand(1));
30324 return DAG.getSetCC(DL, N->getValueType(0), addV,
30325 DAG.getConstant(0, DL, addV.getValueType()), CC);
30328 if (VT.getScalarType() == MVT::i1 &&
30329 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
30331 (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
30332 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
30333 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
30335 if (!IsSEXT0 || !IsVZero1) {
30336 // Swap the operands and update the condition code.
30337 std::swap(LHS, RHS);
30338 CC = ISD::getSetCCSwappedOperands(CC);
30340 IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
30341 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
30342 IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
30345 if (IsSEXT0 && IsVZero1) {
30346 assert(VT == LHS.getOperand(0).getValueType() &&
30347 "Uexpected operand type");
30348 if (CC == ISD::SETGT)
30349 return DAG.getConstant(0, DL, VT);
30350 if (CC == ISD::SETLE)
30351 return DAG.getConstant(1, DL, VT);
30352 if (CC == ISD::SETEQ || CC == ISD::SETGE)
30353 return DAG.getNOT(DL, LHS.getOperand(0), VT);
30355 assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
30356 "Unexpected condition code!");
30357 return LHS.getOperand(0);
30361 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
30362 // to avoid scalarization via legalization because v4i32 is not a legal type.
30363 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
30364 LHS.getValueType() == MVT::v4f32)
30365 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
30370 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG) {
30372 // Gather and Scatter instructions use k-registers for masks. The type of
30373 // the masks is v*i1. So the mask will be truncated anyway.
30374 // The SIGN_EXTEND_INREG may be dropped.
30375 SDValue Mask = N->getOperand(2);
30376 if (Mask.getOpcode() == ISD::SIGN_EXTEND_INREG) {
30377 SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
30378 NewOps[2] = Mask.getOperand(0);
30379 DAG.UpdateNodeOperands(N, NewOps);
30384 // Helper function of combineX86SetCC. It materializes "setb reg"
30385 // as "sbb reg,reg", since the latter can be extended without a zext and
30386 // produces an all-ones bit which is more useful than 0/1 in some cases.
30387 static SDValue MaterializeSETB(const SDLoc &DL, SDValue EFLAGS,
30388 SelectionDAG &DAG, MVT VT) {
30390 return DAG.getNode(ISD::AND, DL, VT,
30391 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
30392 DAG.getConstant(X86::COND_B, DL, MVT::i8),
30394 DAG.getConstant(1, DL, VT));
30395 assert(VT == MVT::i1 && "Unexpected type for SETCC node");
30396 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
30397 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
30398 DAG.getConstant(X86::COND_B, DL, MVT::i8),
30402 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
30403 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
30404 TargetLowering::DAGCombinerInfo &DCI,
30405 const X86Subtarget &Subtarget) {
30407 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
30408 SDValue EFLAGS = N->getOperand(1);
30410 if (CC == X86::COND_A) {
30411 // Try to convert COND_A into COND_B in an attempt to facilitate
30412 // materializing "setb reg".
30414 // Do not flip "e > c", where "c" is a constant, because the Cmp instruction
30415 // cannot take an immediate as its first operand.
30417 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
30418 EFLAGS.getValueType().isInteger() &&
30419 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
30420 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
30421 EFLAGS.getNode()->getVTList(),
30422 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
30423 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
30424 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
30428 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
30429 // a zext and produces an all-ones bit which is more useful than 0/1 in some
30431 if (CC == X86::COND_B)
30432 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
30434 // Try to simplify the EFLAGS and condition code operands.
30435 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG)) {
30436 SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
30437 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
30443 /// Optimize branch condition evaluation.
30444 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
30445 TargetLowering::DAGCombinerInfo &DCI,
30446 const X86Subtarget &Subtarget) {
30448 SDValue EFLAGS = N->getOperand(3);
30449 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
30451 // Try to simplify the EFLAGS and condition code operands.
30452 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
30453 // RAUW them under us.
30454 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG)) {
30455 SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
30456 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
30457 N->getOperand(1), Cond, Flags);
30463 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
30464 SelectionDAG &DAG) {
30465 // Take advantage of vector comparisons producing 0 or -1 in each lane to
30466 // optimize away the operation when it is applied to a constant.
30468 // The general transformation is:
30469 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
30470 // AND(VECTOR_CMP(x,y), constant2)
30471 // constant2 = UNARYOP(constant)
30473 // Early exit if this isn't a vector operation, the operand of the
30474 // unary operation isn't a bitwise AND, or if the sizes of the operations
30475 // aren't the same.
30476 EVT VT = N->getValueType(0);
30477 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
30478 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
30479 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
30482 // Now check that the other operand of the AND is a constant. We could
30483 // make the transformation for non-constant splats as well, but it's unclear
30484 // that would be a benefit as it would not eliminate any operations, just
30485 // perform one more step in scalar code before moving to the vector unit.
30486 if (BuildVectorSDNode *BV =
30487 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
30488 // Bail out if the vector isn't a constant.
30489 if (!BV->isConstant())
30492 // Everything checks out. Build up the new and improved node.
30494 EVT IntVT = BV->getValueType(0);
30495 // Create a new constant of the appropriate type for the transformed op.
30497 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
30498 // The AND node needs bitcasts to/from an integer vector type around it.
30499 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
30500 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
30501 N->getOperand(0)->getOperand(0), MaskConst);
30502 SDValue Res = DAG.getBitcast(VT, NewAnd);
30509 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
30510 const X86Subtarget &Subtarget) {
30511 SDValue Op0 = N->getOperand(0);
30512 EVT VT = N->getValueType(0);
30513 EVT InVT = Op0.getValueType();
30514 EVT InSVT = InVT.getScalarType();
30515 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
30517 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
30518 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
30519 if (InVT.isVector() && (InSVT == MVT::i8 || InSVT == MVT::i16)) {
30521 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
30522 InVT.getVectorNumElements());
30523 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
30525 if (TLI.isOperationLegal(ISD::UINT_TO_FP, DstVT))
30526 return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P);
30528 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
30534 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
30535 const X86Subtarget &Subtarget) {
30536 // First try to optimize away the conversion entirely when it's
30537 // conditionally from a constant. Vectors only.
30538 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
30541 // Now move on to more general possibilities.
30542 SDValue Op0 = N->getOperand(0);
30543 EVT VT = N->getValueType(0);
30544 EVT InVT = Op0.getValueType();
30545 EVT InSVT = InVT.getScalarType();
30547 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
30548 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
30549 if (InVT.isVector() && (InSVT == MVT::i8 || InSVT == MVT::i16)) {
30551 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
30552 InVT.getVectorNumElements());
30553 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
30554 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
30557 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
30558 // a 32-bit target where SSE doesn't support i64->FP operations.
30559 if (!Subtarget.useSoftFloat() && Op0.getOpcode() == ISD::LOAD) {
30560 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
30561 EVT LdVT = Ld->getValueType(0);
30563 // This transformation is not supported if the result type is f16 or f128.
30564 if (VT == MVT::f16 || VT == MVT::f128)
30567 if (!Ld->isVolatile() && !VT.isVector() &&
30568 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
30569 !Subtarget.is64Bit() && LdVT == MVT::i64) {
30570 SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
30571 SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
30572 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
30579 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
30580 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
30581 X86TargetLowering::DAGCombinerInfo &DCI) {
30582 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
30583 // the result is either zero or one (depending on the input carry bit).
30584 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
30585 if (X86::isZeroNode(N->getOperand(0)) &&
30586 X86::isZeroNode(N->getOperand(1)) &&
30587 // We don't have a good way to replace an EFLAGS use, so only do this when
30589 SDValue(N, 1).use_empty()) {
30591 EVT VT = N->getValueType(0);
30592 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
30593 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
30594 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
30595 DAG.getConstant(X86::COND_B, DL,
30598 DAG.getConstant(1, DL, VT));
30599 return DCI.CombineTo(N, Res1, CarryOut);
30605 /// fold (add Y, (sete X, 0)) -> adc 0, Y
30606 /// (add Y, (setne X, 0)) -> sbb -1, Y
30607 /// (sub (sete X, 0), Y) -> sbb 0, Y
30608 /// (sub (setne X, 0), Y) -> adc -1, Y
30609 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
30612 // Look through ZExts.
30613 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
30614 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
30617 SDValue SetCC = Ext.getOperand(0);
30618 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
30621 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
30622 if (CC != X86::COND_E && CC != X86::COND_NE)
30625 SDValue Cmp = SetCC.getOperand(1);
30626 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
30627 !X86::isZeroNode(Cmp.getOperand(1)) ||
30628 !Cmp.getOperand(0).getValueType().isInteger())
30631 SDValue CmpOp0 = Cmp.getOperand(0);
30632 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
30633 DAG.getConstant(1, DL, CmpOp0.getValueType()));
30635 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
30636 if (CC == X86::COND_NE)
30637 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
30638 DL, OtherVal.getValueType(), OtherVal,
30639 DAG.getConstant(-1ULL, DL, OtherVal.getValueType()),
30641 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
30642 DL, OtherVal.getValueType(), OtherVal,
30643 DAG.getConstant(0, DL, OtherVal.getValueType()), NewCmp);
30646 static SDValue detectSADPattern(SDNode *N, SelectionDAG &DAG,
30647 const X86Subtarget &Subtarget) {
30649 EVT VT = N->getValueType(0);
30650 SDValue Op0 = N->getOperand(0);
30651 SDValue Op1 = N->getOperand(1);
30653 if (!VT.isVector() || !VT.isSimple() ||
30654 !(VT.getVectorElementType() == MVT::i32))
30657 unsigned RegSize = 128;
30658 if (Subtarget.hasBWI())
30660 else if (Subtarget.hasAVX2())
30663 // We only handle v16i32 for SSE2 / v32i32 for AVX2 / v64i32 for AVX512.
30664 if (VT.getSizeInBits() / 4 > RegSize)
30667 // Detect the following pattern:
30669 // 1: %2 = zext <N x i8> %0 to <N x i32>
30670 // 2: %3 = zext <N x i8> %1 to <N x i32>
30671 // 3: %4 = sub nsw <N x i32> %2, %3
30672 // 4: %5 = icmp sgt <N x i32> %4, [0 x N] or [-1 x N]
30673 // 5: %6 = sub nsw <N x i32> zeroinitializer, %4
30674 // 6: %7 = select <N x i1> %5, <N x i32> %4, <N x i32> %6
30675 // 7: %8 = add nsw <N x i32> %7, %vec.phi
30677 // The last instruction must be a reduction add. Instructions 3-6 form an
30678 // ABSDIFF pattern.
30680 // The two operands of the reduction add come from a PHI and a select-op, as in line 7 above.
30682 SDValue SelectOp, Phi;
30683 if (Op0.getOpcode() == ISD::VSELECT) {
30686 } else if (Op1.getOpcode() == ISD::VSELECT) {
30692 // Check that the condition of the select instruction is greater-than.
30693 SDValue SetCC = SelectOp->getOperand(0);
30694 if (SetCC.getOpcode() != ISD::SETCC)
30696 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
30697 if (CC != ISD::SETGT)
30700 Op0 = SelectOp->getOperand(1);
30701 Op1 = SelectOp->getOperand(2);
30703 // The second operand of SelectOp Op1 is the negation of the first operand
30704 // Op0, which is implemented as 0 - Op0.
30705 if (!(Op1.getOpcode() == ISD::SUB &&
30706 ISD::isBuildVectorAllZeros(Op1.getOperand(0).getNode()) &&
30707 Op1.getOperand(1) == Op0))
30710 // The first operand of SetCC is the first operand of SelectOp, which is the
30711 // difference between two input vectors.
30712 if (SetCC.getOperand(0) != Op0)
30715 // The second operand of the > comparison can be either -1 or 0.
30716 if (!(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()) ||
30717 ISD::isBuildVectorAllOnes(SetCC.getOperand(1).getNode())))
30720 // The first operand of SelectOp is the difference between two input vectors.
30721 if (Op0.getOpcode() != ISD::SUB)
30724 Op1 = Op0.getOperand(1);
30725 Op0 = Op0.getOperand(0);
30727 // Check if the operands of the diff are zero-extended from vectors of i8.
30728 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
30729 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
30730 Op1.getOpcode() != ISD::ZERO_EXTEND ||
30731 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
30734 // SAD pattern detected. Now build a SAD instruction and an addition for
30735 // reduction. Note that the number of elements of the result of SAD is less
30736 // than the number of elements of its input. Therefore, we only update
30737 // part of the elements in the reduction vector.
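// For example, when the inputs are v16i8 and the reduction vector is v16i32,
// the two v16i8 inputs feed a single PSADBW producing a v2i64 result, which
// is bitcast to v4i32; only the low four i32 lanes of the v16i32 reduction
// phi are then updated.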
30739 // Legalize the type of the inputs of PSADBW.
30740 EVT InVT = Op0.getOperand(0).getValueType();
30741 if (InVT.getSizeInBits() <= 128)
30743 else if (InVT.getSizeInBits() <= 256)
30746 unsigned NumConcat = RegSize / InVT.getSizeInBits();
30747 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
30748 Ops[0] = Op0.getOperand(0);
30749 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
30750 Op0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
30751 Ops[0] = Op1.getOperand(0);
30752 Op1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
30754 // The output of PSADBW is a vector of i64.
30755 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
30756 SDValue Sad = DAG.getNode(X86ISD::PSADBW, DL, SadVT, Op0, Op1);
30758 // We need to turn the vector of i64 into a vector of i32.
30759 // If the reduction vector is at least as wide as the psadbw result, just
30760 // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero anyway.
30762 MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
30763 if (VT.getSizeInBits() >= ResVT.getSizeInBits())
30764 Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
30766 Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad);
30768 if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
30769 // Update part of the elements of the reduction vector. This is done by first
30770 // extracting a sub-vector from it, updating this sub-vector, and inserting it back.
30772 SDValue SubPhi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, Phi,
30773 DAG.getIntPtrConstant(0, DL));
30774 SDValue Res = DAG.getNode(ISD::ADD, DL, ResVT, Sad, SubPhi);
30775 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Phi, Res,
30776 DAG.getIntPtrConstant(0, DL));
30778 return DAG.getNode(ISD::ADD, DL, VT, Sad, Phi);
30781 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
30782 const X86Subtarget &Subtarget) {
30783 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags;
30784 if (Flags->hasVectorReduction()) {
30785 if (SDValue Sad = detectSADPattern(N, DAG, Subtarget))
30788 EVT VT = N->getValueType(0);
30789 SDValue Op0 = N->getOperand(0);
30790 SDValue Op1 = N->getOperand(1);
30792 // Try to synthesize horizontal adds from adds of shuffles.
30793 if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
30794 (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
30795 isHorizontalBinOp(Op0, Op1, true))
30796 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
30798 return OptimizeConditionalInDecrement(N, DAG);
30801 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
30802 const X86Subtarget &Subtarget) {
30803 SDValue Op0 = N->getOperand(0);
30804 SDValue Op1 = N->getOperand(1);
30806 // X86 can't encode an immediate LHS of a sub. See if we can push the
30807 // negation into a preceding instruction.
30808 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
30809 // If the RHS of the sub is an XOR with one use and a constant, invert the
30810 // immediate. Then add one to the LHS of the sub so we can turn
30811 // X-Y -> X+~Y+1, saving one register.
30812 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
30813 isa<ConstantSDNode>(Op1.getOperand(1))) {
30814 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
30815 EVT VT = Op0.getValueType();
30816 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
30818 DAG.getConstant(~XorC, SDLoc(Op1), VT));
30819 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
30820 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
30824 // Try to synthesize horizontal subs from subs of shuffles.
30825 EVT VT = N->getValueType(0);
30826 if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
30827 (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
30828 isHorizontalBinOp(Op0, Op1, true))
30829 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
30831 return OptimizeConditionalInDecrement(N, DAG);
30834 static SDValue combineVZext(SDNode *N, SelectionDAG &DAG,
30835 TargetLowering::DAGCombinerInfo &DCI,
30836 const X86Subtarget &Subtarget) {
30838 MVT VT = N->getSimpleValueType(0);
30839 MVT SVT = VT.getVectorElementType();
30840 SDValue Op = N->getOperand(0);
30841 MVT OpVT = Op.getSimpleValueType();
30842 MVT OpEltVT = OpVT.getVectorElementType();
30843 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
30845 // Perform any constant folding.
30846 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
30847 SmallVector<SDValue, 4> Vals;
30848 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
30849 SDValue OpElt = Op.getOperand(i);
30850 if (OpElt.getOpcode() == ISD::UNDEF) {
30851 Vals.push_back(DAG.getUNDEF(SVT));
30854 APInt Cst = cast<ConstantSDNode>(OpElt.getNode())->getAPIntValue();
30855 assert(Cst.getBitWidth() == OpEltVT.getSizeInBits());
30856 Cst = Cst.zextOrTrunc(SVT.getSizeInBits());
30857 Vals.push_back(DAG.getConstant(Cst, DL, SVT));
30858 }
30859 return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Vals);
30860 }
30862 // (vzext (bitcast (vzext x))) -> (vzext x)
30863 SDValue V = peekThroughBitcasts(Op);
30864 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
30865 MVT InnerVT = V.getSimpleValueType();
30866 MVT InnerEltVT = InnerVT.getVectorElementType();
30868 // If the element sizes match exactly, we can just do one larger vzext. This
30869 // is always an exact type match as vzext operates on integer types.
30870 if (OpEltVT == InnerEltVT) {
30871 assert(OpVT == InnerVT && "Types must match for vzext!");
30872 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
30875 // The only other way we can combine them is if only a single element of the
30876 // inner vzext is used in the input to the outer vzext.
30877 if (InnerEltVT.getSizeInBits() < InputBits)
30880 // In this case, the inner vzext is completely dead because we're going to
30881 // only look at bits inside of the low element. Just do the outer vzext on
30882 // a bitcast of the input to the inner.
30883 return DAG.getNode(X86ISD::VZEXT, DL, VT, DAG.getBitcast(OpVT, V));
30886 // Check if we can bypass extracting and re-inserting an element of an input
30887 // vector. Essentially:
30888 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
30889 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
30890 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
30891 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
30892 SDValue ExtractedV = V.getOperand(0);
30893 SDValue OrigV = ExtractedV.getOperand(0);
30894 if (isNullConstant(ExtractedV.getOperand(1))) {
30895 MVT OrigVT = OrigV.getSimpleValueType();
30896 // Extract a subvector if necessary...
30897 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
30898 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
30899 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
30900 OrigVT.getVectorNumElements() / Ratio);
30901 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
30902 DAG.getIntPtrConstant(0, DL));
30904 Op = DAG.getBitcast(OpVT, OrigV);
30905 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
30906 }
30907 }
30909 return SDValue();
30910 }
30912 /// Canonicalize (LSUB p, 1) -> (LADD p, -1).
30913 static SDValue combineLockSub(SDNode *N, SelectionDAG &DAG,
30914 const X86Subtarget &Subtarget) {
30915 SDValue Chain = N->getOperand(0);
30916 SDValue LHS = N->getOperand(1);
30917 SDValue RHS = N->getOperand(2);
30918 MVT VT = RHS.getSimpleValueType();
30919 SDLoc DL(N);
30921 auto *C = dyn_cast<ConstantSDNode>(RHS);
30922 if (!C || C->getZExtValue() != 1)
30923 return SDValue();
30925 RHS = DAG.getConstant(-1, DL, VT);
30926 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
30927 return DAG.getMemIntrinsicNode(X86ISD::LADD, DL,
30928 DAG.getVTList(MVT::i32, MVT::Other),
30929 {Chain, LHS, RHS}, VT, MMO);
30930 }
30932 // TEST (AND a, b), (AND a, b) -> TEST a, b
30933 static SDValue combineTestM(SDNode *N, SelectionDAG &DAG) {
30934 SDValue Op0 = N->getOperand(0);
30935 SDValue Op1 = N->getOperand(1);
30937 if (Op0 != Op1 || Op1->getOpcode() != ISD::AND)
30938 return SDValue();
30940 EVT VT = N->getValueType(0);
30941 SDLoc DL(N);
30943 return DAG.getNode(X86ISD::TESTM, DL, VT,
30944 Op0->getOperand(0), Op0->getOperand(1));
30945 }
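// Fold PCMPEQ/PCMPGT with identical operands: x == x is all-ones and
// x > x is all-zeros, so the compare can be replaced by a constant vector.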
30947 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
30948 const X86Subtarget &Subtarget) {
30949 MVT VT = N->getSimpleValueType(0);
30950 SDLoc DL(N);
30952 if (N->getOperand(0) == N->getOperand(1)) {
30953 if (N->getOpcode() == X86ISD::PCMPEQ)
30954 return getOnesVector(VT, Subtarget, DAG, DL);
30955 if (N->getOpcode() == X86ISD::PCMPGT)
30956 return getZeroVector(VT, Subtarget, DAG, DL);
30957 }
30959 return SDValue();
30960 }
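/// Central dispatch for target DAG combines: route each opcode to its
/// dedicated combine helper.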
30963 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
30964 DAGCombinerInfo &DCI) const {
30965 SelectionDAG &DAG = DCI.DAG;
30966 switch (N->getOpcode()) {
30968 case ISD::EXTRACT_VECTOR_ELT: return combineExtractVectorElt(N, DAG, DCI);
30969 case ISD::SELECT:
30970 case ISD::VSELECT:
30971 case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
30972 case ISD::BITCAST: return combineBitcast(N, DAG, Subtarget);
30973 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
30974 case ISD::ADD: return combineAdd(N, DAG, Subtarget);
30975 case ISD::SUB: return combineSub(N, DAG, Subtarget);
30976 case X86ISD::ADC: return combineADC(N, DAG, DCI);
30977 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
30978 case ISD::SHL:
30979 case ISD::SRA:
30980 case ISD::SRL: return combineShift(N, DAG, DCI, Subtarget);
30981 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
30982 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
30983 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
30984 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
30985 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
30986 case ISD::STORE: return combineStore(N, DAG, Subtarget);
30987 case ISD::MSTORE: return combineMaskedStore(N, DAG, Subtarget);
30988 case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget);
30989 case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
30990 case ISD::FADD:
30991 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
30992 case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
30993 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
30995 case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
30996 case X86ISD::FMIN:
30997 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
30998 case ISD::FMINNUM:
30999 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
31000 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
31001 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
31002 case X86ISD::BT: return combineBT(N, DAG, DCI);
31003 case X86ISD::VZEXT_MOVL: return combineVZextMovl(N, DAG);
31004 case ISD::ANY_EXTEND:
31005 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
31006 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
31007 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
31008 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
31009 case X86ISD::SETCC: return combineX86SetCC(N, DAG, DCI, Subtarget);
31010 case X86ISD::BRCOND: return combineBrCond(N, DAG, DCI, Subtarget);
31011 case X86ISD::VZEXT: return combineVZext(N, DAG, DCI, Subtarget);
31012 case X86ISD::SHUFP: // Handle all target specific shuffles
31013 case X86ISD::INSERTPS:
31014 case X86ISD::PALIGNR:
31015 case X86ISD::VSHLDQ:
31016 case X86ISD::VSRLDQ:
31017 case X86ISD::BLENDI:
31018 case X86ISD::UNPCKH:
31019 case X86ISD::UNPCKL:
31020 case X86ISD::MOVHLPS:
31021 case X86ISD::MOVLHPS:
31022 case X86ISD::PSHUFB:
31023 case X86ISD::PSHUFD:
31024 case X86ISD::PSHUFHW:
31025 case X86ISD::PSHUFLW:
31026 case X86ISD::MOVSHDUP:
31027 case X86ISD::MOVSLDUP:
31028 case X86ISD::MOVDDUP:
31029 case X86ISD::MOVSS:
31030 case X86ISD::MOVSD:
31031 case X86ISD::VPPERM:
31032 case X86ISD::VPERMI:
31033 case X86ISD::VPERMV:
31034 case X86ISD::VPERMV3:
31035 case X86ISD::VPERMIL2:
31036 case X86ISD::VPERMILPI:
31037 case X86ISD::VPERMILPV:
31038 case X86ISD::VPERM2X128:
31039 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
31040 case ISD::FMA: return combineFMA(N, DAG, Subtarget);
31041 case ISD::MGATHER:
31042 case ISD::MSCATTER: return combineGatherScatter(N, DAG);
31043 case X86ISD::LSUB: return combineLockSub(N, DAG, Subtarget);
31044 case X86ISD::TESTM: return combineTestM(N, DAG);
31045 case X86ISD::PCMPEQ:
31046 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
31047 }
31049 return SDValue();
31050 }
31052 /// Return true if the target has native support for the specified value type
31053 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
31054 /// i16 is legal, but undesirable since i16 instruction encodings are longer and
31055 /// some i16 instructions are slow.
31056 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
31057 if (!isTypeLegal(VT))
31058 return false;
31059 if (VT != MVT::i16)
31060 return true;
31066 case ISD::SIGN_EXTEND:
31067 case ISD::ZERO_EXTEND:
31068 case ISD::ANY_EXTEND:
31081 /// This function checks whether any user of EFLAGS copies the EFLAGS. We
31082 /// know that the code that lowers COPY of EFLAGS has to use the stack, and if
31083 /// we don't adjust the stack we clobber the first frame index.
31084 /// See X86InstrInfo::copyPhysReg.
31085 bool X86TargetLowering::hasCopyImplyingStackAdjustment(
31086 MachineFunction *MF) const {
31087 const MachineRegisterInfo &MRI = MF->getRegInfo();
31089 return any_of(MRI.reg_instructions(X86::EFLAGS),
31090 [](const MachineInstr &RI) { return RI.isCopy(); });
31091 }
31093 /// This method queries the target whether it is beneficial for the DAG combiner to
31094 /// promote the specified node. If true, it should return the desired promotion
31095 /// type by reference.
31096 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
31097 EVT VT = Op.getValueType();
31098 if (VT != MVT::i16)
31099 return false;
31101 bool Promote = false;
31102 bool Commute = false;
31103 switch (Op.getOpcode()) {
31105 case ISD::SIGN_EXTEND:
31106 case ISD::ZERO_EXTEND:
31107 case ISD::ANY_EXTEND:
31112 SDValue N0 = Op.getOperand(0);
31113 // Look out for (store (shl (load), x)).
31114 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
31127 SDValue N0 = Op.getOperand(0);
31128 SDValue N1 = Op.getOperand(1);
31129 if (!Commute && MayFoldLoad(N1))
31130 return false;
31131 // Avoid disabling potential load folding opportunities.
31132 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
31133 return false;
31134 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
31135 return false;
31144 //===----------------------------------------------------------------------===//
31145 // X86 Inline Assembly Support
31146 //===----------------------------------------------------------------------===//
31148 // Helper to match a string separated by whitespace.
31149 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
31150 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
31152 for (StringRef Piece : Pieces) {
31153 if (!S.startswith(Piece)) // Check if the piece matches.
31156 S = S.substr(Piece.size());
31157 StringRef::size_type Pos = S.find_first_not_of(" \t");
31158 if (Pos == 0) // We matched a prefix.
31159 return false;
31161 S = S.substr(Pos);
31162 }
31164 return S.empty();
31165 }
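// Return true if the inline-asm clobber list names the condition-code and
// FP status registers (plus the direction flag when four clobbers are given).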
31167 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
31169 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
31170 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
31171 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
31172 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
31174 if (AsmPieces.size() == 3)
31175 return true;
31176 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
31177 return true;
31178 }
31179 }
31180 return false;
31181 }
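// Try to replace simple byte-swapping inline asm (bswap / rorw+rorl
// sequences) with the llvm.bswap intrinsic so it can be selected normally.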
31183 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
31184 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
31186 const std::string &AsmStr = IA->getAsmString();
31188 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
31189 if (!Ty || Ty->getBitWidth() % 16 != 0)
31192 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
31193 SmallVector<StringRef, 4> AsmPieces;
31194 SplitString(AsmStr, AsmPieces, ";\n");
31196 switch (AsmPieces.size()) {
31197 default: return false;
31198 case 1:
31199 // FIXME: this should verify that we are targeting a 486 or better. If not,
31200 // we will turn this bswap into something that will be lowered to logical
31201 // ops instead of emitting the bswap asm. For now, we don't support 486 or
31202 // lower so don't worry about this.
31204 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
31205 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
31206 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
31207 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
31208 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
31209 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
31210 // No need to check constraints, nothing other than the equivalent of
31211 // "=r,0" would be valid here.
31212 return IntrinsicLowering::LowerToByteSwap(CI);
31215 // rorw $$8, ${0:w} --> llvm.bswap.i16
31216 if (CI->getType()->isIntegerTy(16) &&
31217 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
31218 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
31219 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
31221 StringRef ConstraintsStr = IA->getConstraintString();
31222 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
31223 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
31224 if (clobbersFlagRegisters(AsmPieces))
31225 return IntrinsicLowering::LowerToByteSwap(CI);
31226 }
31227 break;
31228 case 3:
31229 if (CI->getType()->isIntegerTy(32) &&
31230 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
31231 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
31232 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
31233 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
31235 StringRef ConstraintsStr = IA->getConstraintString();
31236 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
31237 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
31238 if (clobbersFlagRegisters(AsmPieces))
31239 return IntrinsicLowering::LowerToByteSwap(CI);
31242 if (CI->getType()->isIntegerTy(64)) {
31243 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
31244 if (Constraints.size() >= 2 &&
31245 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
31246 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
31247 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
31248 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
31249 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
31250 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
31251 return IntrinsicLowering::LowerToByteSwap(CI);
31259 /// Given a constraint letter, return the type of constraint for this target.
31260 X86TargetLowering::ConstraintType
31261 X86TargetLowering::getConstraintType(StringRef Constraint) const {
31262 if (Constraint.size() == 1) {
31263 switch (Constraint[0]) {
31274 return C_RegisterClass;
31298 return TargetLowering::getConstraintType(Constraint);
31301 /// Examine constraint type and operand type and determine a weight value.
31302 /// This object must already have been set up with the operand type
31303 /// and the current alternative constraint selected.
31304 TargetLowering::ConstraintWeight
31305 X86TargetLowering::getSingleConstraintMatchWeight(
31306 AsmOperandInfo &info, const char *constraint) const {
31307 ConstraintWeight weight = CW_Invalid;
31308 Value *CallOperandVal = info.CallOperandVal;
31309 // If we don't have a value, we can't do a match,
31310 // but allow it at the lowest weight.
31311 if (!CallOperandVal)
31313 Type *type = CallOperandVal->getType();
31314 // Look at the constraint type.
31315 switch (*constraint) {
31317 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
31328 if (CallOperandVal->getType()->isIntegerTy())
31329 weight = CW_SpecificReg;
31334 if (type->isFloatingPointTy())
31335 weight = CW_SpecificReg;
31338 if (type->isX86_MMXTy() && Subtarget.hasMMX())
31339 weight = CW_SpecificReg;
31343 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
31344 ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasFp256()))
31345 weight = CW_Register;
31348 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
31349 if (C->getZExtValue() <= 31)
31350 weight = CW_Constant;
31354 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
31355 if (C->getZExtValue() <= 63)
31356 weight = CW_Constant;
31360 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
31361 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
31362 weight = CW_Constant;
31366 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
31367 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
31368 weight = CW_Constant;
31372 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
31373 if (C->getZExtValue() <= 3)
31374 weight = CW_Constant;
31378 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
31379 if (C->getZExtValue() <= 0xff)
31380 weight = CW_Constant;
31385 if (isa<ConstantFP>(CallOperandVal)) {
31386 weight = CW_Constant;
31390 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
31391 if ((C->getSExtValue() >= -0x80000000LL) &&
31392 (C->getSExtValue() <= 0x7fffffffLL))
31393 weight = CW_Constant;
31397 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
31398 if (C->getZExtValue() <= 0xffffffff)
31399 weight = CW_Constant;
31400 }
31401 break;
31402 }
31404 return weight;
31405 }
31406 /// Try to replace an X constraint, which matches anything, with another that
31407 /// has more specific requirements based on the type of the corresponding
31408 /// operand.
31409 const char *X86TargetLowering::
31410 LowerXConstraint(EVT ConstraintVT) const {
31411 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
31412 // 'f' like normal targets.
31413 if (ConstraintVT.isFloatingPoint()) {
31414 if (Subtarget.hasSSE2())
31415 return "Y";
31416 if (Subtarget.hasSSE1())
31417 return "x";
31418 }
31420 return TargetLowering::LowerXConstraint(ConstraintVT);
31423 /// Lower the specified operand into the Ops vector.
31424 /// If it is invalid, don't add anything to Ops.
31425 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
31426 std::string &Constraint,
31427 std::vector<SDValue>&Ops,
31428 SelectionDAG &DAG) const {
31429 SDValue Result;
31431 // Only support length 1 constraints for now.
31432 if (Constraint.length() > 1) return;
31434 char ConstraintLetter = Constraint[0];
31435 switch (ConstraintLetter) {
31438 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31439 if (C->getZExtValue() <= 31) {
31440 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
31441 Op.getValueType());
31447 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31448 if (C->getZExtValue() <= 63) {
31449 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
31450 Op.getValueType());
31456 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31457 if (isInt<8>(C->getSExtValue())) {
31458 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
31459 Op.getValueType());
31465 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31466 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
31467 (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
31468 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
31469 Op.getValueType());
31475 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31476 if (C->getZExtValue() <= 3) {
31477 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
31478 Op.getValueType());
31484 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31485 if (C->getZExtValue() <= 255) {
31486 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
31487 Op.getValueType());
31493 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31494 if (C->getZExtValue() <= 127) {
31495 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
31496 Op.getValueType());
31502 // 32-bit signed value
31503 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31504 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
31505 C->getSExtValue())) {
31506 // Widen to 64 bits here to get it sign extended.
31507 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
31510 // FIXME gcc accepts some relocatable values here too, but only in certain
31511 // memory models; it's complicated.
31516 // 32-bit unsigned value
31517 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
31518 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
31519 C->getZExtValue())) {
31520 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
31521 Op.getValueType());
31525 // FIXME gcc accepts some relocatable values here too, but only in certain
31526 // memory models; it's complicated.
31530 // Literal immediates are always ok.
31531 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
31532 // Widen to 64 bits here to get it sign extended.
31533 Result = DAG.getTargetConstant(CST->getSExtValue(), SDLoc(Op), MVT::i64);
31537 // In any sort of PIC mode addresses need to be computed at runtime by
31538 // adding in a register or some sort of table lookup. These can't
31539 // be used as immediates.
31540 if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
31541 return;
31543 // If we are in non-pic codegen mode, we allow the address of a global (with
31544 // an optional displacement) to be used with 'i'.
31545 GlobalAddressSDNode *GA = nullptr;
31546 int64_t Offset = 0;
31548 // Match either (GA), (GA+C), (GA+C1+C2), etc.
31550 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
31551 Offset += GA->getOffset();
31553 } else if (Op.getOpcode() == ISD::ADD) {
31554 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
31555 Offset += C->getZExtValue();
31556 Op = Op.getOperand(0);
31559 } else if (Op.getOpcode() == ISD::SUB) {
31560 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
31561 Offset += -C->getZExtValue();
31562 Op = Op.getOperand(0);
31567 // Otherwise, this isn't something we can handle, reject it.
31571 const GlobalValue *GV = GA->getGlobal();
31572 // If we require an extra load to get this address, as in PIC mode, we
31573 // can't accept it.
31574 if (isGlobalStubReference(Subtarget.classifyGlobalReference(GV)))
31575 return;
31577 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
31578 GA->getValueType(0), Offset);
31583 if (Result.getNode()) {
31584 Ops.push_back(Result);
31585 return;
31586 }
31587 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
31590 /// Check if \p RC is a general purpose register class.
31591 /// I.e., GR* or one of their variant.
31592 static bool isGRClass(const TargetRegisterClass &RC) {
31593 switch (RC.getID()) {
31594 case X86::GR8RegClassID:
31595 case X86::GR8_ABCD_LRegClassID:
31596 case X86::GR8_ABCD_HRegClassID:
31597 case X86::GR8_NOREXRegClassID:
31598 case X86::GR16RegClassID:
31599 case X86::GR16_ABCDRegClassID:
31600 case X86::GR16_NOREXRegClassID:
31601 case X86::GR32RegClassID:
31602 case X86::GR32_ABCDRegClassID:
31603 case X86::GR32_TCRegClassID:
31604 case X86::GR32_NOREXRegClassID:
31605 case X86::GR32_NOAXRegClassID:
31606 case X86::GR32_NOSPRegClassID:
31607 case X86::GR32_NOREX_NOSPRegClassID:
31608 case X86::GR32_ADRegClassID:
31609 case X86::GR64RegClassID:
31610 case X86::GR64_ABCDRegClassID:
31611 case X86::GR64_TCRegClassID:
31612 case X86::GR64_TCW64RegClassID:
31613 case X86::GR64_NOREXRegClassID:
31614 case X86::GR64_NOSPRegClassID:
31615 case X86::GR64_NOREX_NOSPRegClassID:
31616 case X86::LOW32_ADDR_ACCESSRegClassID:
31617 case X86::LOW32_ADDR_ACCESS_RBPRegClassID:
31618 return true;
31619 default:
31620 return false;
31621 }
31622 }
31624 /// Check if \p RC is a vector register class.
31625 /// I.e., FR* / VR* or one of their variant.
31626 static bool isFRClass(const TargetRegisterClass &RC) {
31627 switch (RC.getID()) {
31628 case X86::FR32RegClassID:
31629 case X86::FR32XRegClassID:
31630 case X86::FR64RegClassID:
31631 case X86::FR64XRegClassID:
31632 case X86::FR128RegClassID:
31633 case X86::VR64RegClassID:
31634 case X86::VR128RegClassID:
31635 case X86::VR128LRegClassID:
31636 case X86::VR128HRegClassID:
31637 case X86::VR128XRegClassID:
31638 case X86::VR256RegClassID:
31639 case X86::VR256LRegClassID:
31640 case X86::VR256HRegClassID:
31641 case X86::VR256XRegClassID:
31642 case X86::VR512RegClassID:
31643 return true;
31644 default:
31645 return false;
31646 }
31647 }
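/// Map an inline-asm register constraint (single letters such as 'r', 'x',
/// 'Y', or explicit {reg} names) onto a physical register and register class.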
31649 std::pair<unsigned, const TargetRegisterClass *>
31650 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
31651 StringRef Constraint,
31652 MVT VT) const {
31653 // First, see if this is a constraint that directly corresponds to an LLVM
31655 if (Constraint.size() == 1) {
31656 // GCC Constraint Letters
31657 switch (Constraint[0]) {
31659 // TODO: Slight differences here in allocation order and leaving
31660 // RIP in the class. Do they matter any more here than they do
31661 // in the normal allocation?
31662 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
31663 if (Subtarget.is64Bit()) {
31664 if (VT == MVT::i32 || VT == MVT::f32)
31665 return std::make_pair(0U, &X86::GR32RegClass);
31666 if (VT == MVT::i16)
31667 return std::make_pair(0U, &X86::GR16RegClass);
31668 if (VT == MVT::i8 || VT == MVT::i1)
31669 return std::make_pair(0U, &X86::GR8RegClass);
31670 if (VT == MVT::i64 || VT == MVT::f64)
31671 return std::make_pair(0U, &X86::GR64RegClass);
31674 // 32-bit fallthrough
31675 case 'Q': // Q_REGS
31676 if (VT == MVT::i32 || VT == MVT::f32)
31677 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
31678 if (VT == MVT::i16)
31679 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
31680 if (VT == MVT::i8 || VT == MVT::i1)
31681 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
31682 if (VT == MVT::i64)
31683 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
31685 case 'r': // GENERAL_REGS
31686 case 'l': // INDEX_REGS
31687 if (VT == MVT::i8 || VT == MVT::i1)
31688 return std::make_pair(0U, &X86::GR8RegClass);
31689 if (VT == MVT::i16)
31690 return std::make_pair(0U, &X86::GR16RegClass);
31691 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
31692 return std::make_pair(0U, &X86::GR32RegClass);
31693 return std::make_pair(0U, &X86::GR64RegClass);
31694 case 'R': // LEGACY_REGS
31695 if (VT == MVT::i8 || VT == MVT::i1)
31696 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
31697 if (VT == MVT::i16)
31698 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
31699 if (VT == MVT::i32 || !Subtarget.is64Bit())
31700 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
31701 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
31702 case 'f': // FP Stack registers.
31703 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
31704 // value to the correct fpstack register class.
31705 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
31706 return std::make_pair(0U, &X86::RFP32RegClass);
31707 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
31708 return std::make_pair(0U, &X86::RFP64RegClass);
31709 return std::make_pair(0U, &X86::RFP80RegClass);
31710 case 'y': // MMX_REGS if MMX allowed.
31711 if (!Subtarget.hasMMX()) break;
31712 return std::make_pair(0U, &X86::VR64RegClass);
31713 case 'Y': // SSE_REGS if SSE2 allowed
31714 if (!Subtarget.hasSSE2()) break;
31716 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
31717 if (!Subtarget.hasSSE1()) break;
31719 switch (VT.SimpleTy) {
31721 // Scalar SSE types.
31724 return std::make_pair(0U, &X86::FR32RegClass);
31727 return std::make_pair(0U, &X86::FR64RegClass);
31728 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
31736 return std::make_pair(0U, &X86::VR128RegClass);
31744 return std::make_pair(0U, &X86::VR256RegClass);
31749 return std::make_pair(0U, &X86::VR512RegClass);
31755 // Use the default implementation in TargetLowering to convert the register
31756 // constraint into a member of a register class.
31757 std::pair<unsigned, const TargetRegisterClass*> Res;
31758 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
31760 // Not found as a standard register?
31761 if (!Res.second) {
31762 // Map st(0) -> st(7) -> ST0
31763 if (Constraint.size() == 7 && Constraint[0] == '{' &&
31764 tolower(Constraint[1]) == 's' &&
31765 tolower(Constraint[2]) == 't' &&
31766 Constraint[3] == '(' &&
31767 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
31768 Constraint[5] == ')' &&
31769 Constraint[6] == '}') {
31771 Res.first = X86::FP0+Constraint[4]-'0';
31772 Res.second = &X86::RFP80RegClass;
31776 // GCC allows "st(0)" to be called just plain "st".
31777 if (StringRef("{st}").equals_lower(Constraint)) {
31778 Res.first = X86::FP0;
31779 Res.second = &X86::RFP80RegClass;
31784 if (StringRef("{flags}").equals_lower(Constraint)) {
31785 Res.first = X86::EFLAGS;
31786 Res.second = &X86::CCRRegClass;
31790 // 'A' means EAX + EDX.
31791 if (Constraint == "A") {
31792 Res.first = X86::EAX;
31793 Res.second = &X86::GR32_ADRegClass;
31799 // Otherwise, check to see if this is a register class of the wrong value
31800 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
31801 // turn into {ax},{dx}.
31802 // MVT::Other is used to specify clobber names.
31803 if (Res.second->hasType(VT) || VT == MVT::Other)
31804 return Res; // Correct type already, nothing to do.
31806 // Get a matching integer of the correct size. i.e. "ax" with MVT::i32 should
31807 // return "eax". This should even work for things like getting 64bit integer
31808 // registers when given an f64 type.
31809 const TargetRegisterClass *Class = Res.second;
31810 // The generic code will match the first register class that contains the
31811 // given register. Thus, based on the ordering of the tablegened file,
31812 // the "plain" GR classes might not come first.
31813 // Therefore, use a helper method.
31814 if (isGRClass(*Class)) {
31815 unsigned Size = VT.getSizeInBits();
31816 if (Size == 1) Size = 8;
31817 unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
31818 if (DestReg > 0) {
31819 Res.first = DestReg;
31820 Res.second = Size == 8 ? &X86::GR8RegClass
31821 : Size == 16 ? &X86::GR16RegClass
31822 : Size == 32 ? &X86::GR32RegClass
31823 : &X86::GR64RegClass;
31824 assert(Res.second->contains(Res.first) && "Register in register class");
31825 } else {
31826 // No register found/type mismatch.
31827 Res.first = 0;
31828 Res.second = nullptr;
31829 }
31830 } else if (isFRClass(*Class)) {
31831 // Handle references to XMM physical registers that got mapped into the
31832 // wrong class. This can happen with constraints like {xmm0} where the
31833 // target independent register mapper will just pick the first match it can
31834 // find, ignoring the required type.
31836 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
31837 if (VT == MVT::f32 || VT == MVT::i32)
31838 Res.second = &X86::FR32RegClass;
31839 else if (VT == MVT::f64 || VT == MVT::i64)
31840 Res.second = &X86::FR64RegClass;
31841 else if (X86::VR128RegClass.hasType(VT))
31842 Res.second = &X86::VR128RegClass;
31843 else if (X86::VR256RegClass.hasType(VT))
31844 Res.second = &X86::VR256RegClass;
31845 else if (X86::VR512RegClass.hasType(VT))
31846 Res.second = &X86::VR512RegClass;
31847 else {
31848 // Type mismatch and not a clobber: Return an error.
31849 Res.first = 0;
31850 Res.second = nullptr;
31851 }
31852 }
31854 return Res;
31855 }
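/// Return the extra cost of using a scaled-index addressing mode: the scale
/// is modeled as costing one extra resource, as explained below.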
31857 int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
31858 const AddrMode &AM, Type *Ty,
31859 unsigned AS) const {
31860 // Scaling factors are not free at all.
31861 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
31862 // will take 2 allocations in the out of order engine instead of 1
31863 // for plain addressing mode, i.e. inst (reg1).
31865 // vaddps (%rsi,%rdx), %ymm0, %ymm1
31866 // Requires two allocations (one for the load, one for the computation)
31868 // vaddps (%rsi), %ymm0, %ymm1
31869 // Requires just 1 allocation, i.e., freeing allocations for other operations
31870 // and having less micro operations to execute.
31872 // For some X86 architectures, this is even worse because for instance for
31873 // stores, the complex addressing mode forces the instruction to use the
31874 // "load" ports instead of the dedicated "store" port.
31875 // E.g., on Haswell:
31876 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
31877 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
31878 if (isLegalAddressingMode(DL, AM, Ty, AS))
31879 // Scale represents reg2 * scale, thus account for 1
31880 // as soon as we use a second register.
31881 return AM.Scale != 0;
31882 return -1;
31883 }
31885 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
31886 // Integer division on x86 is expensive. However, when aggressively optimizing
31887 // for code size, we prefer to use a div instruction, as it is usually smaller
31888 // than the alternative sequence.
31889 // The exception to this is vector division. Since x86 doesn't have vector
31890 // integer division, leaving the division as-is is a loss even in terms of
31891 // size, because it will have to be scalarized, while the alternative code
31892 // sequence can be performed in vector form.
31893 bool OptSize = Attr.hasAttribute(AttributeSet::FunctionIndex,
31894 Attribute::MinSize);
31895 return OptSize && !VT.isVector();
31896 }
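// Mark this function as using the split callee-saved-register scheme
// (e.g. for CXX_FAST_TLS-style lowering); only supported in 64-bit mode.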
31898 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
31899 if (!Subtarget.is64Bit())
31900 return;
31902 // Update IsSplitCSR in X86MachineFunctionInfo.
31903 X86MachineFunctionInfo *AFI =
31904 Entry->getParent()->getInfo<X86MachineFunctionInfo>();
31905 AFI->setIsSplitCSR(true);
31906 }
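// For split-CSR functions, copy each callee-saved register into a fresh
// virtual register in the entry block and copy it back before every exit.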
31908 void X86TargetLowering::insertCopiesSplitCSR(
31909 MachineBasicBlock *Entry,
31910 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
31911 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
31912 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
31913 if (!IStart)
31914 return;
31916 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31917 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
31918 MachineBasicBlock::iterator MBBI = Entry->begin();
31919 for (const MCPhysReg *I = IStart; *I; ++I) {
31920 const TargetRegisterClass *RC = nullptr;
31921 if (X86::GR64RegClass.contains(*I))
31922 RC = &X86::GR64RegClass;
31923 else
31924 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
31926 unsigned NewVR = MRI->createVirtualRegister(RC);
31927 // Create copy from CSR to a virtual register.
31928 // FIXME: this currently does not emit CFI pseudo-instructions, it works
31929 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
31930 // nounwind. If we want to generalize this later, we may need to emit
31931 // CFI pseudo-instructions.
31932 assert(Entry->getParent()->getFunction()->hasFnAttribute(
31933 Attribute::NoUnwind) &&
31934 "Function should be nounwind in insertCopiesSplitCSR!");
31935 Entry->addLiveIn(*I);
31936 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
31937 .addReg(*I);
31939 // Insert the copy-back instructions right before the terminator.
31940 for (auto *Exit : Exits)
31941 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
31942 TII->get(TargetOpcode::COPY), *I)