//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "MCTargetDesc/X86ShuffleDecode.h"
#include "X86.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<int> ExperimentalPrefLoopAlignment(
    "x86-experimental-pref-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes)"
        "(the last x86-experimental-pref-loop-alignment bits"
        " of the loop header PC will be 0)."),
    cl::Hidden);

static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);

static cl::opt<bool> ExperimentalUnorderedISEL(
    "x86-experimental-unordered-atomic-isel", cl::init(false),
    cl::desc("Use LoadSDNode and StoreSDNode instead of "
             "AtomicSDNode for unordered atomic loads and "
             "stores respectively."),
    cl::Hidden);

/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             StringRef Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  X86ScalarSSEf64 = Subtarget.hasSSE2();
  X86ScalarSSEf32 = Subtarget.hasSSE1();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides and use cheaper ones.
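  // Note: addBypassSlowDiv(32, 8) lets CodeGenPrepare emit a run-time check
  // and use the much cheaper 8-bit divide when both operands of a 32-bit
  // divide happen to fit in 8 bits; the 64/32 case below works the same way.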
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }
  if (Subtarget.isTargetWindowsMSVC() ||
      Subtarget.isTargetWindowsItanium()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
  }

  if (Subtarget.getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size
  // to 32 bits so the AtomicExpandPass will expand it so we don't need
  // cmpxchg8b.
  // FIXME: Should we be limiting the atomic size on other configs? Default is
  // natural width.
  if (!Subtarget.hasCmpxchg8b())
    setMaxAtomicSizeInBitsSupported(32);
  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // SETOEQ and SETUNE require checking two conditions.
  for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
    setCondCodeAction(ISD::SETOEQ, VT, Expand);
    setCondCodeAction(ISD::SETUNE, VT, Expand);
  }

  // Integer absolute.
  if (Subtarget.hasCMov()) {
    setOperationAction(ISD::ABS, MVT::i16, Custom);
    setOperationAction(ISD::ABS, MVT::i32, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i64, Custom);
  }

  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    // For slow shld targets we only lower for code size.
    LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;

    setOperationAction(ShiftOp, MVT::i8, Custom);
    setOperationAction(ShiftOp, MVT::i16, Custom);
    setOperationAction(ShiftOp, MVT::i32, ShiftDoubleAction);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp, MVT::i64, ShiftDoubleAction);
  }
  if (!Subtarget.useSoftFloat()) {
    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
    // operation.
    setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);

    // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
    // SSE has no i16 to fp conversion, only i32. We promote in the handler
    // to allow f80 to use i16 and f64 to use i16 with sse1 only
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
    // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);

    // Promote i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);

    // Handle FP_TO_UINT by promoting the destination to a larger signed
    // conversion.
    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);

    setOperationAction(ISD::LRINT, MVT::f32, Custom);
    setOperationAction(ISD::LRINT, MVT::f64, Custom);
    setOperationAction(ISD::LLRINT, MVT::f32, Custom);
    setOperationAction(ISD::LLRINT, MVT::f64, Custom);

    if (!Subtarget.is64Bit()) {
      setOperationAction(ISD::LRINT, MVT::i64, Custom);
      setOperationAction(ISD::LLRINT, MVT::i64, Custom);
    }
  }
  // Handle address space casts between mixed sized pointers.
  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
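  // For example, a function that computes both x / y and x % y should end up
  // with a single idiv, with the quotient in EAX and the remainder in EDX,
  // because both expressions legalize to the same two-result divrem node.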
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Legal);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }

  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  } else {
    for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
      if (VT == MVT::i64 && !Subtarget.is64Bit())
        continue;
      setOperationAction(ISD::CTLZ, VT, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
    }
  }

  for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
                  ISD::STRICT_FP_TO_FP16}) {
    // Special handling for half-precision floating point conversions.
    // If we don't have F16C support, then lower half float conversions
    // into library calls.
    setOperationAction(
        Op, MVT::f32,
        (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
    // There's never any support for operations beyond MVT::f32.
    setOperationAction(Op, MVT::f64, Expand);
    setOperationAction(Op, MVT::f80, Expand);
    setOperationAction(Op, MVT::f128, Expand);
  }
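  // Without F16C the Expand action above turns these conversions into the
  // usual soft-float helpers (typically __gnu_h2f_ieee / __gnu_f2h_ieee);
  // with F16C the custom lowering can use VCVTPH2PS / VCVTPS2PH instead.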
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    else
      setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  // X86 wants to expand cmov itself.
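  // (SELECT on these types is custom lowered to an X86ISD::CMOV node fed by
  // EFLAGS; see LowerSELECT later in this file.)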
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol, VT, Custom);
    setOperationAction(ISD::BlockAddress, VT, Custom);
  }

  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }

  if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget.is64Bit())
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);

  if (Subtarget.hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // These might be better off as horizontal vector ops.
      setOperationAction(ISD::FADD, VT, Custom);
      setOperationAction(ISD::FSUB, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 &&
             (UseX87 || Is64Bit)) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    if (UseX87)
      setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    if (UseX87)
      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    if (UseX87) {
      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF, VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
  }
  // Expand FP32 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f32)) {
    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0f)); // xorps
  }
  // Expand FP64 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f64)) {
    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
      addLegalFPImmediate(APFloat(+0.0)); // FLD0
      addLegalFPImmediate(APFloat(+1.0)); // FLD1
      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0)); // xorpd
  }
  // Handle constrained floating-point operations on scalar types.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  // f80 always uses X87.
  if (UseX87) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    // Always expand sin/cos functions even though x87 has an instruction.
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
    setOperationAction(ISD::LROUND, MVT::f80, Expand);
    setOperationAction(ISD::LLROUND, MVT::f80, Expand);
    setOperationAction(ISD::LRINT, MVT::f80, Custom);
    setOperationAction(ISD::LLRINT, MVT::f80, Custom);

    // Handle constrained floating-point operations on scalar types.
    setOperationAction(ISD::STRICT_FADD, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
    // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
    // as Custom by the f128 handling below.
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
  }
  // f128 uses xmm registers, but most operations require libcalls.
  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                   : &X86::VR128RegClass);

    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps

    setOperationAction(ISD::FADD, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
    setOperationAction(ISD::FSUB, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
    setOperationAction(ISD::FDIV, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
    setOperationAction(ISD::FMUL, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
    setOperationAction(ISD::FMA, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMA, MVT::f128, LibCall);

    setOperationAction(ISD::FABS, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);

    setOperationAction(ISD::FSIN, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSIN, MVT::f128, LibCall);
    setOperationAction(ISD::FCOS, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FCOS, MVT::f128, LibCall);
    setOperationAction(ISD::FSINCOS, MVT::f128, LibCall);

    setOperationAction(ISD::FSQRT, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
    // We need to custom handle any FP_ROUND with an f128 input, but
    // LegalizeDAG uses the result type to know when to run a custom handler.
    // So we have to list all legal floating point result types here.
    if (isTypeLegal(MVT::f32)) {
      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
    }
    if (isTypeLegal(MVT::f64)) {
      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
    }
    if (isTypeLegal(MVT::f80)) {
      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
    }

    setOperationAction(ISD::SETCC, MVT::f128, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  }
  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
  }
  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);

    setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
    setOperationAction(ISD::STORE, MVT::v2f32, Custom);

    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::UREM, VT, Custom);
    }

    setOperationAction(ISD::MUL, MVT::v2i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i8, Custom);
    setOperationAction(ISD::MUL, MVT::v8i8, Custom);

    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
    }

    setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
    setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);

    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::ABS, VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
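      // Pre-AVX512 SSE only provides PCMPGT/PCMPEQ, so a "less than" compare
      // is typically commuted into a "greater than" with swapped operands.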
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);

      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i8, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom);

    // Custom legalize these to avoid over promotion or custom promotion.
    for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
    }

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom);

    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f32, Custom);
    // We want to legalize this to an f64 load rather than an i64 load on
    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
    // the corresponding stores.
    setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
    setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
    setOperationAction(ISD::STORE, MVT::v2i32, Custom);
    setOperationAction(ISD::STORE, MVT::v4i16, Custom);
    setOperationAction(ISD::STORE, MVT::v8i8, Custom);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);

    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);

    // In the customized shift lowering, the legal v4i32/v2i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }

    setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v8i16, Custom);

    // With 512-bit registers or AVX512VL+BW, expanding (and promoting the
    // shifts) is better.
    if (!Subtarget.useAVX512Regs() &&
        !(Subtarget.hasBWI() && Subtarget.hasVLX()))
      setOperationAction(ISD::ROTL, MVT::v16i8, Custom);

    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);

    // These might be better off as horizontal vector ops.
    setOperationAction(ISD::ADD, MVT::i16, Custom);
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i16, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::FRINT, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal);
      setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal);

      setOperationAction(ISD::FROUND, RoundedTy, Custom);
    }

    setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
    setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v4i32, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
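    // (On SSE4.1 such a blend is a single PBLENDVB; keeping the v16i8 VSELECT
    // Legal lets it be matched directly.)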
    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
    }

    // i8 vectors are custom because the source register and source
    // memory operand types are not the same width.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
      // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we
      // can do the pre and post work in the vector domain.
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
      // We need to mark SINT_TO_FP as Custom even though we want to expand it
      // so that DAG combine doesn't try to turn it into uint_to_fp.
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
    }
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::ROTL, VT, Custom);

    // XOP can efficiently perform BITREVERSE with VPPERM.
    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);
  }
  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
    bool HasInt256 = Subtarget.hasInt256();

    addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);

    for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
      setOperationAction(ISD::FFLOOR, VT, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
      setOperationAction(ISD::FCEIL, VT, Legal);
      setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
      setOperationAction(ISD::FTRUNC, VT, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
      setOperationAction(ISD::FRINT, VT, Legal);
      setOperationAction(ISD::STRICT_FRINT, VT, Legal);
      setOperationAction(ISD::FNEARBYINT, VT, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);

      setOperationAction(ISD::FROUND, VT, Custom);

      setOperationAction(ISD::FNEG, VT, Custom);
      setOperationAction(ISD::FABS, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
    }
    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Legal);

    setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal);
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);

    // In the customized shift lowering, the legal v8i32/v4i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }
    // These types need custom splitting if their input is a 128-bit vector.
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);

    setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v16i16, Custom);

    // With BWI, expanding (and promoting the shifts) is better.
    if (!Subtarget.useBWIRegs())
      setOperationAction(ISD::ROTL, MVT::v32i8, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
    }

    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTLZ, VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }

    if (Subtarget.hasAnyFMA()) {
      for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
                       MVT::v2f64, MVT::v4f64 }) {
        setOperationAction(ISD::FMA, VT, Legal);
        setOperationAction(ISD::STRICT_FMA, VT, Legal);
      }
    }
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
    }

    setOperationAction(ISD::MUL, MVT::v4i64, Custom);
    setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MUL, MVT::v32i8, Custom);

    setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
    setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v32i8, Custom);

    setOperationAction(ISD::ABS, MVT::v4i64, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
    setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i64, Custom);

    setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
    setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
      setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
    }

    for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
    }
1320 // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1321 // when we have a 256-bit-wide blend with immediate.
1322 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1323 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1325 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1326 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1327 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1328 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1329 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1330 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1331 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1332 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1336 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1337 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1338 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1339 setOperationAction(ISD::MSTORE, VT, Legal);
1342 // Extract subvector is special because the value type
1343 // (result) is 128-bit but the source is 256-bit wide.
1344 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1345 MVT::v4f32, MVT::v2f64 }) {
1346 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1349 // Custom lower several nodes for 256-bit types.
1350 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1351 MVT::v8f32, MVT::v4f64 }) {
1352 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1353 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1354 setOperationAction(ISD::VSELECT, VT, Custom);
1355 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1356 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1357 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1358 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1359 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1360 setOperationAction(ISD::STORE, VT, Custom);
1364 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1366 // Custom legalize 2x32 to get a little better code.
1367 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1368 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1370 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1371 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1372 setOperationAction(ISD::MGATHER, VT, Custom);
1376 // This block controls legalization of the mask vector sizes that are
1377 // available with AVX512. 512-bit vectors are in a separate block controlled
1378 // by useAVX512Regs.
1379 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1380 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1381 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1382 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1383 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1384 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1386 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1387 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1388 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1390 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1391 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1392 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1393 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1394 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1395 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1396 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1397 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1398 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1399 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1400 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom);
1401 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom);
1403 // There is no byte-sized k-register load or store without AVX512DQ.
1404 if (!Subtarget.hasDQI()) {
1405 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1406 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1407 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1408 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1410 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1411 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1412 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1413 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1416 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1417 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1418 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1419 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1420 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1423 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1424 setOperationAction(ISD::ADD, VT, Custom);
1425 setOperationAction(ISD::SUB, VT, Custom);
1426 setOperationAction(ISD::MUL, VT, Custom);
1427 setOperationAction(ISD::UADDSAT, VT, Custom);
1428 setOperationAction(ISD::SADDSAT, VT, Custom);
1429 setOperationAction(ISD::USUBSAT, VT, Custom);
1430 setOperationAction(ISD::SSUBSAT, VT, Custom);
1431 setOperationAction(ISD::VSELECT, VT, Expand);
1434 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1435 setOperationAction(ISD::SETCC, VT, Custom);
1436 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1437 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1438 setOperationAction(ISD::SELECT, VT, Custom);
1439 setOperationAction(ISD::TRUNCATE, VT, Custom);
1441 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1442 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1443 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1444 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1445 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1446 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1449 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1450 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1453 // This block controls legalization for 512-bit operations with 32/64-bit
1454 // elements. 512-bit support can be disabled based on the prefer-vector-width
1455 // and required-vector-width function attributes.
1456 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1457 bool HasBWI = Subtarget.hasBWI();
1459 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1460 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1461 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1462 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1463 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1464 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1466 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1467 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1468 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1469 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1470 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1471 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1473 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1476 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1477 setOperationAction(ISD::FNEG, VT, Custom);
1478 setOperationAction(ISD::FABS, VT, Custom);
1479 setOperationAction(ISD::FMA, VT, Legal);
1480 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1481 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1484 for (MVT VT : { MVT::v16i1, MVT::v16i8, MVT::v16i16 }) {
1485 setOperationPromotedToType(ISD::FP_TO_SINT , VT, MVT::v16i32);
1486 setOperationPromotedToType(ISD::FP_TO_UINT , VT, MVT::v16i32);
1487 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1488 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1490 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1491 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1492 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Legal);
1493 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Legal);
1494 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1495 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1496 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Legal);
1497 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Legal);
1499 setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal);
1500 setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal);
1501 setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal);
1502 setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal);
1503 setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal);
1504 setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal);
1505 setOperationAction(ISD::STRICT_FDIV, MVT::v16f32, Legal);
1506 setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal);
1507 setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal);
1508 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal);
1509 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal);
1510 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal);
1512 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1513 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1514 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1515 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1516 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1518 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1520 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1521 // to 512-bit rather than use the AVX2 instructions so that we can use
1522 // k-registers.
1523 if (!Subtarget.hasVLX()) {
1524 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1525 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1526 setOperationAction(ISD::MLOAD, VT, Custom);
1527 setOperationAction(ISD::MSTORE, VT, Custom);
1531 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Legal);
1532 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Legal);
1533 setOperationAction(ISD::TRUNCATE, MVT::v32i8, HasBWI ? Legal : Custom);
1534 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1535 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1536 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1537 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1538 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1539 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1540 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1541 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1542 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1543 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1546 // Extends from v64i1 masks to 512-bit vectors.
1547 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1548 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1549 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1552 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1553 setOperationAction(ISD::FFLOOR, VT, Legal);
1554 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1555 setOperationAction(ISD::FCEIL, VT, Legal);
1556 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1557 setOperationAction(ISD::FTRUNC, VT, Legal);
1558 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1559 setOperationAction(ISD::FRINT, VT, Legal);
1560 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1561 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1562 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1564 setOperationAction(ISD::FROUND, VT, Custom);
1567 for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1568 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1569 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1572 setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1573 setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1574 setOperationAction(ISD::ADD, MVT::v64i8, HasBWI ? Legal : Custom);
1575 setOperationAction(ISD::SUB, MVT::v64i8, HasBWI ? Legal : Custom);
1577 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1578 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1579 setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1580 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1582 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1583 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1584 setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1585 setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1586 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1587 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1589 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1591 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1592 setOperationAction(ISD::SRL, VT, Custom);
1593 setOperationAction(ISD::SHL, VT, Custom);
1594 setOperationAction(ISD::SRA, VT, Custom);
1595 setOperationAction(ISD::SETCC, VT, Custom);
1597 // These condition codes aren't legal in SSE/AVX; under AVX512 we use
1598 // setcc all the way to isel and prefer SETGT in some isel patterns.
1599 setCondCodeAction(ISD::SETLT, VT, Custom);
1600 setCondCodeAction(ISD::SETLE, VT, Custom);
1602 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1603 setOperationAction(ISD::SMAX, VT, Legal);
1604 setOperationAction(ISD::UMAX, VT, Legal);
1605 setOperationAction(ISD::SMIN, VT, Legal);
1606 setOperationAction(ISD::UMIN, VT, Legal);
1607 setOperationAction(ISD::ABS, VT, Legal);
1608 setOperationAction(ISD::CTPOP, VT, Custom);
1609 setOperationAction(ISD::ROTL, VT, Custom);
1610 setOperationAction(ISD::ROTR, VT, Custom);
1611 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1612 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1615 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1616 setOperationAction(ISD::ABS, VT, HasBWI ? Legal : Custom);
1617 setOperationAction(ISD::CTPOP, VT, Subtarget.hasBITALG() ? Legal : Custom);
1618 setOperationAction(ISD::CTLZ, VT, Custom);
1619 setOperationAction(ISD::SMAX, VT, HasBWI ? Legal : Custom);
1620 setOperationAction(ISD::UMAX, VT, HasBWI ? Legal : Custom);
1621 setOperationAction(ISD::SMIN, VT, HasBWI ? Legal : Custom);
1622 setOperationAction(ISD::UMIN, VT, HasBWI ? Legal : Custom);
1623 setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1624 setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1625 setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1626 setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1629 if (Subtarget.hasDQI()) {
1630 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1631 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1632 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i64, Legal);
1633 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i64, Legal);
1634 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1635 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1636 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i64, Legal);
1637 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i64, Legal);
1639 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1642 if (Subtarget.hasCDI()) {
1643 // Non-VLX subtargets extend 128/256-bit vectors to use the 512-bit version.
1644 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1645 setOperationAction(ISD::CTLZ, VT, Legal);
1647 } // Subtarget.hasCDI()
1649 if (Subtarget.hasVPOPCNTDQ()) {
1650 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1651 setOperationAction(ISD::CTPOP, VT, Legal);
1654 // Extract subvector is special because the value type
1655 // (result) is 256-bit but the source is 512-bit wide.
1656 // 128-bit was made Legal under AVX1.
1657 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1658 MVT::v8f32, MVT::v4f64 })
1659 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1661 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1662 MVT::v16f32, MVT::v8f64 }) {
1663 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1664 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1665 setOperationAction(ISD::SELECT, VT, Custom);
1666 setOperationAction(ISD::VSELECT, VT, Custom);
1667 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1668 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1669 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1670 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1671 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1674 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1675 setOperationAction(ISD::MLOAD, VT, Legal);
1676 setOperationAction(ISD::MSTORE, VT, Legal);
1677 setOperationAction(ISD::MGATHER, VT, Custom);
1678 setOperationAction(ISD::MSCATTER, VT, Custom);
1681 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1682 setOperationAction(ISD::MLOAD, VT, Legal);
1683 setOperationAction(ISD::MSTORE, VT, Legal);
1686 setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1687 setOperationAction(ISD::STORE, MVT::v64i8, Custom);
1690 if (Subtarget.hasVBMI2()) {
1691 for (auto VT : { MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1692 setOperationAction(ISD::FSHL, VT, Custom);
1693 setOperationAction(ISD::FSHR, VT, Custom);
1698 // This block controls legalization for operations that don't have
1699 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1700 // smaller types.
1701 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1702 // These operations are handled on non-VLX by artificially widening in
1703 // isel patterns.
1705 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32,
1706 Subtarget.hasVLX() ? Legal : Custom);
1707 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32,
1708 Subtarget.hasVLX() ? Legal : Custom);
1709 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
1710 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32,
1711 Subtarget.hasVLX() ? Legal : Custom);
1712 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32,
1713 Subtarget.hasVLX() ? Legal : Custom);
1714 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom);
1715 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32,
1716 Subtarget.hasVLX() ? Legal : Custom);
1717 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32,
1718 Subtarget.hasVLX() ? Legal : Custom);
1719 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32,
1720 Subtarget.hasVLX() ? Legal : Custom);
1721 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32,
1722 Subtarget.hasVLX() ? Legal : Custom);
1724 if (Subtarget.hasDQI()) {
1725 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1726 // v2f32 UINT_TO_FP is already custom under SSE2.
1727 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1728 isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
1729 "Unexpected operation action!");
1730 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1731 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
1732 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
1733 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
1734 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
1737 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1738 setOperationAction(ISD::SMAX, VT, Legal);
1739 setOperationAction(ISD::UMAX, VT, Legal);
1740 setOperationAction(ISD::SMIN, VT, Legal);
1741 setOperationAction(ISD::UMIN, VT, Legal);
1742 setOperationAction(ISD::ABS, VT, Legal);
1745 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1746 setOperationAction(ISD::ROTL, VT, Custom);
1747 setOperationAction(ISD::ROTR, VT, Custom);
1750 // Custom legalize 2x32 to get a little better code.
1751 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1752 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1754 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1755 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1756 setOperationAction(ISD::MSCATTER, VT, Custom);
1758 if (Subtarget.hasDQI()) {
1759 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1760 setOperationAction(ISD::SINT_TO_FP, VT,
1761 Subtarget.hasVLX() ? Legal : Custom);
1762 setOperationAction(ISD::UINT_TO_FP, VT,
1763 Subtarget.hasVLX() ? Legal : Custom);
1764 setOperationAction(ISD::STRICT_SINT_TO_FP, VT,
1765 Subtarget.hasVLX() ? Legal : Custom);
1766 setOperationAction(ISD::STRICT_UINT_TO_FP, VT,
1767 Subtarget.hasVLX() ? Legal : Custom);
1768 setOperationAction(ISD::FP_TO_SINT, VT,
1769 Subtarget.hasVLX() ? Legal : Custom);
1770 setOperationAction(ISD::FP_TO_UINT, VT,
1771 Subtarget.hasVLX() ? Legal : Custom);
1772 setOperationAction(ISD::STRICT_FP_TO_SINT, VT,
1773 Subtarget.hasVLX() ? Legal : Custom);
1774 setOperationAction(ISD::STRICT_FP_TO_UINT, VT,
1775 Subtarget.hasVLX() ? Legal : Custom);
1776 setOperationAction(ISD::MUL, VT, Legal);
1780 if (Subtarget.hasCDI()) {
1781 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1782 setOperationAction(ISD::CTLZ, VT, Legal);
1784 } // Subtarget.hasCDI()
1786 if (Subtarget.hasVPOPCNTDQ()) {
1787 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1788 setOperationAction(ISD::CTPOP, VT, Legal);
1792 // This block controls legalization of v32i1/v64i1, which are available with
1793 // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1794 // useBWIRegs.
1795 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1796 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1797 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1799 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1800 setOperationAction(ISD::ADD, VT, Custom);
1801 setOperationAction(ISD::SUB, VT, Custom);
1802 setOperationAction(ISD::MUL, VT, Custom);
1803 setOperationAction(ISD::VSELECT, VT, Expand);
1804 setOperationAction(ISD::UADDSAT, VT, Custom);
1805 setOperationAction(ISD::SADDSAT, VT, Custom);
1806 setOperationAction(ISD::USUBSAT, VT, Custom);
1807 setOperationAction(ISD::SSUBSAT, VT, Custom);
1809 setOperationAction(ISD::TRUNCATE, VT, Custom);
1810 setOperationAction(ISD::SETCC, VT, Custom);
1811 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1812 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1813 setOperationAction(ISD::SELECT, VT, Custom);
1814 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1815 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1816 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1817 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1820 for (auto VT : { MVT::v16i1, MVT::v32i1 })
1821 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1823 // Extends from v32i1 masks to 256-bit vectors.
1824 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1825 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1826 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
1828 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1829 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1830 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1833 // These operations are handled on non-VLX by artificially widening in
1834 // isel patterns.
1835 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1837 if (Subtarget.hasBITALG()) {
1838 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1839 setOperationAction(ISD::CTPOP, VT, Legal);
1843 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1844 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1845 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1846 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1847 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1848 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1850 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1851 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1852 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1853 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1854 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1856 if (Subtarget.hasBWI()) {
1857 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1858 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1861 if (Subtarget.hasVBMI2()) {
1862 // TODO: Make these legal even without VLX?
1863 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1864 MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1865 setOperationAction(ISD::FSHL, VT, Custom);
1866 setOperationAction(ISD::FSHR, VT, Custom);
1870 setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
1871 setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
1872 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1875 // We want to custom lower some of our intrinsics.
1876 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1877 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1878 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1879 if (!Subtarget.is64Bit()) {
1880 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1883 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1884 // handle type legalization for these operations here.
1886 // FIXME: We really should do custom legalization for addition and
1887 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1888 // than generic legalization for 64-bit multiplication-with-overflow, though.
1889 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1890 if (VT == MVT::i64 && !Subtarget.is64Bit())
1891 continue;
1892 // Add/Sub/Mul with overflow operations are custom lowered.
1893 setOperationAction(ISD::SADDO, VT, Custom);
1894 setOperationAction(ISD::UADDO, VT, Custom);
1895 setOperationAction(ISD::SSUBO, VT, Custom);
1896 setOperationAction(ISD::USUBO, VT, Custom);
1897 setOperationAction(ISD::SMULO, VT, Custom);
1898 setOperationAction(ISD::UMULO, VT, Custom);
1900 // Support carry in as value rather than glue.
1901 setOperationAction(ISD::ADDCARRY, VT, Custom);
1902 setOperationAction(ISD::SUBCARRY, VT, Custom);
1903 setOperationAction(ISD::SETCCCARRY, VT, Custom);
1906 if (!Subtarget.is64Bit()) {
1907 // These libcalls are not available in 32-bit.
1908 setLibcallName(RTLIB::SHL_I128, nullptr);
1909 setLibcallName(RTLIB::SRL_I128, nullptr);
1910 setLibcallName(RTLIB::SRA_I128, nullptr);
1911 setLibcallName(RTLIB::MUL_I128, nullptr);
1914 // Combine sin / cos into _sincos_stret if it is available.
1915 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1916 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1917 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1918 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1921 if (Subtarget.isTargetWin64()) {
1922 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1923 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1924 setOperationAction(ISD::SREM, MVT::i128, Custom);
1925 setOperationAction(ISD::UREM, MVT::i128, Custom);
1926 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1927 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1930 // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1931 // is. We should promote the value to 64-bits to solve this.
1932 // This is what the CRT headers do - `fmodf` is an inline header
1933 // function casting to f64 and calling `fmod`.
1934 if (Subtarget.is32Bit() &&
1935 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
1936 for (ISD::NodeType Op :
1937 {ISD::FCEIL, ISD::STRICT_FCEIL,
1938 ISD::FCOS, ISD::STRICT_FCOS,
1939 ISD::FEXP, ISD::STRICT_FEXP,
1940 ISD::FFLOOR, ISD::STRICT_FFLOOR,
1941 ISD::FREM, ISD::STRICT_FREM,
1942 ISD::FLOG, ISD::STRICT_FLOG,
1943 ISD::FLOG10, ISD::STRICT_FLOG10,
1944 ISD::FPOW, ISD::STRICT_FPOW,
1945 ISD::FSIN, ISD::STRICT_FSIN})
1946 if (isOperationExpand(Op, MVT::f32))
1947 setOperationAction(Op, MVT::f32, Promote);
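// An illustrative reading of the promotion above (not upstream text): with
// these actions a 32-bit MSVC target lowers e.g. "frem float %a, %b" by
// extending both operands to f64, calling fmod, and truncating the result
// back to f32, matching what the CRT's inline fmodf wrapper does.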
1949 // We have target-specific dag combine patterns for the following nodes:
1950 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1951 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1952 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
1953 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1954 setTargetDAGCombine(ISD::CONCAT_VECTORS);
1955 setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1956 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1957 setTargetDAGCombine(ISD::BITCAST);
1958 setTargetDAGCombine(ISD::VSELECT);
1959 setTargetDAGCombine(ISD::SELECT);
1960 setTargetDAGCombine(ISD::SHL);
1961 setTargetDAGCombine(ISD::SRA);
1962 setTargetDAGCombine(ISD::SRL);
1963 setTargetDAGCombine(ISD::OR);
1964 setTargetDAGCombine(ISD::AND);
1965 setTargetDAGCombine(ISD::ADD);
1966 setTargetDAGCombine(ISD::FADD);
1967 setTargetDAGCombine(ISD::FSUB);
1968 setTargetDAGCombine(ISD::FNEG);
1969 setTargetDAGCombine(ISD::FMA);
1970 setTargetDAGCombine(ISD::STRICT_FMA);
1971 setTargetDAGCombine(ISD::FMINNUM);
1972 setTargetDAGCombine(ISD::FMAXNUM);
1973 setTargetDAGCombine(ISD::SUB);
1974 setTargetDAGCombine(ISD::LOAD);
1975 setTargetDAGCombine(ISD::MLOAD);
1976 setTargetDAGCombine(ISD::STORE);
1977 setTargetDAGCombine(ISD::MSTORE);
1978 setTargetDAGCombine(ISD::TRUNCATE);
1979 setTargetDAGCombine(ISD::ZERO_EXTEND);
1980 setTargetDAGCombine(ISD::ANY_EXTEND);
1981 setTargetDAGCombine(ISD::SIGN_EXTEND);
1982 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1983 setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
1984 setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
1985 setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
1986 setTargetDAGCombine(ISD::SINT_TO_FP);
1987 setTargetDAGCombine(ISD::UINT_TO_FP);
1988 setTargetDAGCombine(ISD::STRICT_SINT_TO_FP);
1989 setTargetDAGCombine(ISD::STRICT_UINT_TO_FP);
1990 setTargetDAGCombine(ISD::SETCC);
1991 setTargetDAGCombine(ISD::MUL);
1992 setTargetDAGCombine(ISD::XOR);
1993 setTargetDAGCombine(ISD::MSCATTER);
1994 setTargetDAGCombine(ISD::MGATHER);
1995 setTargetDAGCombine(ISD::FP16_TO_FP);
1996 setTargetDAGCombine(ISD::FP_EXTEND);
1997 setTargetDAGCombine(ISD::STRICT_FP_EXTEND);
1998 setTargetDAGCombine(ISD::FP_ROUND);
2000 computeRegisterProperties(Subtarget.getRegisterInfo());
2002 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2003 MaxStoresPerMemsetOptSize = 8;
2004 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2005 MaxStoresPerMemcpyOptSize = 4;
2006 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2007 MaxStoresPerMemmoveOptSize = 4;
2009 // TODO: These control memcmp expansion in CGP and could be raised higher, but
2010 // that needs to be benchmarked and balanced with the potential use of vector
2011 // load/store types (PR33329, PR33914).
2012 MaxLoadsPerMemcmp = 2;
2013 MaxLoadsPerMemcmpOptSize = 2;
2015 // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
2016 setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
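// Illustrative: with the default value of 4 the call above requests
// Align(1 << 4) = 16-byte alignment for loop headers; a value of 5 would
// request 32-byte alignment.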
2018 // An out-of-order CPU can speculatively execute past a predictable branch,
2019 // but a conditional move could be stalled by an expensive earlier operation.
2020 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2021 EnableExtLdPromotion = true;
2022 setPrefFunctionAlignment(Align(16));
2024 verifyIntrinsicTables();
2026 // Default to having -disable-strictnode-mutation on
2027 IsStrictFPEnabled = true;
2030 // This has so far only been implemented for 64-bit MachO.
2031 bool X86TargetLowering::useLoadStackGuardNode() const {
2032 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2035 bool X86TargetLowering::useStackGuardXorFP() const {
2036 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2037 return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2040 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2041 const SDLoc &DL) const {
2042 EVT PtrTy = getPointerTy(DAG.getDataLayout());
2043 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2044 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2045 return SDValue(Node, 0);
2048 TargetLoweringBase::LegalizeTypeAction
2049 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2050 if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2051 !Subtarget.hasBWI())
2052 return TypeSplitVector;
2054 if (VT.getVectorNumElements() != 1 &&
2055 VT.getVectorElementType() != MVT::i1)
2056 return TypeWidenVector;
2058 return TargetLoweringBase::getPreferredVectorAction(VT);
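// Example of getPreferredVectorAction above (illustrative): on an AVX512F
// subtarget without BWI, v32i1/v64i1 are split into legal mask halves, while
// most other multi-element vectors with non-i1 elements are widened rather
// than split.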
2061 static std::pair<MVT, unsigned>
2062 handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
2063 const X86Subtarget &Subtarget) {
2064 // v2i1/v4i1/v8i1/v16i1 all pass in xmm registers unless the calling
2065 // convention is one that uses k registers.
2066 if (NumElts == 2)
2067 return {MVT::v2i64, 1};
2068 if (NumElts == 4)
2069 return {MVT::v4i32, 1};
2070 if (NumElts == 8 && CC != CallingConv::X86_RegCall &&
2071 CC != CallingConv::Intel_OCL_BI)
2072 return {MVT::v8i16, 1};
2073 if (NumElts == 16 && CC != CallingConv::X86_RegCall &&
2074 CC != CallingConv::Intel_OCL_BI)
2075 return {MVT::v16i8, 1};
2076 // v32i1 passes in ymm unless we have BWI and the calling convention is
2077 // regcall.
2078 if (NumElts == 32 && (!Subtarget.hasBWI() || CC != CallingConv::X86_RegCall))
2079 return {MVT::v32i8, 1};
2080 // Split v64i1 vectors if we don't have v64i8 available.
2081 if (NumElts == 64 && Subtarget.hasBWI() && CC != CallingConv::X86_RegCall) {
2082 if (Subtarget.useAVX512Regs())
2083 return {MVT::v64i8, 1};
2084 return {MVT::v32i8, 2};
2087 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2088 if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
2089 NumElts > 64)
2090 return {MVT::i8, NumElts};
2092 return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
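// Worked examples for handleMaskRegisterForCallingConv (illustrative,
// assuming an AVX512BW subtarget and the C calling convention): a v64i1
// argument is passed as one v64i8 value when 512-bit registers are in use,
// or as two v32i8 values otherwise; a non-power-of-two mask such as v48i1 is
// broken into 48 i8 scalars to match AVX2 behavior.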
2095 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2096 CallingConv::ID CC,
2097 EVT VT) const {
2098 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2099 Subtarget.hasAVX512()) {
2100 unsigned NumElts = VT.getVectorNumElements();
2102 MVT RegisterVT;
2103 unsigned NumRegisters;
2104 std::tie(RegisterVT, NumRegisters) =
2105 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2106 if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2107 return RegisterVT;
2110 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2113 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2114 CallingConv::ID CC,
2115 EVT VT) const {
2116 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2117 Subtarget.hasAVX512()) {
2118 unsigned NumElts = VT.getVectorNumElements();
2120 MVT RegisterVT;
2121 unsigned NumRegisters;
2122 std::tie(RegisterVT, NumRegisters) =
2123 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2124 if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2125 return NumRegisters;
2128 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2131 unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
2132 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2133 unsigned &NumIntermediates, MVT &RegisterVT) const {
2134 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2135 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2136 Subtarget.hasAVX512() &&
2137 (!isPowerOf2_32(VT.getVectorNumElements()) ||
2138 (VT.getVectorNumElements() == 64 && !Subtarget.hasBWI()) ||
2139 VT.getVectorNumElements() > 64)) {
2140 RegisterVT = MVT::i8;
2141 IntermediateVT = MVT::i1;
2142 NumIntermediates = VT.getVectorNumElements();
2143 return NumIntermediates;
2146 // Split v64i1 vectors if we don't have v64i8 available.
2147 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2148 CC != CallingConv::X86_RegCall) {
2149 RegisterVT = MVT::v32i8;
2150 IntermediateVT = MVT::v32i1;
2151 NumIntermediates = 2;
2152 return 2;
2155 return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
2156 NumIntermediates, RegisterVT);
2159 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
2160 LLVMContext& Context,
2161 EVT VT) const {
2162 if (!VT.isVector())
2163 return MVT::i8;
2165 if (Subtarget.hasAVX512()) {
2166 const unsigned NumElts = VT.getVectorNumElements();
2168 // Figure out what this type will be legalized to.
2169 EVT LegalVT = VT;
2170 while (getTypeAction(Context, LegalVT) != TypeLegal)
2171 LegalVT = getTypeToTransformTo(Context, LegalVT);
2173 // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2174 if (LegalVT.getSimpleVT().is512BitVector())
2175 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2177 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2178 // If we legalized to less than a 512-bit vector, then we will use a vXi1
2179 // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2180 // vXi16/vXi8.
2181 MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2182 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2183 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2187 return VT.changeVectorElementTypeToInteger();
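// Example of getSetCCResultType above (illustrative): a v16i32 compare on a
// subtarget using 512-bit registers yields v16i1; with VLX a v8i32 compare
// also yields v8i1, while without AVX512 it falls back to v8i32 (lanes of
// all-zeros/all-ones).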
2190 /// Helper for getByValTypeAlignment to determine
2191 /// the desired ByVal argument alignment.
2192 static void getMaxByValAlign(Type *Ty, Align &MaxAlign) {
2195 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2196 if (VTy->getPrimitiveSizeInBits().getFixedSize() == 128)
2197 MaxAlign = Align(16);
2198 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2199 Align EltAlign;
2200 getMaxByValAlign(ATy->getElementType(), EltAlign);
2201 if (EltAlign > MaxAlign)
2202 MaxAlign = EltAlign;
2203 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2204 for (auto *EltTy : STy->elements()) {
2205 Align EltAlign;
2206 getMaxByValAlign(EltTy, EltAlign);
2207 if (EltAlign > MaxAlign)
2208 MaxAlign = EltAlign;
2215 /// Return the desired alignment for ByVal aggregate
2216 /// function arguments in the caller parameter area. For X86, aggregates
2217 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2218 /// are at 4-byte boundaries.
2219 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2220 const DataLayout &DL) const {
2221 if (Subtarget.is64Bit()) {
2222 // Max of 8 and alignment of type.
2223 Align TyAlign = DL.getABITypeAlign(Ty);
2224 if (TyAlign > 8)
2225 return TyAlign.value();
2226 return 8;
2227 }
2229 Align Alignment(4);
2230 if (Subtarget.hasSSE1())
2231 getMaxByValAlign(Ty, Alignment);
2232 return Alignment.value();
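// Illustrative example of getByValTypeAlignment: on 32-bit x86 with SSE, a
// byval struct containing a <4 x float> member is placed at a 16-byte
// boundary, whereas a struct of plain i32 members keeps the default 4-byte
// alignment.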
2235 /// It returns EVT::Other if the type should be determined using generic
2236 /// target-independent logic.
2237 /// For vector ops we check that the overall size isn't larger than our
2238 /// preferred vector width.
2239 EVT X86TargetLowering::getOptimalMemOpType(
2240 const MemOp &Op, const AttributeList &FuncAttributes) const {
2241 if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2242 if (Op.size() >= 16 &&
2243 (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
2244 // FIXME: Check if unaligned 64-byte accesses are slow.
2245 if (Op.size() >= 64 && Subtarget.hasAVX512() &&
2246 (Subtarget.getPreferVectorWidth() >= 512)) {
2247 return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2249 // FIXME: Check if unaligned 32-byte accesses are slow.
2250 if (Op.size() >= 32 && Subtarget.hasAVX() &&
2251 (Subtarget.getPreferVectorWidth() >= 256)) {
2252 // Although this isn't a well-supported type for AVX1, we'll let
2253 // legalization and shuffle lowering produce the optimal codegen. If we
2254 // choose an optimal type with a vector element larger than a byte,
2255 // getMemsetStores() may create an intermediate splat (using an integer
2256 // multiply) before we splat as a vector.
2257 return MVT::v32i8;
2259 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2260 return MVT::v16i8;
2261 // TODO: Can SSE1 handle a byte vector?
2262 // If we have SSE1 registers we should be able to use them.
2263 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2264 (Subtarget.getPreferVectorWidth() >= 128))
2265 return MVT::v4f32;
2266 } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
2267 Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2268 // Do not use f64 to lower memcpy if source is string constant. It's
2269 // better to use i32 to avoid the loads.
2270 // Also, do not use f64 to lower memset unless this is a memset of zeros.
2271 // The gymnastics of splatting a byte value into an XMM register and then
2272 // only using 8-byte stores (because this is a CPU with slow unaligned
2273 // 16-byte accesses) makes that a loser.
2274 return MVT::f64;
2277 // This is a compromise. If we reach here, unaligned accesses may be slow on
2278 // this target. However, creating smaller, aligned accesses could be even
2279 // slower and would certainly be a lot more code.
2280 if (Subtarget.is64Bit() && Op.size() >= 8)
2281 return MVT::i64;
2282 return MVT::i32;
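// Illustrative examples of getOptimalMemOpType: a 128-byte memset on an
// AVX-512 subtarget that prefers 512-bit vectors is expanded with v64i8
// stores (v16i32 without BWI), while a noimplicitfloat function on a 64-bit
// target falls back to i64 (then i32) stores.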
2285 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2286 if (VT == MVT::f32)
2287 return X86ScalarSSEf32;
2288 else if (VT == MVT::f64)
2289 return X86ScalarSSEf64;
2290 return true;
2293 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2294 EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2295 bool *Fast) const {
2296 if (Fast) {
2297 switch (VT.getSizeInBits()) {
2298 default:
2299 // 8-byte and under are always assumed to be fast.
2300 *Fast = true;
2301 break;
2302 case 128:
2303 *Fast = !Subtarget.isUnalignedMem16Slow();
2304 break;
2305 case 256:
2306 *Fast = !Subtarget.isUnalignedMem32Slow();
2307 break;
2308 // TODO: What about AVX-512 (512-bit) accesses?
2309 }
2310 }
2311 // NonTemporal vector memory ops must be aligned.
2312 if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2313 // NT loads can only be vector aligned, so if it's less aligned than the
2314 // minimum vector size (which we can split the vector down to), we might as
2315 // well use a regular unaligned vector load.
2316 // We don't have any NT loads pre-SSE41.
2317 if (!!(Flags & MachineMemOperand::MOLoad))
2318 return (Align < 16 || !Subtarget.hasSSE41());
2319 return false;
2321 // Misaligned accesses of any size are always allowed.
2322 return true;
2325 /// Return the entry encoding for a jump table in the
2326 /// current function. The returned value is a member of the
2327 /// MachineJumpTableInfo::JTEntryKind enum.
2328 unsigned X86TargetLowering::getJumpTableEncoding() const {
2329 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2330 // symbol.
2331 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2332 return MachineJumpTableInfo::EK_Custom32;
2334 // Otherwise, use the normal jump table encoding heuristics.
2335 return TargetLowering::getJumpTableEncoding();
2338 bool X86TargetLowering::useSoftFloat() const {
2339 return Subtarget.useSoftFloat();
2342 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2343 ArgListTy &Args) const {
2345 // Only relabel X86-32 for C / Stdcall CCs.
2346 if (Subtarget.is64Bit())
2347 return;
2348 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2349 return;
2350 unsigned ParamRegs = 0;
2351 if (auto *M = MF->getFunction().getParent())
2352 ParamRegs = M->getNumberRegisterParameters();
2354 // Mark the first N integer arguments as being passed in registers.
2355 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2356 Type *T = Args[Idx].Ty;
2357 if (T->isIntOrPtrTy())
2358 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2359 unsigned numRegs = 1;
2360 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2361 numRegs = 2;
2362 if (ParamRegs < numRegs)
2363 return;
2364 ParamRegs -= numRegs;
2365 Args[Idx].IsInReg = true;
2371 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2372 const MachineBasicBlock *MBB,
2373 unsigned uid,MCContext &Ctx) const{
2374 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2375 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2376 // entries.
2377 return MCSymbolRefExpr::create(MBB->getSymbol(),
2378 MCSymbolRefExpr::VK_GOTOFF, Ctx);
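// Putting the two pieces above together (illustrative): in GOT PIC mode a
// jump-table slot for a block labeled .LBB0_3 is emitted as the 32-bit value
// ".LBB0_3@GOTOFF", and dispatch code adds the PIC base (see
// getPICJumpTableRelocBase below) to form the absolute target address.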
2381 /// Returns relocation base for the given PIC jumptable.
2382 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2383 SelectionDAG &DAG) const {
2384 if (!Subtarget.is64Bit())
2385 // This doesn't have SDLoc associated with it, but is not really the
2386 // same as a Register.
2387 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2388 getPointerTy(DAG.getDataLayout()));
2389 return Table;
2392 /// This returns the relocation base for the given PIC jumptable,
2393 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2394 const MCExpr *X86TargetLowering::
2395 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2396 MCContext &Ctx) const {
2397 // X86-64 uses RIP relative addressing based on the jump table label.
2398 if (Subtarget.isPICStyleRIPRel())
2399 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2401 // Otherwise, the reference is relative to the PIC base.
2402 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2405 std::pair<const TargetRegisterClass *, uint8_t>
2406 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2407 MVT VT) const {
2408 const TargetRegisterClass *RRC = nullptr;
2409 uint8_t Cost = 1;
2410 switch (VT.SimpleTy) {
2411 default:
2412 return TargetLowering::findRepresentativeClass(TRI, VT);
2413 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2414 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2415 break;
2416 case MVT::x86mmx:
2417 RRC = &X86::VR64RegClass;
2418 break;
2419 case MVT::f32: case MVT::f64:
2420 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2421 case MVT::v4f32: case MVT::v2f64:
2422 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2423 case MVT::v8f32: case MVT::v4f64:
2424 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2425 case MVT::v16f32: case MVT::v8f64:
2426 RRC = &X86::VR128XRegClass;
2429 return std::make_pair(RRC, Cost);
2432 unsigned X86TargetLowering::getAddressSpace() const {
2433 if (Subtarget.is64Bit())
2434 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2435 return 256;
2438 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2439 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2440 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2443 static Constant* SegmentOffset(IRBuilder<> &IRB,
2444 unsigned Offset, unsigned AddressSpace) {
2445 return ConstantExpr::getIntToPtr(
2446 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2447 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
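// For example (illustrative): SegmentOffset(IRB, 0x28, 257) yields the
// constant "inttoptr (i32 40 to i8* addrspace(257)*)", which the callers
// below use to address %fs:0x28; address space 257 selects %fs and 256
// selects %gs.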
2450 Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2451 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2452 // tcbhead_t; use it instead of the usual global variable (see
2453 // sysdeps/{i386,x86_64}/nptl/tls.h)
2454 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2455 if (Subtarget.isTargetFuchsia()) {
2456 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2457 return SegmentOffset(IRB, 0x10, getAddressSpace());
2459 // %fs:0x28, unless we're using a Kernel code model, in which case
2460 // it's %gs:0x28. gs:0x14 on i386.
2461 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2462 return SegmentOffset(IRB, Offset, getAddressSpace());
2466 return TargetLowering::getIRStackGuard(IRB);
2469 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2470 // MSVC CRT provides functionalities for stack protection.
2471 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2472 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2473 // MSVC CRT has a global variable holding security cookie.
2474 M.getOrInsertGlobal("__security_cookie",
2475 Type::getInt8PtrTy(M.getContext()));
2477 // MSVC CRT has a function to validate security cookie.
2478 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2479 "__security_check_cookie", Type::getVoidTy(M.getContext()),
2480 Type::getInt8PtrTy(M.getContext()));
2481 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2482 F->setCallingConv(CallingConv::X86_FastCall);
2483 F->addAttribute(1, Attribute::AttrKind::InReg);
2487 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2488 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2489 return;
2490 TargetLowering::insertSSPDeclarations(M);
2493 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2494 // MSVC CRT has a global variable holding security cookie.
2495 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2496 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2497 return M.getGlobalVariable("__security_cookie");
2499 return TargetLowering::getSDagStackGuard(M);
2502 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2503 // MSVC CRT has a function to validate security cookie.
2504 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2505 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2506 return M.getFunction("__security_check_cookie");
2508 return TargetLowering::getSSPStackGuardCheck(M);
2511 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2512 if (Subtarget.getTargetTriple().isOSContiki())
2513 return getDefaultSafeStackPointerLocation(IRB, false);
2515 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2516 // definition of TLS_SLOT_SAFESTACK in
2517 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2518 if (Subtarget.isTargetAndroid()) {
2519 // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
2520 // 0x48. gs:0x24 on i386.
2521 unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2522 return SegmentOffset(IRB, Offset, getAddressSpace());
2525 // Fuchsia is similar.
2526 if (Subtarget.isTargetFuchsia()) {
2527 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2528 return SegmentOffset(IRB, 0x18, getAddressSpace());
2531 return TargetLowering::getSafeStackPointerLocation(IRB);
2534 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2535 unsigned DestAS) const {
2536 assert(SrcAS != DestAS && "Expected different address spaces!");
2538 const TargetMachine &TM = getTargetMachine();
2539 if (TM.getPointerSize(SrcAS) != TM.getPointerSize(DestAS))
2540 return false;
2542 return SrcAS < 256 && DestAS < 256;
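// Example of the rule above (illustrative): a cast between ordinary pointers
// in address spaces 0 and 1 is a no-op when both pointer widths match, but
// any cast involving the segment address spaces used above (256/257) is not
// considered free.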
2545 //===----------------------------------------------------------------------===//
2546 // Return Value Calling Convention Implementation
2547 //===----------------------------------------------------------------------===//
2549 bool X86TargetLowering::CanLowerReturn(
2550 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2551 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2552 SmallVector<CCValAssign, 16> RVLocs;
2553 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2554 return CCInfo.CheckReturn(Outs, RetCC_X86);
2557 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2558 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2559 return ScratchRegs;
2562 /// Lowers masks values (v*i1) to the local register values
2563 /// \returns DAG node after lowering to register type
2564 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2565 const SDLoc &Dl, SelectionDAG &DAG) {
2566 EVT ValVT = ValArg.getValueType();
2568 if (ValVT == MVT::v1i1)
2569 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2570 DAG.getIntPtrConstant(0, Dl));
2572 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2573 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2574 // Two stage lowering might be required
2575 // bitcast: v8i1 -> i8 / v16i1 -> i16
2576 // anyextend: i8 -> i32 / i16 -> i32
2577 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2578 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2579 if (ValLoc == MVT::i32)
2580 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2581 return ValToCopy;
2584 if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2585 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2586 // One stage lowering is required
2587 // bitcast: v32i1 -> i32 / v64i1 -> i64
2588 return DAG.getBitcast(ValLoc, ValArg);
2591 return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
2594 /// Breaks v64i1 value into two registers and adds the new node to the DAG
2595 static void Passv64i1ArgInRegs(
2596 const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
2597 SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
2598 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2599 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2600 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2601 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2602 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2603 "The value should reside in two registers");
2605 // Before splitting the value we cast it to i64
2606 Arg = DAG.getBitcast(MVT::i64, Arg);
2608 // Splitting the value into two i32 types
2609 SDValue Lo, Hi;
2610 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2611 DAG.getConstant(0, Dl, MVT::i32));
2612 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2613 DAG.getConstant(1, Dl, MVT::i32));
2615 // Attach the two i32 types into corresponding registers
2616 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2617 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
2621 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2622 bool isVarArg,
2623 const SmallVectorImpl<ISD::OutputArg> &Outs,
2624 const SmallVectorImpl<SDValue> &OutVals,
2625 const SDLoc &dl, SelectionDAG &DAG) const {
2626 MachineFunction &MF = DAG.getMachineFunction();
2627 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2629 // In some cases we need to disable registers from the default CSR list.
2630 // For example, when they are used for argument passing.
2631 bool ShouldDisableCalleeSavedRegister =
2632 CallConv == CallingConv::X86_RegCall ||
2633 MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2635 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2636 report_fatal_error("X86 interrupts may not return any value");
2638 SmallVector<CCValAssign, 16> RVLocs;
2639 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2640 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2642 SmallVector<std::pair<Register, SDValue>, 4> RetVals;
2643 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2644 ++I, ++OutsIndex) {
2645 CCValAssign &VA = RVLocs[I];
2646 assert(VA.isRegLoc() && "Can only return in registers!");
2648 // Add the register to the CalleeSaveDisableRegs list.
2649 if (ShouldDisableCalleeSavedRegister)
2650 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2652 SDValue ValToCopy = OutVals[OutsIndex];
2653 EVT ValVT = ValToCopy.getValueType();
2655 // Promote values to the appropriate types.
2656 if (VA.getLocInfo() == CCValAssign::SExt)
2657 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2658 else if (VA.getLocInfo() == CCValAssign::ZExt)
2659 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2660 else if (VA.getLocInfo() == CCValAssign::AExt) {
2661 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2662 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2663 else
2664 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2665 }
2666 else if (VA.getLocInfo() == CCValAssign::BCvt)
2667 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2669 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2670 "Unexpected FP-extend for return value.");
2672 // Report an error if we have attempted to return a value via an XMM
2673 // register and SSE was disabled.
2674 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
2675 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2676 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2677 } else if (!Subtarget.hasSSE2() &&
2678 X86::FR64XRegClass.contains(VA.getLocReg()) &&
2679 ValVT == MVT::f64) {
2680 // When returning a double via an XMM register, report an error if SSE2 is
2682 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2683 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2686 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2687 // the RET instruction and handled by the FP Stackifier.
2688 if (VA.getLocReg() == X86::FP0 ||
2689 VA.getLocReg() == X86::FP1) {
2690 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2691 // change the value to the FP stack register class.
2692 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2693 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2694 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2695 // Don't emit a copytoreg.
2696 continue;
2699 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2700 // which is returned in RAX / RDX.
2701 if (Subtarget.is64Bit()) {
2702 if (ValVT == MVT::x86mmx) {
2703 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2704 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2705 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2706 ValToCopy);
2707 // If we don't have SSE2 available, convert to v4f32 so the generated
2708 // register is legal.
2709 if (!Subtarget.hasSSE2())
2710 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2715 if (VA.needsCustom()) {
2716 assert(VA.getValVT() == MVT::v64i1 &&
2717 "Currently the only custom case is when we split v64i1 to 2 regs");
2719 Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
2720 Subtarget);
2722 // Add the second register to the CalleeSaveDisableRegs list.
2723 if (ShouldDisableCalleeSavedRegister)
2724 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2726 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2730 SDValue Flag;
2731 SmallVector<SDValue, 6> RetOps;
2732 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2733 // Operand #1 = Bytes To Pop
2734 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2735 MVT::i32));
2737 // Copy the result values into the output registers.
2738 for (auto &RetVal : RetVals) {
2739 if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
2740 RetOps.push_back(RetVal.second);
2741 continue; // Don't emit a copytoreg.
2744 Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Flag);
2745 Flag = Chain.getValue(1);
2746 RetOps.push_back(
2747 DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
2750 // The Swift calling convention does not require us to copy the sret argument
2751 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2753 // All x86 ABIs require that for returning structs by value we copy
2754 // the sret argument into %rax/%eax (depending on ABI) for the return.
2755 // We saved the argument into a virtual register in the entry block,
2756 // so now we copy the value out and into %rax/%eax.
2758 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2759 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2760 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2761 // either case FuncInfo->setSRetReturnReg() will have been called.
2762 if (Register SRetReg = FuncInfo->getSRetReturnReg()) {
2763 // When we have both sret and another return value, we should use the
2764 // original Chain stored in RetOps[0], instead of the current Chain updated
2765 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
2767 // For the case of sret and another return value, we have
2768 // Chain_0 at the function entry
2769 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2770 // If we use Chain_1 in getCopyFromReg, we will have
2771 // Val = getCopyFromReg(Chain_1)
2772 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2774 // getCopyToReg(Chain_0) will be glued together with
2775 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2776 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2777 // Data dependency from Unit B to Unit A due to usage of Val in
2778 // getCopyToReg(Chain_1, Val)
2779 // Chain dependency from Unit A to Unit B
2781 // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg.
2782 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2783 getPointerTy(MF.getDataLayout()));
2785 Register RetValReg
2786 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2787 X86::RAX : X86::EAX;
2788 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2789 Flag = Chain.getValue(1);
2791 // RAX/EAX now acts like a return value.
2792 RetOps.push_back(
2793 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2795 // Add the returned register to the CalleeSaveDisableRegs list.
2796 if (ShouldDisableCalleeSavedRegister)
2797 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2800 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2801 const MCPhysReg *I =
2802 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2803 if (I) {
2804 for (; *I; ++I)
2805 if (X86::GR64RegClass.contains(*I))
2806 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2807 else
2808 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2810 }
2812 RetOps[0] = Chain; // Update chain.
2814 // Add the flag if we have it.
2815 if (Flag.getNode())
2816 RetOps.push_back(Flag);
2818 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2819 if (CallConv == CallingConv::X86_INTR)
2820 opcode = X86ISD::IRET;
2821 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
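/// Return true if the result of the given node is used only by a return node
/// (an X86ISD::RET_FLAG, possibly through a CopyToReg or FP_EXTEND). On
/// success, Chain is updated to the chain the return uses, so the preceding
/// call can be turned into a tail call.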
2824 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2825 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2826 return false;
2828 SDValue TCChain = Chain;
2829 SDNode *Copy = *N->use_begin();
2830 if (Copy->getOpcode() == ISD::CopyToReg) {
2831 // If the copy has a glue operand, we conservatively assume it isn't safe to
2832 // perform a tail call.
2833 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2834 return false;
2835 TCChain = Copy->getOperand(0);
2836 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2837 return false;
2839 bool HasRet = false;
2840 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2841 UI != UE; ++UI) {
2842 if (UI->getOpcode() != X86ISD::RET_FLAG)
2843 return false;
2844 // If we are returning more than one value, we can definitely
2845 // not make a tail call; see PR19530.
2846 if (UI->getNumOperands() > 4)
2847 return false;
2848 if (UI->getNumOperands() == 4 &&
2849 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2850 return false;
2851 HasRet = true;
2852 }
2854 if (!HasRet)
2855 return false;
2857 Chain = TCChain;
2858 return true;
2859 }
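/// Return the type a small integer return value (i1/i8/i16) should be
/// extended to before it is placed in the return register. It is kept at i8
/// where the ABI allows it; on Darwin, i8/i16 return values stay extended to
/// i32 to match Clang's historical behaviour.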
2861 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2862 ISD::NodeType ExtendKind) const {
2863 MVT ReturnMVT = MVT::i32;
2865 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2866 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2867 // The ABI does not require i1, i8 or i16 to be extended.
2869 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2870 // always extending i8/i16 return values, so keep doing that for now.
2872 ReturnMVT = MVT::i8;
2875 EVT MinVT = getRegisterType(Context, ReturnMVT);
2876 return VT.bitsLT(MinVT) ? MinVT : VT;
2879 /// Reads two 32 bit registers and creates a 64 bit mask value.
2880 /// \param VA The current 32 bit value that needs to be assigned.
2881 /// \param NextVA The next 32 bit value that needs to be assigned.
2882 /// \param Root The parent DAG node.
2883 /// \param [in,out] InFlag Represents the SDValue in the parent DAG node used
2884 ///                        for glue purposes. In case the DAG already uses a
2885 ///                        physical register instead of a virtual one, we
2886 ///                        should glue our new SDValue to the InFlag SDValue.
2887 /// \return a new 64 bit wide SDValue.
2888 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2889 SDValue &Root, SelectionDAG &DAG,
2890 const SDLoc &Dl, const X86Subtarget &Subtarget,
2891 SDValue *InFlag = nullptr) {
2892 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2893 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2894 assert(VA.getValVT() == MVT::v64i1 &&
2895 "Expecting first location of 64 bit width type");
2896 assert(NextVA.getValVT() == VA.getValVT() &&
2897 "The locations should have the same type");
2898 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2899 "The values should reside in two registers");
2901 SDValue Lo, Hi;
2902 SDValue ArgValueLo, ArgValueHi;
2904 MachineFunction &MF = DAG.getMachineFunction();
2905 const TargetRegisterClass *RC = &X86::GR32RegClass;
2907 // Read a 32 bit value from the registers.
2908 if (nullptr == InFlag) {
2909 // When no physical register is present,
2910 // create an intermediate virtual register.
2911 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
2912 ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2913 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2914 ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2915 } else {
2916 // When a physical register is available read the value from it and glue
2917 // the reads together.
2918 ArgValueLo =
2919 DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2920 *InFlag = ArgValueLo.getValue(2);
2921 ArgValueHi =
2922 DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2923 *InFlag = ArgValueHi.getValue(2);
2926 // Convert the i32 type into v32i1 type.
2927 Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2929 // Convert the i32 type into v32i1 type.
2930 Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2932 // Concatenate the two values together.
2933 return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
2936 /// The function will lower a register of various sizes (8/16/32/64)
2937 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1)
2938 /// \returns a DAG node contains the operand after lowering to mask type.
2939 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2940 const EVT &ValLoc, const SDLoc &Dl,
2941 SelectionDAG &DAG) {
2942 SDValue ValReturned = ValArg;
2944 if (ValVT == MVT::v1i1)
2945 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2947 if (ValVT == MVT::v64i1) {
2948 // On a 32 bit machine, this case is handled by getv64i1Argument.
2949 assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2950 // On a 64 bit machine there is no need to truncate the value, only bitcast it.
2951 } else {
2952 MVT maskLen;
2953 switch (ValVT.getSimpleVT().SimpleTy) {
2954 case MVT::v8i1:
2955 maskLen = MVT::i8;
2956 break;
2957 case MVT::v16i1:
2958 maskLen = MVT::i16;
2959 break;
2960 case MVT::v32i1:
2961 maskLen = MVT::i32;
2962 break;
2963 default:
2964 llvm_unreachable("Expecting a vector of i1 types");
2965 }
2967 ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2968 }
2969 return DAG.getBitcast(ValVT, ValReturned);
2972 /// Lower the result values of a call into the appropriate copies out of
2973 /// physical registers.
2975 SDValue X86TargetLowering::LowerCallResult(
2976 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2977 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2978 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2979 uint32_t *RegMask) const {
2981 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2982 // Assign locations to each value returned by this call.
2983 SmallVector<CCValAssign, 16> RVLocs;
2984 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2986 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2988 // Copy all of the result registers out of their specified physreg.
2989 for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
2990 ++I, ++InsIndex) {
2991 CCValAssign &VA = RVLocs[I];
2992 EVT CopyVT = VA.getLocVT();
2994 // In some calling conventions we need to remove the used registers
2995 // from the register mask.
2996 if (RegMask)
2997 for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
2998 SubRegs.isValid(); ++SubRegs)
2999 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
3002 // Report an error if there was an attempt to return FP values via XMM
3003 // registers.
3004 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3005 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3006 if (VA.getLocReg() == X86::XMM1)
3007 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3009 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3010 } else if (!Subtarget.hasSSE2() &&
3011 X86::FR64XRegClass.contains(VA.getLocReg()) &&
3012 CopyVT == MVT::f64) {
3013 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3014 if (VA.getLocReg() == X86::XMM1)
3015 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3017 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3020 // If we prefer to use the value in xmm registers, copy it out as f80 and
3021 // use a truncate to move it from fp stack reg to xmm reg.
3022 bool RoundAfterCopy = false;
3023 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
3024 isScalarFPTypeInSSEReg(VA.getValVT())) {
3025 if (!Subtarget.hasX87())
3026 report_fatal_error("X87 register return with X87 disabled");
3028 RoundAfterCopy = (CopyVT != VA.getLocVT());
3031 SDValue Val;
3032 if (VA.needsCustom()) {
3033 assert(VA.getValVT() == MVT::v64i1 &&
3034 "Currently the only custom case is when we split v64i1 to 2 regs");
3035 Val =
3036 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
3037 } else {
3038 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
3039 .getValue(1);
3040 Val = Chain.getValue(0);
3041 InFlag = Chain.getValue(2);
3042 }
3044 if (RoundAfterCopy)
3045 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
3046 // This truncation won't change the value.
3047 DAG.getIntPtrConstant(1, dl));
3049 if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
3050 if (VA.getValVT().isVector() &&
3051 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3052 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3053 // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3054 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
3055 } else
3056 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3057 }
3059 if (VA.getLocInfo() == CCValAssign::BCvt)
3060 Val = DAG.getBitcast(VA.getValVT(), Val);
3062 InVals.push_back(Val);
3068 //===----------------------------------------------------------------------===//
3069 // C & StdCall & Fast Calling Convention implementation
3070 //===----------------------------------------------------------------------===//
3071 // The StdCall calling convention is the standard for many Windows API
3072 // routines. It differs from the C calling convention only in that the callee,
3073 // not the caller, cleans up the stack. Symbols are also decorated in a
3074 // distinctive way. It doesn't support any vector arguments.
3075 // For info on fast calling convention see Fast Calling Convention (tail call)
3076 // implementation LowerX86_32FastCCCallTo.
3078 /// CallIsStructReturn - Determines whether a call uses struct return
3079 /// semantics.
3080 enum StructReturnType {
3081 NotStructReturn,
3082 RegStructReturn,
3083 StackStructReturn
3084 };
3085 static StructReturnType
3086 callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
3087 if (Outs.empty())
3088 return NotStructReturn;
3090 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
3091 if (!Flags.isSRet())
3092 return NotStructReturn;
3093 if (Flags.isInReg() || IsMCU)
3094 return RegStructReturn;
3095 return StackStructReturn;
3098 /// Determines whether a function uses struct return semantics.
3099 static StructReturnType
3100 argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
3101 if (Ins.empty())
3102 return NotStructReturn;
3104 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
3105 if (!Flags.isSRet())
3106 return NotStructReturn;
3107 if (Flags.isInReg() || IsMCU)
3108 return RegStructReturn;
3109 return StackStructReturn;
3112 /// Make a copy of an aggregate at address specified by "Src" to address
3113 /// "Dst" with size and alignment information specified by the specific
3114 /// parameter attribute. The copy will be passed as a byval function parameter.
3115 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
3116 SDValue Chain, ISD::ArgFlagsTy Flags,
3117 SelectionDAG &DAG, const SDLoc &dl) {
3118 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
3120 return DAG.getMemcpy(
3121 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
3122 /*isVolatile*/ false, /*AlwaysInline=*/true,
3123 /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
3126 /// Return true if the calling convention is one that we can guarantee TCO for.
3127 static bool canGuaranteeTCO(CallingConv::ID CC) {
3128 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3129 CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
3130 CC == CallingConv::HHVM || CC == CallingConv::Tail);
3133 /// Return true if we might ever do TCO for calls with this calling convention.
3134 static bool mayTailCallThisCC(CallingConv::ID CC) {
3135 switch (CC) {
3136 // C calling conventions:
3137 case CallingConv::C:
3138 case CallingConv::Win64:
3139 case CallingConv::X86_64_SysV:
3140 // Callee pop conventions:
3141 case CallingConv::X86_ThisCall:
3142 case CallingConv::X86_StdCall:
3143 case CallingConv::X86_VectorCall:
3144 case CallingConv::X86_FastCall:
3145 // Swift:
3146 case CallingConv::Swift:
3147 return true;
3148 default:
3149 return canGuaranteeTCO(CC);
3150 }
3151 }
3153 /// Return true if the function is being made into a tailcall target by
3154 /// changing its ABI.
3155 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
3156 return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) || CC == CallingConv::Tail;
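/// Quick, conservative IR-level check: a call can only end up being emitted
/// as a tail call if it is marked 'tail' and its calling convention is one
/// the backend may tail-call at all.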
3159 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3160 if (!CI->isTailCall())
3161 return false;
3163 CallingConv::ID CalleeCC = CI->getCallingConv();
3164 if (!mayTailCallThisCC(CalleeCC))
3165 return false;
3167 return true;
3168 }
3170 SDValue
3171 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3172 const SmallVectorImpl<ISD::InputArg> &Ins,
3173 const SDLoc &dl, SelectionDAG &DAG,
3174 const CCValAssign &VA,
3175 MachineFrameInfo &MFI, unsigned i) const {
3176 // Create the nodes corresponding to a load from this parameter slot.
3177 ISD::ArgFlagsTy Flags = Ins[i].Flags;
3178 bool AlwaysUseMutable = shouldGuaranteeTCO(
3179 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3180 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3181 EVT ValVT;
3182 MVT PtrVT = getPointerTy(DAG.getDataLayout());
3184 // If value is passed by pointer we have address passed instead of the value
3185 // itself. No need to extend if the mask value and location share the same
3186 // bit width.
3187 bool ExtendedInMem =
3188 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3189 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3191 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3192 ValVT = VA.getLocVT();
3194 ValVT = VA.getValVT();
3196 // FIXME: For now, all byval parameter objects are marked mutable. This can be
3197 // changed with more analysis.
3198 // In case of tail call optimization mark all arguments mutable, since they
3199 // could be overwritten by the lowering of arguments in case of a tail call.
3200 if (Flags.isByVal()) {
3201 unsigned Bytes = Flags.getByValSize();
3202 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3204 // FIXME: For now, all byval parameter objects are marked as aliasing. This
3205 // can be improved with deeper analysis.
3206 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3207 /*isAliased=*/true);
3208 return DAG.getFrameIndex(FI, PtrVT);
3211 // This is an argument in memory. We might be able to perform copy elision.
3212 // If the argument is passed directly in memory without any extension, then we
3213 // can perform copy elision. Large vector types, for example, may be passed
3214 // indirectly by pointer.
3215 if (Flags.isCopyElisionCandidate() &&
3216 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
3217 EVT ArgVT = Ins[i].ArgVT;
3218 SDValue PartAddr;
3219 if (Ins[i].PartOffset == 0) {
3220 // If this is a one-part value or the first part of a multi-part value,
3221 // create a stack object for the entire argument value type and return a
3222 // load from our portion of it. This assumes that if the first part of an
3223 // argument is in memory, the rest will also be in memory.
3224 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3225 /*IsImmutable=*/false);
3226 PartAddr = DAG.getFrameIndex(FI, PtrVT);
3227 return DAG.getLoad(
3228 ValVT, dl, Chain, PartAddr,
3229 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3230 } else {
3231 // This is not the first piece of an argument in memory. See if there is
3232 // already a fixed stack object including this offset. If so, assume it
3233 // was created by the PartOffset == 0 branch above and create a load from
3234 // the appropriate offset into it.
3235 int64_t PartBegin = VA.getLocMemOffset();
3236 int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3237 int FI = MFI.getObjectIndexBegin();
3238 for (; MFI.isFixedObjectIndex(FI); ++FI) {
3239 int64_t ObjBegin = MFI.getObjectOffset(FI);
3240 int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3241 if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3242 break;
3243 }
3244 if (MFI.isFixedObjectIndex(FI)) {
3245 SDValue Addr =
3246 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3247 DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3248 return DAG.getLoad(
3249 ValVT, dl, Chain, Addr,
3250 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3251 Ins[i].PartOffset));
3256 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3257 VA.getLocMemOffset(), isImmutable);
3259 // Set SExt or ZExt flag.
3260 if (VA.getLocInfo() == CCValAssign::ZExt) {
3261 MFI.setObjectZExt(FI, true);
3262 } else if (VA.getLocInfo() == CCValAssign::SExt) {
3263 MFI.setObjectSExt(FI, true);
3266 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3267 SDValue Val = DAG.getLoad(
3268 ValVT, dl, Chain, FIN,
3269 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3270 return ExtendedInMem
3271 ? (VA.getValVT().isVector()
3272 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3273 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3274 : Val;
3275 }
3277 // FIXME: Get this from tablegen.
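/// Return the ordered list of 64-bit GPRs used to pass integer arguments for
/// the given calling convention (RCX/RDX/R8/R9 for Win64, the SysV sequence
/// otherwise).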
3278 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3279 const X86Subtarget &Subtarget) {
3280 assert(Subtarget.is64Bit());
3282 if (Subtarget.isCallingConvWin64(CallConv)) {
3283 static const MCPhysReg GPR64ArgRegsWin64[] = {
3284 X86::RCX, X86::RDX, X86::R8, X86::R9
3286 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3289 static const MCPhysReg GPR64ArgRegs64Bit[] = {
3290 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3292 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3295 // FIXME: Get this from tablegen.
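/// Return the XMM registers available for passing variadic FP/vector
/// arguments in the given calling convention; empty for Win64 or when
/// SSE/implicit floating point use is disabled.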
3296 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3297 CallingConv::ID CallConv,
3298 const X86Subtarget &Subtarget) {
3299 assert(Subtarget.is64Bit());
3300 if (Subtarget.isCallingConvWin64(CallConv)) {
3301 // The XMM registers which might contain var arg parameters are shadowed
3302 // in their paired GPR. So we only need to save the GPR to their home
3303 // slots.
3304 // TODO: __vectorcall will change this.
3305 return None;
3306 }
3308 const Function &F = MF.getFunction();
3309 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
3310 bool isSoftFloat = Subtarget.useSoftFloat();
3311 assert(!(isSoftFloat && NoImplicitFloatOps) &&
3312 "SSE register cannot be used when SSE is disabled!");
3313 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
3314 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
3315 // registers.
3316 return None;
3318 static const MCPhysReg XMMArgRegs64Bit[] = {
3319 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3320 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3322 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
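/// Return true if the argument locations are sorted by ascending value
/// number; the lowering loops below rely on this to keep the ArgLocs and
/// Ins/Outs indices in sync.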
3326 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3327 return llvm::is_sorted(
3328 ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
3329 return A.getValNo() < B.getValNo();
3335 /// This is a helper class for lowering variable arguments parameters.
3336 class VarArgsLoweringHelper {
3337 public:
3338 VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
3339 SelectionDAG &DAG, const X86Subtarget &Subtarget,
3340 CallingConv::ID CallConv, CCState &CCInfo)
3341 : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
3342 TheMachineFunction(DAG.getMachineFunction()),
3343 TheFunction(TheMachineFunction.getFunction()),
3344 FrameInfo(TheMachineFunction.getFrameInfo()),
3345 FrameLowering(*Subtarget.getFrameLowering()),
3346 TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
3347 CCInfo(CCInfo) {}
3349 // Lower variable arguments parameters.
3350 void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);
3352 private:
3353 void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);
3355 void forwardMustTailParameters(SDValue &Chain);
3357 bool is64Bit() { return Subtarget.is64Bit(); }
3358 bool isWin64() { return Subtarget.isCallingConvWin64(CallConv); }
3360 X86MachineFunctionInfo *FuncInfo;
3361 const SDLoc &DL;
3362 SelectionDAG &DAG;
3363 const X86Subtarget &Subtarget;
3364 MachineFunction &TheMachineFunction;
3365 const Function &TheFunction;
3366 MachineFrameInfo &FrameInfo;
3367 const TargetFrameLowering &FrameLowering;
3368 const TargetLowering &TargLowering;
3369 CallingConv::ID CallConv;
3370 CCState &CCInfo;
3371 };
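// Create the fixed objects backing llvm.va_start and, on 64-bit targets,
// spill the still-unallocated integer and XMM argument registers into the
// register save area so va_arg can find them later.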
3374 void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
3375 SDValue &Chain, unsigned StackSize) {
3376 // If the function takes variable number of arguments, make a frame index for
3377 // the start of the first vararg value... for expansion of llvm.va_start. We
3378 // can skip this if there are no va_start calls.
3379 if (is64Bit() || (CallConv != CallingConv::X86_FastCall &&
3380 CallConv != CallingConv::X86_ThisCall)) {
3381 FuncInfo->setVarArgsFrameIndex(
3382 FrameInfo.CreateFixedObject(1, StackSize, true));
3385 // Figure out if XMM registers are in use.
3386 assert(!(Subtarget.useSoftFloat() &&
3387 TheFunction.hasFnAttribute(Attribute::NoImplicitFloat)) &&
3388 "SSE register cannot be used when SSE is disabled!");
3390 // 64-bit calling conventions support varargs and register parameters, so we
3391 // have to do extra work to spill them in the prologue.
3392 if (is64Bit()) {
3393 // Find the first unallocated argument registers.
3394 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3395 ArrayRef<MCPhysReg> ArgXMMs =
3396 get64BitArgumentXMMs(TheMachineFunction, CallConv, Subtarget);
3397 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3398 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3400 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3401 "SSE register cannot be used when SSE is disabled!");
3403 if (isWin64()) {
3404 // Get to the caller-allocated home save location. Add 8 to account
3405 // for the return address.
3406 int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
3407 FuncInfo->setRegSaveFrameIndex(
3408 FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3409 // Fixup to set vararg frame on shadow area (4 x i64).
3410 if (NumIntRegs < 4)
3411 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3412 } else {
3413 // For X86-64, if there are vararg parameters that are passed via
3414 // registers, then we must store them to their spots on the stack so
3415 // they may be loaded by dereferencing the result of va_next.
3416 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3417 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3418 FuncInfo->setRegSaveFrameIndex(FrameInfo.CreateStackObject(
3419 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, Align(16), false));
3422 SmallVector<SDValue, 6>
3423 LiveGPRs; // list of SDValue for GPR registers keeping live input value
3424 SmallVector<SDValue, 8> LiveXMMRegs; // list of SDValue for XMM registers
3425 // keeping live input value
3426 SDValue ALVal; // if applicable keeps SDValue for %al register
3428 // Gather all the live in physical registers.
3429 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3430 Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
3431 LiveGPRs.push_back(DAG.getCopyFromReg(Chain, DL, GPR, MVT::i64));
3433 const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
3434 if (!AvailableXmms.empty()) {
3435 Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
3436 ALVal = DAG.getCopyFromReg(Chain, DL, AL, MVT::i8);
3437 for (MCPhysReg Reg : AvailableXmms) {
3438 Register XMMReg = TheMachineFunction.addLiveIn(Reg, &X86::VR128RegClass);
3439 LiveXMMRegs.push_back(
3440 DAG.getCopyFromReg(Chain, DL, XMMReg, MVT::v4f32));
3444 // Store the integer parameter registers.
3445 SmallVector<SDValue, 8> MemOps;
3446 SDValue RSFIN =
3447 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3448 TargLowering.getPointerTy(DAG.getDataLayout()));
3449 unsigned Offset = FuncInfo->getVarArgsGPOffset();
3450 for (SDValue Val : LiveGPRs) {
3451 SDValue FIN = DAG.getNode(ISD::ADD, DL,
3452 TargLowering.getPointerTy(DAG.getDataLayout()),
3453 RSFIN, DAG.getIntPtrConstant(Offset, DL));
3454 SDValue Store =
3455 DAG.getStore(Val.getValue(1), DL, Val, FIN,
3456 MachinePointerInfo::getFixedStack(
3457 DAG.getMachineFunction(),
3458 FuncInfo->getRegSaveFrameIndex(), Offset));
3459 MemOps.push_back(Store);
3460 Offset += 8;
3461 }
3463 // Now store the XMM (fp + vector) parameter registers.
3464 if (!LiveXMMRegs.empty()) {
3465 SmallVector<SDValue, 12> SaveXMMOps;
3466 SaveXMMOps.push_back(Chain);
3467 SaveXMMOps.push_back(ALVal);
3468 SaveXMMOps.push_back(
3469 DAG.getIntPtrConstant(FuncInfo->getRegSaveFrameIndex(), DL));
3470 SaveXMMOps.push_back(
3471 DAG.getIntPtrConstant(FuncInfo->getVarArgsFPOffset(), DL));
3472 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3473 LiveXMMRegs.end());
3474 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, DL,
3475 MVT::Other, SaveXMMOps));
3478 if (!MemOps.empty())
3479 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
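// For functions containing a musttail call in a vararg position, copy every
// register that could carry an argument into a virtual register here so the
// registers can be forwarded unchanged to the musttail call later.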
3483 void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
3484 // Find the largest legal vector type.
3485 MVT VecVT = MVT::Other;
3486 // FIXME: Only some x86_32 calling conventions support AVX512.
3487 if (Subtarget.useAVX512Regs() &&
3488 (is64Bit() || (CallConv == CallingConv::X86_VectorCall ||
3489 CallConv == CallingConv::Intel_OCL_BI)))
3490 VecVT = MVT::v16f32;
3491 else if (Subtarget.hasAVX())
3492 VecVT = MVT::v8f32;
3493 else if (Subtarget.hasSSE2())
3494 VecVT = MVT::v4f32;
3496 // We forward some GPRs and some vector types.
3497 SmallVector<MVT, 2> RegParmTypes;
3498 MVT IntVT = is64Bit() ? MVT::i64 : MVT::i32;
3499 RegParmTypes.push_back(IntVT);
3500 if (VecVT != MVT::Other)
3501 RegParmTypes.push_back(VecVT);
3503 // Compute the set of forwarded registers. The rest are scratch.
3504 SmallVectorImpl<ForwardedRegister> &Forwards =
3505 FuncInfo->getForwardedMustTailRegParms();
3506 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3508 // Forward AL for SysV x86_64 targets, since it is used for varargs.
3509 if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
3510 Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
3511 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3514 // Copy all forwards from physical to virtual registers.
3515 for (ForwardedRegister &FR : Forwards) {
3516 // FIXME: Can we use a less constrained schedule?
3517 SDValue RegVal = DAG.getCopyFromReg(Chain, DL, FR.VReg, FR.VT);
3518 FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
3519 TargLowering.getRegClassFor(FR.VT));
3520 Chain = DAG.getCopyToReg(Chain, DL, FR.VReg, RegVal);
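// Entry point of the helper: marks the frame indices as unset, then builds
// the va_start area and/or forwards musttail registers depending on what the
// function actually contains.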
3524 void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
3525 unsigned StackSize) {
3526 // Set FrameIndex to the 0xAAAAAAA value to mark unset state.
3527 // If necessary, it would be set into the correct value later.
3528 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3529 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3531 if (FrameInfo.hasVAStart())
3532 createVarArgAreaAndStoreRegisters(Chain, StackSize);
3534 if (FrameInfo.hasMustTailInVarArgFunc())
3535 forwardMustTailParameters(Chain);
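/// Lower the incoming (formal) arguments: assign each argument a register or
/// stack location according to the calling convention, emit the
/// CopyFromReg/load nodes that materialize the argument values, and record
/// sret, vararg and callee-pop bookkeeping in X86MachineFunctionInfo.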
3538 SDValue X86TargetLowering::LowerFormalArguments(
3539 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3540 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3541 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3542 MachineFunction &MF = DAG.getMachineFunction();
3543 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3545 const Function &F = MF.getFunction();
3546 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3547 F.getName() == "main")
3548 FuncInfo->setForceFramePointer(true);
3550 MachineFrameInfo &MFI = MF.getFrameInfo();
3551 bool Is64Bit = Subtarget.is64Bit();
3552 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3554 assert(
3555 !(IsVarArg && canGuaranteeTCO(CallConv)) &&
3556 "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3558 // Assign locations to all of the incoming arguments.
3559 SmallVector<CCValAssign, 16> ArgLocs;
3560 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
3562 // Allocate shadow area for Win64.
3563 if (IsWin64)
3564 CCInfo.AllocateStack(32, Align(8));
3566 CCInfo.AnalyzeArguments(Ins, CC_X86);
3568 // In vectorcall calling convention a second pass is required for the HVA
3569 // registers.
3570 if (CallingConv::X86_VectorCall == CallConv) {
3571 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
3574 // The next loop assumes that the locations are in the same order as the
3575 // declared Ins arguments.
3576 assert(isSortedByValueNo(ArgLocs) &&
3577 "Argument Location list must be sorted before lowering");
3580 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
3581 ++I, ++InsIndex) {
3582 assert(InsIndex < Ins.size() && "Invalid Ins index");
3583 CCValAssign &VA = ArgLocs[I];
3584 SDValue ArgValue;
3585 if (VA.isRegLoc()) {
3586 EVT RegVT = VA.getLocVT();
3587 if (VA.needsCustom()) {
3588 assert(
3589 VA.getValVT() == MVT::v64i1 &&
3590 "Currently the only custom case is when we split v64i1 to 2 regs");
3592 // In the regcall calling convention on a 32 bit target, v64i1 values
3593 // are split up into two registers.
3594 ArgValue =
3595 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
3596 } else {
3597 const TargetRegisterClass *RC;
3598 if (RegVT == MVT::i8)
3599 RC = &X86::GR8RegClass;
3600 else if (RegVT == MVT::i16)
3601 RC = &X86::GR16RegClass;
3602 else if (RegVT == MVT::i32)
3603 RC = &X86::GR32RegClass;
3604 else if (Is64Bit && RegVT == MVT::i64)
3605 RC = &X86::GR64RegClass;
3606 else if (RegVT == MVT::f32)
3607 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
3608 else if (RegVT == MVT::f64)
3609 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
3610 else if (RegVT == MVT::f80)
3611 RC = &X86::RFP80RegClass;
3612 else if (RegVT == MVT::f128)
3613 RC = &X86::VR128RegClass;
3614 else if (RegVT.is512BitVector())
3615 RC = &X86::VR512RegClass;
3616 else if (RegVT.is256BitVector())
3617 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
3618 else if (RegVT.is128BitVector())
3619 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
3620 else if (RegVT == MVT::x86mmx)
3621 RC = &X86::VR64RegClass;
3622 else if (RegVT == MVT::v1i1)
3623 RC = &X86::VK1RegClass;
3624 else if (RegVT == MVT::v8i1)
3625 RC = &X86::VK8RegClass;
3626 else if (RegVT == MVT::v16i1)
3627 RC = &X86::VK16RegClass;
3628 else if (RegVT == MVT::v32i1)
3629 RC = &X86::VK32RegClass;
3630 else if (RegVT == MVT::v64i1)
3631 RC = &X86::VK64RegClass;
3633 llvm_unreachable("Unknown argument type!");
3635 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
3636 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3639 // If this is an 8 or 16-bit value, it is really passed promoted to 32
3640 // bits. Insert an assert[sz]ext to capture this, then truncate to the
3641 // right size.
3642 if (VA.getLocInfo() == CCValAssign::SExt)
3643 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3644 DAG.getValueType(VA.getValVT()));
3645 else if (VA.getLocInfo() == CCValAssign::ZExt)
3646 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3647 DAG.getValueType(VA.getValVT()));
3648 else if (VA.getLocInfo() == CCValAssign::BCvt)
3649 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
3651 if (VA.isExtInLoc()) {
3652 // Handle MMX values passed in XMM regs.
3653 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
3654 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
3655 else if (VA.getValVT().isVector() &&
3656 VA.getValVT().getScalarType() == MVT::i1 &&
3657 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3658 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3659 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3660 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
3662 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3663 }
3664 } else {
3665 assert(VA.isMemLoc());
3666 ArgValue =
3667 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
3668 }
3670 // If value is passed via pointer - do a load.
3671 if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
3672 ArgValue =
3673 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3675 InVals.push_back(ArgValue);
3678 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
3679 // The Swift calling convention does not require us to copy the sret argument
3680 // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3681 if (CallConv == CallingConv::Swift)
3684 // All x86 ABIs require that for returning structs by value we copy the
3685 // sret argument into %rax/%eax (depending on ABI) for the return. Save
3686 // the argument into a virtual register so that we can access it from the
3688 if (Ins[I].Flags.isSRet()) {
3689 Register Reg = FuncInfo->getSRetReturnReg();
3690 if (!Reg) {
3691 MVT PtrTy = getPointerTy(DAG.getDataLayout());
3692 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3693 FuncInfo->setSRetReturnReg(Reg);
3694 }
3695 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3696 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3697 break;
3698 }
3699 }
3701 unsigned StackSize = CCInfo.getNextStackOffset();
3702 // Align stack specially for tail calls.
3703 if (shouldGuaranteeTCO(CallConv,
3704 MF.getTarget().Options.GuaranteedTailCallOpt))
3705 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3707 if (IsVarArg)
3708 VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
3709 .lowerVarArgsParameters(Chain, StackSize);
3711 // Some CCs need callee pop.
3712 if (X86::isCalleePop(CallConv, Is64Bit, IsVarArg,
3713 MF.getTarget().Options.GuaranteedTailCallOpt)) {
3714 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3715 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
3716 // X86 interrupts must pop the error code (and the alignment padding) if
3717 // present.
3718 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3719 } else {
3720 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3721 // If this is an sret function, the return should pop the hidden pointer.
3722 if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3723 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3724 argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3725 FuncInfo->setBytesToPopOnReturn(4);
3726 }
3728 if (!Is64Bit) {
3729 // RegSaveFrameIndex is X86-64 only.
3730 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3733 FuncInfo->setArgumentStackSize(StackSize);
3735 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3736 EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
3737 if (Personality == EHPersonality::CoreCLR) {
3739 // TODO: Add a mechanism to frame lowering that will allow us to indicate
3740 // that we'd prefer this slot be allocated towards the bottom of the frame
3741 // (i.e. near the stack pointer after allocating the frame). Every
3742 // funclet needs a copy of this slot in its (mostly empty) frame, and the
3743 // offset from the bottom of this and each funclet's frame must be the
3744 // same, so the size of funclets' (mostly empty) frames is dictated by
3745 // how far this slot is from the bottom (since they allocate just enough
3746 // space to accommodate holding this slot at the correct offset).
3747 int PSPSymFI = MFI.CreateStackObject(8, Align(8), /*isSS=*/false);
3748 EHInfo->PSPSymFrameIdx = PSPSymFI;
3752 if (CallConv == CallingConv::X86_RegCall ||
3753 F.hasFnAttribute("no_caller_saved_registers")) {
3754 MachineRegisterInfo &MRI = MF.getRegInfo();
3755 for (std::pair<Register, Register> Pair : MRI.liveins())
3756 MRI.disableCalleeSavedRegister(Pair.first);
3757 }
3759 return Chain;
3760 }
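/// Lower one outgoing call argument that lives in memory: compute its address
/// in the outgoing argument area and either emit a byval copy or a plain
/// store to that slot.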
3762 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3763 SDValue Arg, const SDLoc &dl,
3764 SelectionDAG &DAG,
3765 const CCValAssign &VA,
3766 ISD::ArgFlagsTy Flags,
3767 bool isByVal) const {
3768 unsigned LocMemOffset = VA.getLocMemOffset();
3769 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3770 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3771 StackPtr, PtrOff);
3772 if (Flags.isByVal())
3773 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3775 return DAG.getStore(
3776 Chain, dl, Arg, PtrOff,
3777 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
3780 /// Emit a load of return address if tail call
3781 /// optimization is performed and it is required.
3782 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3783 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3784 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3785 // Adjust the Return address stack slot.
3786 EVT VT = getPointerTy(DAG.getDataLayout());
3787 OutRetAddr = getReturnAddressFrameIndex(DAG);
3789 // Load the "old" Return address.
3790 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3791 return SDValue(OutRetAddr.getNode(), 1);
3794 /// Emit a store of the return address if tail call
3795 /// optimization is performed and it is required (FPDiff!=0).
3796 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3797 SDValue Chain, SDValue RetAddrFrIdx,
3798 EVT PtrVT, unsigned SlotSize,
3799 int FPDiff, const SDLoc &dl) {
3800 // Store the return address to the appropriate stack slot.
3801 if (!FPDiff) return Chain;
3802 // Calculate the new stack slot for the return address.
3803 int NewReturnAddrFI =
3804 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3805 false);
3806 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3807 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3808 MachinePointerInfo::getFixedStack(
3809 DAG.getMachineFunction(), NewReturnAddrFI));
3810 return Chain;
3811 }
3813 /// Returns a vector_shuffle mask for an movs{s|d}, movd
3814 /// operation of specified width.
3815 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3816 SDValue V2) {
3817 unsigned NumElems = VT.getVectorNumElements();
3818 SmallVector<int, 8> Mask;
3819 Mask.push_back(NumElems);
3820 for (unsigned i = 1; i != NumElems; ++i)
3821 Mask.push_back(i);
3822 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
3825 SDValue
3826 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3827 SmallVectorImpl<SDValue> &InVals) const {
3828 SelectionDAG &DAG = CLI.DAG;
3829 SDLoc &dl = CLI.DL;
3830 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3831 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3832 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3833 SDValue Chain = CLI.Chain;
3834 SDValue Callee = CLI.Callee;
3835 CallingConv::ID CallConv = CLI.CallConv;
3836 bool &isTailCall = CLI.IsTailCall;
3837 bool isVarArg = CLI.IsVarArg;
3839 MachineFunction &MF = DAG.getMachineFunction();
3840 bool Is64Bit = Subtarget.is64Bit();
3841 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3842 StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3843 bool IsSibcall = false;
3844 bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
3845 CallConv == CallingConv::Tail;
3846 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3847 const auto *CI = dyn_cast_or_null<CallInst>(CLI.CB);
3848 const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3849 bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3850 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3851 const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CB);
3852 bool HasNoCfCheck =
3853 (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
3854 const Module *M = MF.getMMI().getModule();
3855 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
3857 MachineFunction::CallSiteInfo CSInfo;
3858 if (CallConv == CallingConv::X86_INTR)
3859 report_fatal_error("X86 interrupts may not be called directly");
3861 if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
3862 // If we are using a GOT, disable tail calls to external symbols with
3863 // default visibility. Tail calling such a symbol requires using a GOT
3864 // relocation, which forces early binding of the symbol. This breaks code
3865 // that requires lazy function symbol resolution. Using musttail or
3866 // GuaranteedTailCallOpt will override this.
3867 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3868 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3869 G->getGlobal()->hasDefaultVisibility()))
3870 isTailCall = false;
3871 }
3873 bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
3874 if (IsMustTail) {
3875 // Force this to be a tail call. The verifier rules are enough to ensure
3876 // that we can lower this successfully without moving the return address
3877 // around.
3878 isTailCall = true;
3879 } else if (isTailCall) {
3880 // Check if it's really possible to do a tail call.
3881 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3882 isVarArg, SR != NotStructReturn,
3883 MF.getFunction().hasStructRetAttr(), CLI.RetTy,
3884 Outs, OutVals, Ins, DAG);
3886 // Sibcalls are automatically detected tailcalls which do not require
3887 // ABI changes.
3888 if (!IsGuaranteeTCO && isTailCall)
3889 IsSibcall = true;
3891 if (isTailCall)
3892 ++NumTailCalls;
3893 }
3895 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3896 "Var args not supported with calling convention fastcc, ghc or hipe");
3898 // Analyze operands of the call, assigning locations to each operand.
3899 SmallVector<CCValAssign, 16> ArgLocs;
3900 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3902 // Allocate shadow area for Win64.
3903 if (IsWin64)
3904 CCInfo.AllocateStack(32, Align(8));
3906 CCInfo.AnalyzeArguments(Outs, CC_X86);
3908 // In vectorcall calling convention a second pass is required for the HVA
3909 // registers.
3910 if (CallingConv::X86_VectorCall == CallConv) {
3911 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3914 // Get a count of how many bytes are to be pushed on the stack.
3915 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3916 if (IsSibcall)
3917 // This is a sibcall. The memory operands are available in caller's
3918 // own caller's stack.
3919 NumBytes = 0;
3920 else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
3921 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3923 int FPDiff = 0;
3924 if (isTailCall && !IsSibcall && !IsMustTail) {
3925 // Lower arguments at fp - stackoffset + fpdiff.
3926 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3928 FPDiff = NumBytesCallerPushed - NumBytes;
3930 // Set the delta of movement of the returnaddr stackslot.
3931 // But only set if delta is greater than previous delta.
3932 if (FPDiff < X86Info->getTCReturnAddrDelta())
3933 X86Info->setTCReturnAddrDelta(FPDiff);
3936 unsigned NumBytesToPush = NumBytes;
3937 unsigned NumBytesToPop = NumBytes;
3939 // If we have an inalloca argument, all stack space has already been allocated
3940 // for us and be right at the top of the stack. We don't support multiple
3941 // arguments passed in memory when using inalloca.
3942 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3943 NumBytesToPush = 0;
3944 if (!ArgLocs.back().isMemLoc())
3945 report_fatal_error("cannot use inalloca attribute on a register "
3947 if (ArgLocs.back().getLocMemOffset() != 0)
3948 report_fatal_error("any parameter with the inalloca attribute must be "
3949 "the only memory argument");
3950 } else if (CLI.IsPreallocated) {
3951 assert(ArgLocs.back().isMemLoc() &&
3952 "cannot use preallocated attribute on a register "
3954 SmallVector<size_t, 4> PreallocatedOffsets;
3955 for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
3956 if (CLI.CB->paramHasAttr(i, Attribute::Preallocated)) {
3957 PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
3960 auto *MFI = DAG.getMachineFunction().getInfo<X86MachineFunctionInfo>();
3961 size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
3962 MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
3963 MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
3964 NumBytesToPush = 0;
3965 }
3967 if (!IsSibcall && !IsMustTail)
3968 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3969 NumBytes - NumBytesToPush, dl);
3971 SDValue RetAddrFrIdx;
3972 // Load return address for tail calls.
3973 if (isTailCall && FPDiff)
3974 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3975 Is64Bit, FPDiff, dl);
3977 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
3978 SmallVector<SDValue, 8> MemOpChains;
3979 SDValue StackPtr;
3981 // The next loop assumes that the locations are in the same order as the
3982 // declared Outs arguments.
3983 assert(isSortedByValueNo(ArgLocs) &&
3984 "Argument Location list must be sorted before lowering");
3986 // Walk the register/memloc assignments, inserting copies/loads. In the case
3987 // of tail call optimization arguments are handle later.
3988 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3989 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
3990 ++I, ++OutIndex) {
3991 assert(OutIndex < Outs.size() && "Invalid Out index");
3992 // Skip inalloca/preallocated arguments, they have already been written.
3993 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
3994 if (Flags.isInAlloca() || Flags.isPreallocated())
3995 continue;
3997 CCValAssign &VA = ArgLocs[I];
3998 EVT RegVT = VA.getLocVT();
3999 SDValue Arg = OutVals[OutIndex];
4000 bool isByVal = Flags.isByVal();
4002 // Promote the value if needed.
4003 switch (VA.getLocInfo()) {
4004 default: llvm_unreachable("Unknown loc info!");
4005 case CCValAssign::Full: break;
4006 case CCValAssign::SExt:
4007 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
4008 break;
4009 case CCValAssign::ZExt:
4010 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
4011 break;
4012 case CCValAssign::AExt:
4013 if (Arg.getValueType().isVector() &&
4014 Arg.getValueType().getVectorElementType() == MVT::i1)
4015 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
4016 else if (RegVT.is128BitVector()) {
4017 // Special case: passing MMX values in XMM registers.
4018 Arg = DAG.getBitcast(MVT::i64, Arg);
4019 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
4020 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
4022 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
4023 break;
4024 case CCValAssign::BCvt:
4025 Arg = DAG.getBitcast(RegVT, Arg);
4026 break;
4027 case CCValAssign::Indirect: {
4028 if (isByVal) {
4029 // Memcpy the argument to a temporary stack slot to prevent
4030 // the caller from seeing any modifications the callee may make
4031 // as guaranteed by the `byval` attribute.
4032 int FrameIdx = MF.getFrameInfo().CreateStackObject(
4033 Flags.getByValSize(),
4034 std::max(Align(16), Flags.getNonZeroByValAlign()), false);
4035 SDValue StackSlot =
4036 DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
4037 Chain =
4038 CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
4039 // From now on treat this as a regular pointer
4040 Arg = StackSlot;
4041 isByVal = false;
4042 } else {
4043 // Store the argument.
4044 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
4045 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
4046 Chain = DAG.getStore(
4047 Chain, dl, Arg, SpillSlot,
4048 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4049 Arg = SpillSlot;
4050 }
4051 break;
4052 }
4053 }
4055 if (VA.needsCustom()) {
4056 assert(VA.getValVT() == MVT::v64i1 &&
4057 "Currently the only custom case is when we split v64i1 to 2 regs");
4058 // Split v64i1 value into two registers
4059 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
4060 } else if (VA.isRegLoc()) {
4061 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4062 const TargetOptions &Options = DAG.getTarget().Options;
4063 if (Options.EmitCallSiteInfo)
4064 CSInfo.emplace_back(VA.getLocReg(), I);
4065 if (isVarArg && IsWin64) {
4066 // Win64 ABI requires argument XMM reg to be copied to the corresponding
4067 // shadow reg if callee is a varargs function.
4068 Register ShadowReg;
4069 switch (VA.getLocReg()) {
4070 case X86::XMM0: ShadowReg = X86::RCX; break;
4071 case X86::XMM1: ShadowReg = X86::RDX; break;
4072 case X86::XMM2: ShadowReg = X86::R8; break;
4073 case X86::XMM3: ShadowReg = X86::R9; break;
4074 }
4075 if (ShadowReg)
4076 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
4078 } else if (!IsSibcall && (!isTailCall || isByVal)) {
4079 assert(VA.isMemLoc());
4080 if (!StackPtr.getNode())
4081 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4082 getPointerTy(DAG.getDataLayout()));
4083 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4084 dl, DAG, VA, Flags, isByVal));
4088 if (!MemOpChains.empty())
4089 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4091 if (Subtarget.isPICStyleGOT()) {
4092 // ELF / PIC requires GOT in the EBX register before function calls via PLT
4093 // GOT pointer.
4094 if (!isTailCall) {
4095 RegsToPass.push_back(std::make_pair(
4096 Register(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4097 getPointerTy(DAG.getDataLayout()))));
4098 } else {
4099 // If we are tail calling and generating PIC/GOT style code load the
4100 // address of the callee into ECX. The value in ecx is used as target of
4101 // the tail jump. This is done to circumvent the ebx/callee-saved problem
4102 // for tail calls on PIC/GOT architectures. Normally we would just put the
4103 // address of GOT into ebx and then call target@PLT. But for tail calls
4104 // ebx would be restored (since ebx is callee saved) before jumping to the
4105 // target!
4107 // Note: The actual moving to ECX is done further down.
4108 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4109 if (G && !G->getGlobal()->hasLocalLinkage() &&
4110 G->getGlobal()->hasDefaultVisibility())
4111 Callee = LowerGlobalAddress(Callee, DAG);
4112 else if (isa<ExternalSymbolSDNode>(Callee))
4113 Callee = LowerExternalSymbol(Callee, DAG);
4117 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
4118 // From AMD64 ABI document:
4119 // For calls that may call functions that use varargs or stdargs
4120 // (prototype-less calls or calls to functions containing ellipsis (...) in
4121 // the declaration) %al is used as hidden argument to specify the number
4122 // of SSE registers used. The contents of %al do not need to match exactly
4123 // the number of registers, but must be an upper bound on the number of SSE
4124 // registers used and is in the range 0 - 8 inclusive.
4126 // Count the number of XMM registers allocated.
4127 static const MCPhysReg XMMArgRegs[] = {
4128 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4129 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4131 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4132 assert((Subtarget.hasSSE1() || !NumXMMRegs)
4133 && "SSE registers cannot be used when SSE is disabled");
4134 RegsToPass.push_back(std::make_pair(Register(X86::AL),
4135 DAG.getConstant(NumXMMRegs, dl,
4136 MVT::i8)));
4137 }
4139 if (isVarArg && IsMustTail) {
4140 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4141 for (const auto &F : Forwards) {
4142 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4143 RegsToPass.push_back(std::make_pair(F.PReg, Val));
4147 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
4148 // don't need this because the eligibility check rejects calls that require
4149 // shuffling arguments passed in memory.
4150 if (!IsSibcall && isTailCall) {
4151 // Force all the incoming stack arguments to be loaded from the stack
4152 // before any new outgoing arguments are stored to the stack, because the
4153 // outgoing stack slots may alias the incoming argument stack slots, and
4154 // the alias isn't otherwise explicit. This is slightly more conservative
4155 // than necessary, because it means that each store effectively depends
4156 // on every argument instead of just those arguments it would clobber.
4157 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4159 SmallVector<SDValue, 8> MemOpChains2;
4160 SDValue FIN;
4161 int FI = 0;
4162 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
4163 ++I, ++OutsIndex) {
4164 CCValAssign &VA = ArgLocs[I];
4166 if (VA.isRegLoc()) {
4167 if (VA.needsCustom()) {
4168 assert((CallConv == CallingConv::X86_RegCall) &&
4169 "Expecting custom case only in regcall calling convention");
4170 // This means that we are in special case where one argument was
4171 // passed through two register locations - skip the next location.
4172 ++I;
4173 }
4175 continue;
4176 }
4178 assert(VA.isMemLoc());
4179 SDValue Arg = OutVals[OutsIndex];
4180 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4181 // Skip inalloca/preallocated arguments. They don't require any work.
4182 if (Flags.isInAlloca() || Flags.isPreallocated())
4183 continue;
4184 // Create frame index.
4185 int32_t Offset = VA.getLocMemOffset()+FPDiff;
4186 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
4187 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4188 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4190 if (Flags.isByVal()) {
4191 // Copy relative to framepointer.
4192 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4193 if (!StackPtr.getNode())
4194 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4195 getPointerTy(DAG.getDataLayout()));
4196 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4197 StackPtr, Source);
4199 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
4200 ArgChain, Flags, DAG, dl));
4202 } else {
4203 // Store relative to framepointer.
4204 MemOpChains2.push_back(DAG.getStore(
4205 ArgChain, dl, Arg, FIN,
4206 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4210 if (!MemOpChains2.empty())
4211 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4213 // Store the return address to the appropriate stack slot.
4214 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4215 getPointerTy(DAG.getDataLayout()),
4216 RegInfo->getSlotSize(), FPDiff, dl);
4219 // Build a sequence of copy-to-reg nodes chained together with token chain
4220 // and flag operands which copy the outgoing args into registers.
4222 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4223 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4224 RegsToPass[i].second, InFlag);
4225 InFlag = Chain.getValue(1);
4228 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4229 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4230 // In the 64-bit large code model, we have to make all calls
4231 // through a register, since the call instruction's 32-bit
4232 // pc-relative offset may not be large enough to hold the whole address.
4234 } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4235 Callee->getOpcode() == ISD::ExternalSymbol) {
4236 // Lower direct calls to global addresses and external symbols. Setting
4237 // ForCall to true here has the effect of removing WrapperRIP when possible
4238 // to allow direct calls to be selected without first materializing the
4239 // address into a register.
4240 Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4241 } else if (Subtarget.isTarget64BitILP32() &&
4242 Callee->getValueType(0) == MVT::i32) {
4243 // Zero-extend the 32-bit Callee address into a 64-bit according to x32 ABI
4244 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4247 // Returns a chain & a flag for retval copy to use.
4248 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4249 SmallVector<SDValue, 8> Ops;
4251 if (!IsSibcall && isTailCall && !IsMustTail) {
4252 Chain = DAG.getCALLSEQ_END(Chain,
4253 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4254 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4255 InFlag = Chain.getValue(1);
4258 Ops.push_back(Chain);
4259 Ops.push_back(Callee);
4262 Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
4264 // Add argument registers to the end of the list so that they are known live
4266 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4267 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4268 RegsToPass[i].second.getValueType()));
4270 // Add a register mask operand representing the call-preserved registers.
4271 // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists) then we
4272 // set X86_INTR calling convention because it has the same CSR mask
4273 // (same preserved registers).
4274 const uint32_t *Mask = RegInfo->getCallPreservedMask(
4275 MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
4276 assert(Mask && "Missing call preserved mask for calling convention");
4278 // If this is an invoke in a 32-bit function using a funclet-based
4279 // personality, assume the function clobbers all registers. If an exception
4280 // is thrown, the runtime will not restore CSRs.
4281 // FIXME: Model this more precisely so that we can register allocate across
4282 // the normal edge and spill and fill across the exceptional edge.
4283 if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
4284 const Function &CallerFn = MF.getFunction();
4285 EHPersonality Pers =
4286 CallerFn.hasPersonalityFn()
4287 ? classifyEHPersonality(CallerFn.getPersonalityFn())
4288 : EHPersonality::Unknown;
4289 if (isFuncletEHPersonality(Pers))
4290 Mask = RegInfo->getNoPreservedMask();
4293 // Define a new register mask from the existing mask.
4294 uint32_t *RegMask = nullptr;
4296 // In some calling conventions we need to remove the used physical registers
4297 // from the reg mask.
4298 if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4299 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4301 // Allocate a new Reg Mask and copy Mask.
4302 RegMask = MF.allocateRegMask();
4303 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4304 memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4306 // Make sure all sub registers of the argument registers are reset in the RegMask.
4308 for (auto const &RegPair : RegsToPass)
4309 for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4310 SubRegs.isValid(); ++SubRegs)
4311 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
4313 // Create the RegMask Operand according to our updated mask.
4314 Ops.push_back(DAG.getRegisterMask(RegMask));
4316 // Create the RegMask Operand according to the static mask.
4317 Ops.push_back(DAG.getRegisterMask(Mask));
4320 if (InFlag.getNode())
4321 Ops.push_back(InFlag);
4325 //// If this is the first return lowered for this function, add the regs
4326 //// to the liveout set for the function.
4327 // This isn't right, although it's probably harmless on x86; liveouts
4328 // should be computed from returns not tail calls. Consider a void
4329 // function making a tail call to a function returning int.
4330 MF.getFrameInfo().setHasTailCall();
4331 SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4332 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4336 if (HasNoCfCheck && IsCFProtectionSupported) {
4337 Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4339 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4341 InFlag = Chain.getValue(1);
4342 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
4343 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4345 // Save heapallocsite metadata.
4347 if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
4348 DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4350 // Create the CALLSEQ_END node.
4351 unsigned NumBytesForCalleeToPop;
4352 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4353 DAG.getTarget().Options.GuaranteedTailCallOpt))
4354 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
4355 else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
4356 !Subtarget.getTargetTriple().isOSMSVCRT() &&
4357 SR == StackStructReturn)
4358 // If this is a call to a struct-return function, the callee
4359 // pops the hidden struct pointer, so we have to push it back.
4360 // This is common for Darwin/X86, Linux & Mingw32 targets.
4361 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
4362 NumBytesForCalleeToPop = 4;
4364 NumBytesForCalleeToPop = 0; // Callee pops nothing.
4366 // Returns a flag for retval copy to use.
4368 Chain = DAG.getCALLSEQ_END(Chain,
4369 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4370 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4373 InFlag = Chain.getValue(1);
4376 // Handle result values, copying them out of physregs into vregs that we
4378 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4382 //===----------------------------------------------------------------------===//
4383 // Fast Calling Convention (tail call) implementation
4384 //===----------------------------------------------------------------------===//
4386 // Like the standard calling convention, the callee cleans up the arguments, except
4387 // that ECX is reserved for storing the address of the tail-called function. Only 2
4388 // registers are free for argument passing (inreg). Tail call optimization is
4389 // performed provided that:
4390 // * tailcallopt is enabled
4391 // * caller/callee are fastcc
4392 // On X86_64 architecture with GOT-style position independent code only local
4393 // (within module) calls are supported at the moment.
4394 // To keep the stack aligned according to platform abi the function
4395 // GetAlignedArgumentStackSize ensures that the argument delta is always a
4396 // multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld for example.)
4397 // If a tail called function callee has more arguments than the caller the
4398 // caller needs to make sure that there is room to move the RETADDR to. This is
4399 // achieved by reserving an area the size of the argument delta right after the
4400 // original RETADDR, but before the saved framepointer or the spilled registers
4401 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
4413 /// Align the stack size so that, together with the return-address slot, it is a
4414 /// multiple of the stack alignment, e.g. 16n + 12 for a 16-byte alignment requirement.
4415 unsigned
4416 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
4417 SelectionDAG &DAG) const {
4418 const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
4419 const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
4420 assert(StackSize % SlotSize == 0 &&
4421 "StackSize must be a multiple of SlotSize");
4422 return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
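// Illustrative numbers (assuming a 16-byte stack alignment and an 8-byte
// return-address slot, as on x86-64): StackSize = 40 -> alignTo(48, 16) - 8 = 40,
// and StackSize = 16 -> alignTo(24, 16) - 8 = 24; in both cases the aligned
// StackSize plus the return-address slot is a multiple of 16.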
4425 /// Return true if the given stack call argument is already available in the
4426 /// same relative position in the caller's incoming argument stack.
4428 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4429 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4430 const X86InstrInfo *TII, const CCValAssign &VA) {
4431 unsigned Bytes = Arg.getValueSizeInBits() / 8;
4434 // Look through nodes that don't alter the bits of the incoming value.
4435 unsigned Op = Arg.getOpcode();
4436 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4437 Arg = Arg.getOperand(0);
4440 if (Op == ISD::TRUNCATE) {
4441 const SDValue &TruncInput = Arg.getOperand(0);
4442 if (TruncInput.getOpcode() == ISD::AssertZext &&
4443 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4444 Arg.getValueType()) {
4445 Arg = TruncInput.getOperand(0);
4453 if (Arg.getOpcode() == ISD::CopyFromReg) {
4454 Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4455 if (!Register::isVirtualRegister(VR))
4457 MachineInstr *Def = MRI->getVRegDef(VR);
4460 if (!Flags.isByVal()) {
4461 if (!TII->isLoadFromStackSlot(*Def, FI))
4464 unsigned Opcode = Def->getOpcode();
4465 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4466 Opcode == X86::LEA64_32r) &&
4467 Def->getOperand(1).isFI()) {
4468 FI = Def->getOperand(1).getIndex();
4469 Bytes = Flags.getByValSize();
4473 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4474 if (Flags.isByVal())
4475 // ByVal argument is passed in as a pointer but it's now being
4476 // dereferenced. e.g.
4477 // define @foo(%struct.X* %A) {
4478 // tail call @bar(%struct.X* byval %A)
4481 SDValue Ptr = Ld->getBasePtr();
4482 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4485 FI = FINode->getIndex();
4486 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4487 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4488 FI = FINode->getIndex();
4489 Bytes = Flags.getByValSize();
4493 assert(FI != INT_MAX);
4494 if (!MFI.isFixedObjectIndex(FI))
4497 if (Offset != MFI.getObjectOffset(FI))
4500 // If this is not byval, check that the argument stack object is immutable.
4501 // inalloca and argument copy elision can create mutable argument stack
4502 // objects. Byval objects can be mutated, but a byval call intends to pass the
4504 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4507 if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4508 // If the argument location is wider than the argument type, check that any
4509 // extension flags match.
4510 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4511 Flags.isSExt() != MFI.isObjectSExt(FI)) {
4516 return Bytes == MFI.getObjectSize(FI);
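// For example (illustrative): a function that simply forwards its own incoming
// stack argument, as in 'int f(int x) { return g(x); }' with x passed on the
// stack, satisfies this check because the outgoing value is read from the very
// fixed stack slot it would otherwise have to be stored back into.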
4519 /// Check whether the call is eligible for tail call optimization. Targets
4520 /// that want to do tail call optimization should implement this function.
4521 bool X86TargetLowering::IsEligibleForTailCallOptimization(
4522 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4523 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4524 const SmallVectorImpl<ISD::OutputArg> &Outs,
4525 const SmallVectorImpl<SDValue> &OutVals,
4526 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4527 if (!mayTailCallThisCC(CalleeCC))
4530 // If -tailcallopt is specified, make fastcc functions tail-callable.
4531 MachineFunction &MF = DAG.getMachineFunction();
4532 const Function &CallerF = MF.getFunction();
4534 // If the function return type is x86_fp80 and the callee return type is not,
4535 // then the FP_EXTEND of the call result is not a nop. It's not safe to
4536 // perform a tailcall optimization here.
4537 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4540 CallingConv::ID CallerCC = CallerF.getCallingConv();
4541 bool CCMatch = CallerCC == CalleeCC;
4542 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4543 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4544 bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
4545 CalleeCC == CallingConv::Tail;
4547 // Win64 functions have extra shadow space for argument homing. Don't do the
4548 // sibcall if the caller and callee have mismatched expectations for this space.
4550 if (IsCalleeWin64 != IsCallerWin64)
4553 if (IsGuaranteeTCO) {
4554 if (canGuaranteeTCO(CalleeCC) && CCMatch)
4559 // Look for obvious safe cases to perform tail call optimization that do not
4560 // require ABI changes. This is what gcc calls sibcall.
4562 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4563 // emit a special epilogue.
4564 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4565 if (RegInfo->needsStackRealignment(MF))
4568 // Also avoid sibcall optimization if either caller or callee uses struct
4569 // return semantics.
4570 if (isCalleeStructRet || isCallerStructRet)
4573 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
4575 LLVMContext &C = *DAG.getContext();
4576 if (isVarArg && !Outs.empty()) {
4577 // Optimizing for varargs on Win64 is unlikely to be safe without
4578 // additional testing.
4579 if (IsCalleeWin64 || IsCallerWin64)
4582 SmallVector<CCValAssign, 16> ArgLocs;
4583 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4585 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4586 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4587 if (!ArgLocs[i].isRegLoc())
4591 // If the call result is in ST0 / ST1, it needs to be popped off the x87
4592 // stack. Therefore, if it's not used by the call it is not safe to optimize
4593 // this into a sibcall.
4594 bool Unused = false;
4595 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4602 SmallVector<CCValAssign, 16> RVLocs;
4603 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4604 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4605 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4606 CCValAssign &VA = RVLocs[i];
4607 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4612 // Check that the call results are passed in the same way.
4613 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4614 RetCC_X86, RetCC_X86))
4616 // The callee has to preserve all registers the caller needs to preserve.
4617 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4618 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4620 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4621 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4625 unsigned StackArgsSize = 0;
4627 // If the callee takes no arguments then go on to check the results of the call.
4629 if (!Outs.empty()) {
4630 // Check if stack adjustment is needed. For now, do not do this if any
4631 // argument is passed on the stack.
4632 SmallVector<CCValAssign, 16> ArgLocs;
4633 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4635 // Allocate shadow area for Win64
4636 if (IsCalleeWin64)
4637 CCInfo.AllocateStack(32, Align(8));
4639 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4640 StackArgsSize = CCInfo.getNextStackOffset();
4642 if (CCInfo.getNextStackOffset()) {
4643 // Check if the arguments are already laid out in the right way as
4644 // the caller's fixed stack objects.
4645 MachineFrameInfo &MFI = MF.getFrameInfo();
4646 const MachineRegisterInfo *MRI = &MF.getRegInfo();
4647 const X86InstrInfo *TII = Subtarget.getInstrInfo();
4648 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4649 CCValAssign &VA = ArgLocs[i];
4650 SDValue Arg = OutVals[i];
4651 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4652 if (VA.getLocInfo() == CCValAssign::Indirect)
4654 if (!VA.isRegLoc()) {
4655 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4662 bool PositionIndependent = isPositionIndependent();
4663 // If the tailcall address may be in a register, then make sure it's
4664 // possible to register allocate for it. In 32-bit, the call address can
4665 // only target EAX, EDX, or ECX since the tail call must be scheduled after
4666 // callee-saved registers are restored. These happen to be the same
4667 // registers used to pass 'inreg' arguments so watch out for those.
4668 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4669 !isa<ExternalSymbolSDNode>(Callee)) ||
4670 PositionIndependent)) {
4671 unsigned NumInRegs = 0;
4672 // In PIC we need an extra register to formulate the address computation for the callee.
4674 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4676 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4677 CCValAssign &VA = ArgLocs[i];
4680 Register Reg = VA.getLocReg();
4683 case X86::EAX: case X86::EDX: case X86::ECX:
4684 if (++NumInRegs == MaxInRegs)
4691 const MachineRegisterInfo &MRI = MF.getRegInfo();
4692 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4696 bool CalleeWillPop =
4697 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4698 MF.getTarget().Options.GuaranteedTailCallOpt);
4700 if (unsigned BytesToPop =
4701 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4702 // If we have bytes to pop, the callee must pop them.
4703 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4704 if (!CalleePopMatches)
4706 } else if (CalleeWillPop && StackArgsSize > 0) {
4707 // If we don't have bytes to pop, make sure the callee doesn't pop any.
4714 FastISel *
4715 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4716 const TargetLibraryInfo *libInfo) const {
4717 return X86::createFastISel(funcInfo, libInfo);
4720 //===----------------------------------------------------------------------===//
4721 // Other Lowering Hooks
4722 //===----------------------------------------------------------------------===//
4724 static bool MayFoldLoad(SDValue Op) {
4725 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4728 static bool MayFoldIntoStore(SDValue Op) {
4729 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4732 static bool MayFoldIntoZeroExtend(SDValue Op) {
4733 if (Op.hasOneUse()) {
4734 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4735 return (ISD::ZERO_EXTEND == Opcode);
4740 static bool isTargetShuffle(unsigned Opcode) {
4742 default: return false;
4743 case X86ISD::BLENDI:
4744 case X86ISD::PSHUFB:
4745 case X86ISD::PSHUFD:
4746 case X86ISD::PSHUFHW:
4747 case X86ISD::PSHUFLW:
4749 case X86ISD::INSERTPS:
4750 case X86ISD::EXTRQI:
4751 case X86ISD::INSERTQI:
4752 case X86ISD::VALIGN:
4753 case X86ISD::PALIGNR:
4754 case X86ISD::VSHLDQ:
4755 case X86ISD::VSRLDQ:
4756 case X86ISD::MOVLHPS:
4757 case X86ISD::MOVHLPS:
4758 case X86ISD::MOVSHDUP:
4759 case X86ISD::MOVSLDUP:
4760 case X86ISD::MOVDDUP:
4763 case X86ISD::UNPCKL:
4764 case X86ISD::UNPCKH:
4765 case X86ISD::VBROADCAST:
4766 case X86ISD::VPERMILPI:
4767 case X86ISD::VPERMILPV:
4768 case X86ISD::VPERM2X128:
4769 case X86ISD::SHUF128:
4770 case X86ISD::VPERMIL2:
4771 case X86ISD::VPERMI:
4772 case X86ISD::VPPERM:
4773 case X86ISD::VPERMV:
4774 case X86ISD::VPERMV3:
4775 case X86ISD::VZEXT_MOVL:
4780 static bool isTargetShuffleVariableMask(unsigned Opcode) {
4782 default: return false;
4784 case X86ISD::PSHUFB:
4785 case X86ISD::VPERMILPV:
4786 case X86ISD::VPERMIL2:
4787 case X86ISD::VPPERM:
4788 case X86ISD::VPERMV:
4789 case X86ISD::VPERMV3:
4791 // 'Faux' Target Shuffles.
4799 static bool isTargetShuffleSplat(SDValue Op) {
4800 unsigned Opcode = Op.getOpcode();
4801 if (Opcode == ISD::EXTRACT_SUBVECTOR)
4802 return isTargetShuffleSplat(Op.getOperand(0));
4803 return Opcode == X86ISD::VBROADCAST || Opcode == X86ISD::VBROADCAST_LOAD;
4806 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4807 MachineFunction &MF = DAG.getMachineFunction();
4808 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4809 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4810 int ReturnAddrIndex = FuncInfo->getRAIndex();
4812 if (ReturnAddrIndex == 0) {
4813 // Set up a frame object for the return address.
4814 unsigned SlotSize = RegInfo->getSlotSize();
4815 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4818 FuncInfo->setRAIndex(ReturnAddrIndex);
4821 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4824 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4825 bool hasSymbolicDisplacement) {
4826 // Offset should fit into a 32-bit immediate field.
4827 if (!isInt<32>(Offset))
4830 // If we don't have a symbolic displacement - we don't have any extra restrictions.
4832 if (!hasSymbolicDisplacement)
4835 // FIXME: Some tweaks might be needed for medium code model.
4836 if (M != CodeModel::Small && M != CodeModel::Kernel)
4839 // For the small code model we assume that the last object ends at least 16MB
4840 // before the 31-bit address boundary. We may also accept fairly large negative
4841 // constants, knowing that all objects are in the positive half of the address space.
4842 if (M == CodeModel::Small && Offset < 16*1024*1024)
4845 // For the kernel code model we know that all objects reside in the negative half
4846 // of the 32-bit address space. We do not accept negative offsets, since they may
4847 // push the address out of that range, but we may accept fairly large positive ones.
4848 if (M == CodeModel::Kernel && Offset >= 0)
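// Illustrative offsets: under the small code model, GV+0x100000 (1MiB) is
// within the accepted range while GV+0x7F000000 is not; under the kernel
// code model, GV+16 is acceptable but GV-16 is rejected.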
4854 /// Determines whether the callee is required to pop its own arguments.
4855 /// Callee pop is necessary to support tail calls.
4856 bool X86::isCalleePop(CallingConv::ID CallingConv,
4857 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4858 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4859 // can guarantee TCO.
4860 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4863 switch (CallingConv) {
4866 case CallingConv::X86_StdCall:
4867 case CallingConv::X86_FastCall:
4868 case CallingConv::X86_ThisCall:
4869 case CallingConv::X86_VectorCall:
4874 /// Return true if the condition is a signed comparison operation.
4875 static bool isX86CCSigned(unsigned X86CC) {
4878 llvm_unreachable("Invalid integer condition!");
4894 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4895 switch (SetCCOpcode) {
4896 default: llvm_unreachable("Invalid integer condition!");
4897 case ISD::SETEQ: return X86::COND_E;
4898 case ISD::SETGT: return X86::COND_G;
4899 case ISD::SETGE: return X86::COND_GE;
4900 case ISD::SETLT: return X86::COND_L;
4901 case ISD::SETLE: return X86::COND_LE;
4902 case ISD::SETNE: return X86::COND_NE;
4903 case ISD::SETULT: return X86::COND_B;
4904 case ISD::SETUGT: return X86::COND_A;
4905 case ISD::SETULE: return X86::COND_BE;
4906 case ISD::SETUGE: return X86::COND_AE;
4910 /// Do a one-to-one translation of an ISD::CondCode to the X86-specific
4911 /// condition code, returning the condition code and the LHS/RHS of the
4912 /// comparison to make.
4913 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4914 bool isFP, SDValue &LHS, SDValue &RHS,
4915 SelectionDAG &DAG) {
4917 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4918 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4919 // X > -1 -> X == 0, jump !sign.
4920 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4921 return X86::COND_NS;
4923 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4924 // X < 0 -> X == 0, jump on sign.
4925 return X86::COND_S;
4926 }
4927 if (SetCCOpcode == ISD::SETGE && RHSC->isNullValue()) {
4928 // X >= 0 -> X == 0, jump on !sign.
4929 return X86::COND_NS;
4931 if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
4932 // X < 1 -> X <= 0, so compare against 0 with the LE condition.
4933 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4934 return X86::COND_LE;
4938 return TranslateIntegerX86CC(SetCCOpcode);
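// For example, (setgt X, -1) is rewritten above to compare X against 0 and
// use COND_NS (sign flag clear), which avoids materializing the -1 immediate.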
4941 // First determine if it is required or is profitable to flip the operands.
4943 // If LHS is a foldable load, but RHS is not, flip the condition.
4944 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4945 !ISD::isNON_EXTLoad(RHS.getNode())) {
4946 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4947 std::swap(LHS, RHS);
4950 switch (SetCCOpcode) {
4956 std::swap(LHS, RHS);
4960 // On a floating point condition, the flags are set as follows:
4961 //  ZF  PF  CF   op
4962 //   0 | 0 | 0 | X > Y
4963 //   0 | 0 | 1 | X < Y
4964 //   1 | 0 | 0 | X == Y
4965 //   1 | 1 | 1 | unordered
4966 switch (SetCCOpcode) {
4967 default: llvm_unreachable("Condcode should be pre-legalized away");
4969 case ISD::SETEQ: return X86::COND_E;
4970 case ISD::SETOLT: // flipped
4972 case ISD::SETGT: return X86::COND_A;
4973 case ISD::SETOLE: // flipped
4975 case ISD::SETGE: return X86::COND_AE;
4976 case ISD::SETUGT: // flipped
4978 case ISD::SETLT: return X86::COND_B;
4979 case ISD::SETUGE: // flipped
4981 case ISD::SETLE: return X86::COND_BE;
4983 case ISD::SETNE: return X86::COND_NE;
4984 case ISD::SETUO: return X86::COND_P;
4985 case ISD::SETO: return X86::COND_NP;
4987 case ISD::SETUNE: return X86::COND_INVALID;
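// For example, (setolt X, Y) is handled by swapping the operands above and
// using COND_A (CF == 0 and ZF == 0), so an unordered compare (all flags set)
// correctly fails the condition.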
4991 /// Is there a floating point cmov for the specific X86 condition code?
4992 /// The current x86 ISA includes the following FP cmov instructions:
4993 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4994 static bool hasFPCMov(unsigned X86CC) {
5011 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
5013 MachineFunction &MF,
5014 unsigned Intrinsic) const {
5016 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
5020 Info.flags = MachineMemOperand::MONone;
5023 switch (IntrData->Type) {
5024 case TRUNCATE_TO_MEM_VI8:
5025 case TRUNCATE_TO_MEM_VI16:
5026 case TRUNCATE_TO_MEM_VI32: {
5027 Info.opc = ISD::INTRINSIC_VOID;
5028 Info.ptrVal = I.getArgOperand(0);
5029 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
5030 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
5031 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
5033 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
5034 ScalarVT = MVT::i16;
5035 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
5036 ScalarVT = MVT::i32;
5038 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
5039 Info.align = Align(1);
5040 Info.flags |= MachineMemOperand::MOStore;
5045 Info.opc = ISD::INTRINSIC_W_CHAIN;
5046 Info.ptrVal = nullptr;
5047 MVT DataVT = MVT::getVT(I.getType());
5048 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5049 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5050 IndexVT.getVectorNumElements());
5051 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5052 Info.align = Align(1);
5053 Info.flags |= MachineMemOperand::MOLoad;
5057 Info.opc = ISD::INTRINSIC_VOID;
5058 Info.ptrVal = nullptr;
5059 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
5060 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5061 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5062 IndexVT.getVectorNumElements());
5063 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5064 Info.align = Align(1);
5065 Info.flags |= MachineMemOperand::MOStore;
5075 /// Returns true if the target can instruction select the
5076 /// specified FP immediate natively. If false, the legalizer will
5077 /// materialize the FP immediate as a load from a constant pool.
5078 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5079 bool ForCodeSize) const {
5080 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
5081 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
5087 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5088 ISD::LoadExtType ExtTy,
5090 assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5092 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
5093 // relocations must target a movq or addq instruction: don't let the load shrink.
5094 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
5095 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5096 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5097 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5099 // If this is (1) an AVX vector load with (2) multiple uses and (3) all of
5100 // those uses are extracted directly into a store, then the extract + store
5101 // can be store-folded. Therefore, it's probably not worth splitting the load.
5102 EVT VT = Load->getValueType(0);
5103 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5104 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5105 // Skip uses of the chain value. Result 0 of the node is the load value.
5106 if (UI.getUse().getResNo() != 0)
5109 // If this use is not an extract + store, it's probably worth splitting.
5110 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5111 UI->use_begin()->getOpcode() != ISD::STORE)
5114 // All non-chain uses are extract + store.
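// For example (illustrative): a 256-bit load whose only non-chain uses are two
// 128-bit EXTRACT_SUBVECTORs that each feed a store is left whole here, since
// the extracts can be folded into the stores; narrowing the load would only
// add work.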
5121 /// Returns true if it is beneficial to convert a load of a constant
5122 /// to just the constant itself.
5123 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
5125 assert(Ty->isIntegerTy());
5127 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5128 if (BitSize == 0 || BitSize > 64)
5133 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5134 // If we are using XMM registers in the ABI and the condition of the select is
5135 // a floating-point compare and we have blendv or conditional move, then it is
5136 // cheaper to select instead of doing a cross-register move and creating a
5137 // load that depends on the compare result.
5138 bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5139 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5142 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5143 // TODO: It might be a win to ease or lift this restriction, but the generic
5144 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5145 if (VT.isVector() && Subtarget.hasAVX512())
5151 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
5153 // TODO: We handle scalars using custom code, but generic combining could make
5154 // that unnecessary.
5156 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
5159 // Find the type this will be legalized to. Otherwise we might prematurely
5160 // convert this to shl+add/sub and then still have to type legalize those ops.
5161 // Another choice would be to defer the decision for illegal types until
5162 // after type legalization. But constant splat vectors of i64 can't make it
5163 // through type legalization on 32-bit targets so we would need to special
5164 // case them anyway.
5165 while (getTypeAction(Context, VT) != TypeLegal)
5166 VT = getTypeToTransformTo(Context, VT);
5168 // If vector multiply is legal, assume that's faster than shl + add/sub.
5169 // TODO: Multiply is a complex op with higher latency and lower throughput in
5170 // most implementations, so this check could be loosened based on type
5171 // and/or a CPU attribute.
5172 if (isOperationLegal(ISD::MUL, VT))
5175 // shl+add, shl+sub, shl+add+neg
5176 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
5177 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
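// Illustrative decompositions: MulC == 5 -> (X << 2) + X, MulC == 7 ->
// (X << 3) - X, MulC == -3 -> X - (X << 2), and MulC == -9 -> -((X << 3) + X).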
5180 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5181 unsigned Index) const {
5182 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
5185 // Mask vectors support all subregister combinations and operations that
5186 // extract half of vector.
5187 if (ResVT.getVectorElementType() == MVT::i1)
5188 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
5189 (Index == ResVT.getVectorNumElements()));
5191 return (Index % ResVT.getVectorNumElements()) == 0;
5194 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
5195 unsigned Opc = VecOp.getOpcode();
5197 // Assume target opcodes can't be scalarized.
5198 // TODO - do we have any exceptions?
5199 if (Opc >= ISD::BUILTIN_OP_END)
5202 // If the vector op is not supported, try to convert to scalar.
5203 EVT VecVT = VecOp.getValueType();
5204 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
5207 // If the vector op is supported, but the scalar op is not, the transform may
5208 // not be worthwhile.
5209 EVT ScalarVT = VecVT.getScalarType();
5210 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
5213 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
5215 // TODO: Allow vectors?
5218 return VT.isSimple() || !isOperationExpand(Opcode, VT);
5221 bool X86TargetLowering::isCheapToSpeculateCttz() const {
5222 // Speculate cttz only if we can directly use TZCNT.
5223 return Subtarget.hasBMI();
5226 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
5227 // Speculate ctlz only if we can directly use LZCNT.
5228 return Subtarget.hasLZCNT();
5231 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5232 const SelectionDAG &DAG,
5233 const MachineMemOperand &MMO) const {
5234 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5235 BitcastVT.getVectorElementType() == MVT::i1)
5238 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5241 // If both types are legal vectors, it's always ok to convert them.
5242 if (LoadVT.isVector() && BitcastVT.isVector() &&
5243 isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5246 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5249 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5250 const SelectionDAG &DAG) const {
5251 // Do not merge to the float value size (128 bits) if no implicit
5252 // float attribute is set.
5253 bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
5254 Attribute::NoImplicitFloat);
5257 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5258 return (MemVT.getSizeInBits() <= MaxIntSize);
5260 // Make sure we don't merge greater than our preferred vector width.
5262 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5267 bool X86TargetLowering::isCtlzFast() const {
5268 return Subtarget.hasFastLZCNT();
5271 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
5272 const Instruction &AndI) const {
5276 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
5277 EVT VT = Y.getValueType();
5282 if (!Subtarget.hasBMI())
5285 // There are only 32-bit and 64-bit forms for 'andn'.
5286 if (VT != MVT::i32 && VT != MVT::i64)
5289 return !isa<ConstantSDNode>(Y);
5292 bool X86TargetLowering::hasAndNot(SDValue Y) const {
5293 EVT VT = Y.getValueType();
5296 return hasAndNotCompare(Y);
5300 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5303 if (VT == MVT::v4i32)
5306 return Subtarget.hasSSE2();
5309 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
5310 return X.getValueType().isScalarInteger(); // 'bt'
5313 bool X86TargetLowering::
5314 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5315 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5316 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5317 SelectionDAG &DAG) const {
5318 // Does baseline recommend not to perform the fold by default?
5319 if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5320 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
5322 // For scalars this transform is always beneficial.
5323 if (X.getValueType().isScalarInteger())
5325 // If all the shift amounts are identical, then transform is beneficial even
5326 // with rudimentary SSE2 shifts.
5327 if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
5329 // If we have AVX2 with its powerful shift operations, then it's also good.
5330 if (Subtarget.hasAVX2())
5332 // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
5333 return NewShiftOpcode == ISD::SHL;
5336 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5337 const SDNode *N, CombineLevel Level) const {
5338 assert(((N->getOpcode() == ISD::SHL &&
5339 N->getOperand(0).getOpcode() == ISD::SRL) ||
5340 (N->getOpcode() == ISD::SRL &&
5341 N->getOperand(0).getOpcode() == ISD::SHL)) &&
5342 "Expected shift-shift mask");
5343 EVT VT = N->getValueType(0);
5344 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5345 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5346 // Only fold if the shift values are equal - so it folds to AND.
5347 // TODO - we should fold if either is a non-uniform vector but we don't do
5348 // the fold for non-splats yet.
5349 return N->getOperand(1) == N->getOperand(0).getOperand(1);
5351 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
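// For example, with equal shift amounts (X << C) >> C folds to
// X & (all-ones >> C), i.e. a single AND with a constant mask.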
5354 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5355 EVT VT = Y.getValueType();
5357 // For vectors, we don't have a preference, but we probably want a mask.
5361 // 64-bit shifts on 32-bit targets produce really bad bloated code.
5362 if (VT == MVT::i64 && !Subtarget.is64Bit())
5368 bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
5370 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
5371 !Subtarget.isOSWindows())
5376 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5377 // Any legal vector type can be splatted more efficiently than
5378 // loading/spilling from memory.
5379 return isTypeLegal(VT);
5382 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5383 MVT VT = MVT::getIntegerVT(NumBits);
5384 if (isTypeLegal(VT))
5387 // PMOVMSKB can handle this.
5388 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5391 // VPMOVMSKB can handle this.
5392 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5395 // TODO: Allow 64-bit type for 32-bit target.
5396 // TODO: 512-bit types should be allowed, but make sure that those
5397 // cases are handled in combineVectorSizedSetCCEquality().
5399 return MVT::INVALID_SIMPLE_VALUE_TYPE;
5402 /// Val is the undef sentinel value or equal to the specified value.
5403 static bool isUndefOrEqual(int Val, int CmpVal) {
5404 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5407 /// Val is either the undef or zero sentinel value.
5408 static bool isUndefOrZero(int Val) {
5409 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5412 /// Return true if every element in Mask, beginning from position Pos and ending
5413 /// in Pos+Size is the undef sentinel value.
5414 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5415 return llvm::all_of(Mask.slice(Pos, Size),
5416 [](int M) { return M == SM_SentinelUndef; });
5419 /// Return true if the mask creates a vector whose lower half is undefined.
5420 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5421 unsigned NumElts = Mask.size();
5422 return isUndefInRange(Mask, 0, NumElts / 2);
5425 /// Return true if the mask creates a vector whose upper half is undefined.
5426 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5427 unsigned NumElts = Mask.size();
5428 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
5431 /// Return true if Val falls within the specified half-open range [Low, Hi).
5432 static bool isInRange(int Val, int Low, int Hi) {
5433 return (Val >= Low && Val < Hi);
5436 /// Return true if the value of any element in Mask falls within the specified
5438 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5439 return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
5442 /// Return true if the value of any element in Mask is the zero sentinel value.
5443 static bool isAnyZero(ArrayRef<int> Mask) {
5444 return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
5447 /// Return true if the value of any element in Mask is the zero or undef
5448 /// sentinel values.
5449 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
5450 return llvm::any_of(Mask, [](int M) {
5451 return M == SM_SentinelZero || M == SM_SentinelUndef;
5455 /// Return true if Val is undef or if its value falls within the
5456 /// specified half-open range [Low, Hi).
5457 static bool isUndefOrInRange(int Val, int Low, int Hi) {
5458 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5461 /// Return true if every element in Mask is undef or if its value
5462 /// falls within the specified half-open range [Low, Hi).
5463 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5464 return llvm::all_of(
5465 Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
5468 /// Return true if Val is undef, zero or if its value falls within the
5469 /// specified half-open range [Low, Hi).
5470 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5471 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5474 /// Return true if every element in Mask is undef, zero or if its value
5475 /// falls within the specified half-open range [Low, Hi).
5476 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5477 return llvm::all_of(
5478 Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
5481 /// Return true if every element in Mask, beginning
5482 /// from position Pos and ending in Pos + Size, falls within the specified
5483 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5484 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5485 unsigned Size, int Low, int Step = 1) {
5486 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5487 if (!isUndefOrEqual(Mask[i], Low))
5492 /// Return true if every element in Mask, beginning
5493 /// from position Pos and ending in Pos+Size, falls within the specified
5494 /// sequential range (Low, Low+Size], or is undef or is zero.
5495 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5496 unsigned Size, int Low,
5498 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5499 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5504 /// Return true if every element in Mask, beginning
5505 /// from position Pos and ending in Pos+Size is undef or is zero.
5506 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5508 return llvm::all_of(Mask.slice(Pos, Size),
5509 [](int M) { return isUndefOrZero(M); });
5512 /// Helper function to test whether a shuffle mask could be
5513 /// simplified by widening the elements being shuffled.
5515 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5516 /// leaves it in an unspecified state.
5518 /// NOTE: This must handle normal vector shuffle masks and *target* vector
5519 /// shuffle masks. The latter have the special property of a '-2' representing
5520 /// a zero-ed lane of a vector.
5521 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5522 SmallVectorImpl<int> &WidenedMask) {
5523 WidenedMask.assign(Mask.size() / 2, 0);
5524 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5526 int M1 = Mask[i + 1];
5528 // If both elements are undef, it's trivial.
5529 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5530 WidenedMask[i / 2] = SM_SentinelUndef;
5534 // Check for an undef mask and a mask value properly aligned to fit with
5535 // a pair of values. If we find such a case, use the non-undef mask's value.
5536 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5537 WidenedMask[i / 2] = M1 / 2;
5540 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5541 WidenedMask[i / 2] = M0 / 2;
5545 // When zeroing, we need to spread the zeroing across both lanes to widen.
5546 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5547 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5548 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5549 WidenedMask[i / 2] = SM_SentinelZero;
5555 // Finally check if the two mask values are adjacent and aligned to form a pair.
5557 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5558 WidenedMask[i / 2] = M0 / 2;
5562 // Otherwise we can't safely widen the elements used in this shuffle.
5565 assert(WidenedMask.size() == Mask.size() / 2 &&
5566 "Incorrect size of mask after widening the elements!");
5571 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5572 const APInt &Zeroable,
5574 SmallVectorImpl<int> &WidenedMask) {
5575 // Create an alternative mask with info about zeroable elements.
5576 // Here we do not set undef elements as zeroable.
5577 SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
5579 assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
5580 for (int i = 0, Size = Mask.size(); i != Size; ++i)
5581 if (Mask[i] != SM_SentinelUndef && Zeroable[i])
5582 ZeroableMask[i] = SM_SentinelZero;
5584 return canWidenShuffleElements(ZeroableMask, WidenedMask);
5587 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5588 SmallVector<int, 32> WidenedMask;
5589 return canWidenShuffleElements(Mask, WidenedMask);
5593 // Attempt to narrow/widen a shuffle mask until it matches the target number of elements.
5594 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
5595 SmallVectorImpl<int> &ScaledMask) {
5596 unsigned NumSrcElts = Mask.size();
5597 assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
5598 "Illegal shuffle scale factor");
5600 // Narrowing is guaranteed to work.
5601 if (NumDstElts >= NumSrcElts) {
5602 int Scale = NumDstElts / NumSrcElts;
5603 llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
5607 // We have to repeat the widening until we reach the target size, but we can
5608 // split out the first widening as it sets up ScaledMask for us.
5609 if (canWidenShuffleElements(Mask, ScaledMask)) {
5610 while (ScaledMask.size() > NumDstElts) {
5611 SmallVector<int, 16> WidenedMask;
5612 if (!canWidenShuffleElements(ScaledMask, WidenedMask))
5614 ScaledMask = std::move(WidenedMask);
5622 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
5623 bool X86::isZeroNode(SDValue Elt) {
5624 return isNullConstant(Elt) || isNullFPConstant(Elt);
5627 // Build a vector of constants.
5628 // Use an UNDEF node if MaskElt == -1.
5629 // Split 64-bit constants in the 32-bit mode.
5630 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5631 const SDLoc &dl, bool IsMask = false) {
5633 SmallVector<SDValue, 32> Ops;
5636 MVT ConstVecVT = VT;
5637 unsigned NumElts = VT.getVectorNumElements();
5638 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5639 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5640 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5644 MVT EltVT = ConstVecVT.getVectorElementType();
5645 for (unsigned i = 0; i < NumElts; ++i) {
5646 bool IsUndef = Values[i] < 0 && IsMask;
5647 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5648 DAG.getConstant(Values[i], dl, EltVT);
5649 Ops.push_back(OpNode);
5651 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5652 DAG.getConstant(0, dl, EltVT));
5654 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5656 ConstsNode = DAG.getBitcast(VT, ConstsNode);
5660 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5661 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5662 assert(Bits.size() == Undefs.getBitWidth() &&
5663 "Unequal constant and undef arrays");
5664 SmallVector<SDValue, 32> Ops;
5667 MVT ConstVecVT = VT;
5668 unsigned NumElts = VT.getVectorNumElements();
5669 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5670 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5671 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5675 MVT EltVT = ConstVecVT.getVectorElementType();
5676 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5678 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5681 const APInt &V = Bits[i];
5682 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5684 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5685 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5686 } else if (EltVT == MVT::f32) {
5687 APFloat FV(APFloat::IEEEsingle(), V);
5688 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5689 } else if (EltVT == MVT::f64) {
5690 APFloat FV(APFloat::IEEEdouble(), V);
5691 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5693 Ops.push_back(DAG.getConstant(V, dl, EltVT));
5697 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5698 return DAG.getBitcast(VT, ConstsNode);
5701 /// Returns a vector of specified type with all zero elements.
5702 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5703 SelectionDAG &DAG, const SDLoc &dl) {
5704 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5705 VT.getVectorElementType() == MVT::i1) &&
5706 "Unexpected vector type");
5708 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5709 // type. This ensures they get CSE'd. But if the integer type is not
5710 // available, use a floating-point +0.0 instead.
5712 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5713 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5714 } else if (VT.isFloatingPoint()) {
5715 Vec = DAG.getConstantFP(+0.0, dl, VT);
5716 } else if (VT.getVectorElementType() == MVT::i1) {
5717 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5718 "Unexpected vector type");
5719 Vec = DAG.getConstant(0, dl, VT);
5721 unsigned Num32BitElts = VT.getSizeInBits() / 32;
5722 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5724 return DAG.getBitcast(VT, Vec);
5727 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5728 const SDLoc &dl, unsigned vectorWidth) {
5729 EVT VT = Vec.getValueType();
5730 EVT ElVT = VT.getVectorElementType();
5731 unsigned Factor = VT.getSizeInBits()/vectorWidth;
5732 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5733 VT.getVectorNumElements()/Factor);
5735 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
5736 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5737 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5739 // This is the index of the first element of the vectorWidth-bit chunk
5740 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5741 IdxVal &= ~(ElemsPerChunk - 1);
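// e.g. when extracting 128 bits from a v8i32 source, ElemsPerChunk is 4, so an
// IdxVal of 5 is rounded down to 4 (the start of the second 128-bit chunk).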
5743 // If the input is a buildvector just emit a smaller one.
5744 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5745 return DAG.getBuildVector(ResultVT, dl,
5746 Vec->ops().slice(IdxVal, ElemsPerChunk));
5748 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5749 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
5752 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
5753 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5754 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5755 /// instructions or a simple subregister reference. Idx is an index in the
5756 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
5757 /// lowering EXTRACT_VECTOR_ELT operations easier.
5758 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5759 SelectionDAG &DAG, const SDLoc &dl) {
5760 assert((Vec.getValueType().is256BitVector() ||
5761 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5762 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5765 /// Generate a DAG to grab 256-bits from a 512-bit vector.
5766 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5767 SelectionDAG &DAG, const SDLoc &dl) {
5768 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5769 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5772 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5773 SelectionDAG &DAG, const SDLoc &dl,
5774 unsigned vectorWidth) {
5775 assert((vectorWidth == 128 || vectorWidth == 256) &&
5776 "Unsupported vector width");
5777 // Inserting an UNDEF subvector leaves Result unchanged.
5778 if (Vec.isUndef())
5779 return Result;
5780 EVT VT = Vec.getValueType();
5781 EVT ElVT = VT.getVectorElementType();
5782 EVT ResultVT = Result.getValueType();
5784 // Insert the relevant vectorWidth bits.
5785 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5786 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5788 // This is the index of the first element of the vectorWidth-bit chunk
5789 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5790 IdxVal &= ~(ElemsPerChunk - 1);
5792 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5793 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5796 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
5797 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5798 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5799 /// simple superregister reference. Idx is an index in the 128 bits
5800 /// we want. It need not be aligned to a 128-bit boundary. That makes
5801 /// lowering INSERT_VECTOR_ELT operations easier.
5802 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5803 SelectionDAG &DAG, const SDLoc &dl) {
5804 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5805 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5808 /// Widen a vector to a larger size with the same scalar type, with the new
5809 /// elements either zero or undef.
5810 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5811 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5813 assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5814 Vec.getValueType().getScalarType() == VT.getScalarType() &&
5815 "Unsupported vector widening type");
5816 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5818 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5819 DAG.getIntPtrConstant(0, dl));
5822 /// Widen a vector to a larger size with the same scalar type, with the new
5823 /// elements either zero or undef.
5824 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5825 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5826 const SDLoc &dl, unsigned WideSizeInBits) {
5827 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5828 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5829 "Unsupported vector widening type");
5830 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5831 MVT SVT = Vec.getSimpleValueType().getScalarType();
5832 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5833 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5836 // Helper function to collect subvector ops that are concatenated together,
5837 // either by ISD::CONCAT_VECTORS or a ISD::INSERT_SUBVECTOR series.
5838 // The subvectors in Ops are guaranteed to be the same type.
5839 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5840 assert(Ops.empty() && "Expected an empty ops vector");
5842 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5843 Ops.append(N->op_begin(), N->op_end());
5847 if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
5848 SDValue Src = N->getOperand(0);
5849 SDValue Sub = N->getOperand(1);
5850 const APInt &Idx = N->getConstantOperandAPInt(2);
5851 EVT VT = Src.getValueType();
5852 EVT SubVT = Sub.getValueType();
5854 // TODO - Handle more general insert_subvector chains.
5855 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5856 Idx == (VT.getVectorNumElements() / 2)) {
5857 // insert_subvector(insert_subvector(undef, x, lo), y, hi)
5858 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5859 Src.getOperand(1).getValueType() == SubVT &&
5860 isNullConstant(Src.getOperand(2))) {
5861 Ops.push_back(Src.getOperand(1));
5865 // insert_subvector(x, extract_subvector(x, lo), hi)
5866 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5867 Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
5877 static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
5879 EVT VT = Op.getValueType();
5880 unsigned NumElems = VT.getVectorNumElements();
5881 unsigned SizeInBits = VT.getSizeInBits();
5882 assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
5883 "Can't split odd sized vector");
5885 SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
5886 SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
5887 return std::make_pair(Lo, Hi);
5890 // Split an unary integer op into 2 half sized ops.
5891 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
5892 EVT VT = Op.getValueType();
5894 // Make sure we only try to split 256/512-bit types to avoid creating narrow vectors.
5896 assert((Op.getOperand(0).getValueType().is256BitVector() ||
5897 Op.getOperand(0).getValueType().is512BitVector()) &&
5898 (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
5899 assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
5900 VT.getVectorNumElements() &&
5905 // Extract the Lo/Hi vectors
5907 std::tie(Lo, Hi) = splitVector(Op.getOperand(0), DAG, dl);
5910 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
5911 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
5912 DAG.getNode(Op.getOpcode(), dl, LoVT, Lo),
5913 DAG.getNode(Op.getOpcode(), dl, HiVT, Hi));
5916 /// Break a binary integer operation into 2 half sized ops and then
5917 /// concatenate the result back.
5918 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
5919 EVT VT = Op.getValueType();
5921 // Sanity check that all the types match.
5922 assert(Op.getOperand(0).getValueType() == VT &&
5923 Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
5924 assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
5928 // Extract the LHS Lo/Hi vectors
5930 std::tie(LHS1, LHS2) = splitVector(Op.getOperand(0), DAG, dl);
5932 // Extract the RHS Lo/Hi vectors
5934 std::tie(RHS1, RHS2) = splitVector(Op.getOperand(1), DAG, dl);
5937 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
5938 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
5939 DAG.getNode(Op.getOpcode(), dl, LoVT, LHS1, RHS1),
5940 DAG.getNode(Op.getOpcode(), dl, HiVT, LHS2, RHS2));
5943 // Helper for splitting operands of an operation into legal-sized chunks and
5944 // applying a function on each part.
5945 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5946 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5947 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5948 // The argument Builder is a function that will be applied on each split part:
5949 //   SDValue Builder(SelectionDAG &DAG, const SDLoc &DL, ArrayRef<SDValue> Ops)
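// An illustrative usage sketch (LHS/RHS here are just placeholder operands,
// not names used elsewhere in this file):
//   auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
//                            ArrayRef<SDValue> Ops) {
//     MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
//     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops);
//   };
//   SDValue R = SplitOpsAndApply(DAG, Subtarget, DL, VT, {LHS, RHS},
//                                PMADDWDBuilder);
// Each Ops element is split into NumSubs equally sized pieces, Builder is
// invoked once per piece, and the per-piece results are concatenated to VT.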
5950 template <typename F>
5951 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5952 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5953 F Builder, bool CheckBWI = true) {
5954 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5955 unsigned NumSubs = 1;
5956 if ((CheckBWI && Subtarget.useBWIRegs()) ||
5957 (!CheckBWI && Subtarget.useAVX512Regs())) {
5958 if (VT.getSizeInBits() > 512) {
5959 NumSubs = VT.getSizeInBits() / 512;
5960 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5962 } else if (Subtarget.hasAVX2()) {
5963 if (VT.getSizeInBits() > 256) {
5964 NumSubs = VT.getSizeInBits() / 256;
5965 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5968 if (VT.getSizeInBits() > 128) {
5969 NumSubs = VT.getSizeInBits() / 128;
5970 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5975 return Builder(DAG, DL, Ops);
5977 SmallVector<SDValue, 4> Subs;
5978 for (unsigned i = 0; i != NumSubs; ++i) {
5979 SmallVector<SDValue, 2> SubOps;
5980 for (SDValue Op : Ops) {
5981 EVT OpVT = Op.getValueType();
5982 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5983 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5984 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5986 Subs.push_back(Builder(DAG, DL, SubOps));
5988 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
5991 /// Insert an i1 subvector into an i1 vector.
5992 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5993 const X86Subtarget &Subtarget) {
5996 SDValue Vec = Op.getOperand(0);
5997 SDValue SubVec = Op.getOperand(1);
5998 SDValue Idx = Op.getOperand(2);
5999 unsigned IdxVal = Op.getConstantOperandVal(2);
6001 // Inserting undef is a nop. We can just return the original vector.
6002 if (SubVec.isUndef())
6005 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
6008 MVT OpVT = Op.getSimpleValueType();
6009 unsigned NumElems = OpVT.getVectorNumElements();
6010 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
6012 // Extend to natively supported kshift.
6013 MVT WideOpVT = OpVT;
6014 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
6015 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
6017 // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts if needed.
6019 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
6020 // May need to promote to a legal type.
6021 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6022 DAG.getConstant(0, dl, WideOpVT),
6024 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6027 MVT SubVecVT = SubVec.getSimpleValueType();
6028 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
6029 assert(IdxVal + SubVecNumElems <= NumElems &&
6030 IdxVal % SubVecVT.getSizeInBits() == 0 &&
6031 "Unexpected index value in INSERT_SUBVECTOR");
6033 SDValue Undef = DAG.getUNDEF(WideOpVT);
6036 // Zero lower bits of the Vec
6037 SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
6038 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
6040 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
6041 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
6042 // Merge them together; SubVec should be zero-extended.
6043 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6044 DAG.getConstant(0, dl, WideOpVT),
6046 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6047 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6050 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6051 Undef, SubVec, ZeroIdx);
6053 if (Vec.isUndef()) {
6054 assert(IdxVal != 0 && "Unexpected index");
6055 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6056 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6057 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6060 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
6061 assert(IdxVal != 0 && "Unexpected index");
6062 NumElems = WideOpVT.getVectorNumElements();
6063 unsigned ShiftLeft = NumElems - SubVecNumElems;
6064 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
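// Shift the subvector all the way up to the top of the wide register (which
// also pushes out the undef bits that were above it), then shift it back down
// so its first element lands at IdxVal.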
6065 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6066 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6067 if (ShiftRight != 0)
6068 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6069 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6070 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6073 // Simple case: the subvector goes into the upper part of the vector.
6074 if (IdxVal + SubVecNumElems == NumElems) {
6075 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6076 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6077 if (SubVecNumElems * 2 == NumElems) {
6078 // Special case, use legal zero extending insert_subvector. This allows
6079 // isel to optimize when bits are known zero.
6080 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
6081 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6082 DAG.getConstant(0, dl, WideOpVT),
6085 // Otherwise use explicit shifts to zero the bits.
6086 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6087 Undef, Vec, ZeroIdx);
6088 NumElems = WideOpVT.getVectorNumElements();
6089 SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
6090 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
6091 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
6093 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6094 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6097 // Inserting into the middle is more complicated.
6099 NumElems = WideOpVT.getVectorNumElements();
6101 // Widen the vector if needed.
6102 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
6104 unsigned ShiftLeft = NumElems - SubVecNumElems;
6105 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
6107 // Do an optimization for the most frequently used types.
6108 if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
6109 APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
6110 Mask0.flipAllBits();
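// Mask0 now has zeros over [IdxVal, IdxVal + SubVecNumElems) and ones
// everywhere else, so AND'ing with it clears only the insertion slot in Vec.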
6111 SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
6112 SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
6113 Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
6114 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6115 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6116 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6117 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6118 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6120 // Reduce to original width if needed.
6121 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6124 // Clear the upper bits of the subvector and move it to its insert position.
6125 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6126 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6127 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6128 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6130 // Isolate the bits below the insertion point.
6131 unsigned LowShift = NumElems - IdxVal;
6132 SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
6133 DAG.getTargetConstant(LowShift, dl, MVT::i8));
6134 Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
6135 DAG.getTargetConstant(LowShift, dl, MVT::i8));
6137 // Isolate the bits after the last inserted bit.
6138 unsigned HighShift = IdxVal + SubVecNumElems;
6139 SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
6140 DAG.getTargetConstant(HighShift, dl, MVT::i8));
6141 High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
6142 DAG.getTargetConstant(HighShift, dl, MVT::i8));
6144 // Now OR all 3 pieces together.
6145 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
6146 SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
6148 // Reduce to original width if needed.
6149 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6152 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
6154 assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
6155 EVT SubVT = V1.getValueType();
6156 EVT SubSVT = SubVT.getScalarType();
6157 unsigned SubNumElts = SubVT.getVectorNumElements();
6158 unsigned SubVectorWidth = SubVT.getSizeInBits();
6159 EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
6160 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
6161 return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
6164 /// Returns a vector of the specified type with all bits set.
6165 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>,
6166 /// then bitcast to the requested type, ensuring they get CSE'd.
6167 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6168 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6169 "Expected a 128/256/512-bit vector type");
6171 APInt Ones = APInt::getAllOnesValue(32);
6172 unsigned NumElts = VT.getSizeInBits() / 32;
6173 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
6174 return DAG.getBitcast(VT, Vec);
6177 // Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
6178 static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
6180 case ISD::ANY_EXTEND:
6181 case ISD::ANY_EXTEND_VECTOR_INREG:
6182 return ISD::ANY_EXTEND_VECTOR_INREG;
6183 case ISD::ZERO_EXTEND:
6184 case ISD::ZERO_EXTEND_VECTOR_INREG:
6185 return ISD::ZERO_EXTEND_VECTOR_INREG;
6186 case ISD::SIGN_EXTEND:
6187 case ISD::SIGN_EXTEND_VECTOR_INREG:
6188 return ISD::SIGN_EXTEND_VECTOR_INREG;
6190 llvm_unreachable("Unknown opcode");
6193 static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
6194 SDValue In, SelectionDAG &DAG) {
6195 EVT InVT = In.getValueType();
6196 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
6197 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
6198 ISD::ZERO_EXTEND == Opcode) &&
6199 "Unknown extension opcode");
6201 // For 256-bit vectors, we only need the lower (128-bit) input half.
6202 // For 512-bit vectors, we only need the lower input half or quarter.
6203 if (InVT.getSizeInBits() > 128) {
6204 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
6205 "Expected VTs to be the same size!");
6206 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
6207 In = extractSubVector(In, 0, DAG, DL,
6208 std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
6209 InVT = In.getValueType();
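// If the (possibly narrowed) input still has more elements than the result,
// only its low elements are extended - use the *_EXTEND_VECTOR_INREG form,
// which is defined to do exactly that.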
6212 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
6213 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
6215 return DAG.getNode(Opcode, DL, VT, In);
6218 // Match (xor X, -1) -> X.
6219 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
6220 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
6221 static SDValue IsNOT(SDValue V, SelectionDAG &DAG, bool OneUse = false) {
6222 V = OneUse ? peekThroughOneUseBitcasts(V) : peekThroughBitcasts(V);
6223 if (V.getOpcode() == ISD::XOR &&
6224 ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
6225 return V.getOperand(0);
6226 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6227 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
6228 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
6229 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
6230 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
6231 Not, V.getOperand(1));
6234 SmallVector<SDValue, 2> CatOps;
6235 if (collectConcatOps(V.getNode(), CatOps)) {
6236 for (SDValue &CatOp : CatOps) {
6237 SDValue NotCat = IsNOT(CatOp, DAG);
6238 if (!NotCat) return SDValue();
6239 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
6241 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
6246 void llvm::createUnpackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6247 bool Lo, bool Unary) {
6248 assert(Mask.empty() && "Expected an empty shuffle mask vector");
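// For example, for v8i16: Lo (binary) -> <0,8,1,9,2,10,3,11>,
// Hi (binary) -> <4,12,5,13,6,14,7,15>, Lo (unary) -> <0,0,1,1,2,2,3,3>.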
6249 int NumElts = VT.getVectorNumElements();
6250 int NumEltsInLane = 128 / VT.getScalarSizeInBits();
6251 for (int i = 0; i < NumElts; ++i) {
6252 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
6253 int Pos = (i % NumEltsInLane) / 2 + LaneStart;
6254 Pos += (Unary ? 0 : NumElts * (i % 2));
6255 Pos += (Lo ? 0 : NumEltsInLane / 2);
6256 Mask.push_back(Pos);
6260 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
6261 /// imposed by AVX and specific to the unary pattern. Example:
6262 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
6263 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
6264 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6266 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6267 int NumElts = VT.getVectorNumElements();
6268 for (int i = 0; i < NumElts; ++i) {
6270 Pos += (Lo ? 0 : NumElts / 2);
6271 Mask.push_back(Pos);
6275 /// Returns a vector_shuffle node for an unpackl operation.
6276 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6277 SDValue V1, SDValue V2) {
6278 SmallVector<int, 8> Mask;
6279 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
6280 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6283 /// Returns a vector_shuffle node for an unpackh operation.
6284 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6285 SDValue V1, SDValue V2) {
6286 SmallVector<int, 8> Mask;
6287 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
6288 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6291 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
6292 /// This produces a shuffle where the low element of V2 is swizzled into the
6293 /// zero/undef vector, landing at element Idx.
6294 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
6295 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
6297 const X86Subtarget &Subtarget,
6298 SelectionDAG &DAG) {
6299 MVT VT = V2.getSimpleValueType();
6301 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
6302 int NumElems = VT.getVectorNumElements();
6303 SmallVector<int, 16> MaskVec(NumElems);
6304 for (int i = 0; i != NumElems; ++i)
6305 // If this is the insertion idx, put the low elt of V2 here.
6306 MaskVec[i] = (i == Idx) ? NumElems : i;
6307 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
6310 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
6311 if (Ptr.getOpcode() == X86ISD::Wrapper ||
6312 Ptr.getOpcode() == X86ISD::WrapperRIP)
6313 Ptr = Ptr.getOperand(0);
6315 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6316 if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
6319 return CNode->getConstVal();
6322 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
6323 if (!Load || !ISD::isNormalLoad(Load))
6325 return getTargetConstantFromBasePtr(Load->getBasePtr());
6328 static const Constant *getTargetConstantFromNode(SDValue Op) {
6329 Op = peekThroughBitcasts(Op);
6330 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
6334 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
6335 assert(LD && "Unexpected null LoadSDNode");
6336 return getTargetConstantFromNode(LD);
6339 // Extract raw constant bits from constant pools and other constant nodes.
6340 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
6342 SmallVectorImpl<APInt> &EltBits,
6343 bool AllowWholeUndefs = true,
6344 bool AllowPartialUndefs = true) {
6345 assert(EltBits.empty() && "Expected an empty EltBits vector");
6347 Op = peekThroughBitcasts(Op);
6349 EVT VT = Op.getValueType();
6350 unsigned SizeInBits = VT.getSizeInBits();
6351 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
6352 unsigned NumElts = SizeInBits / EltSizeInBits;
6354 // Bitcast a source array of element bits to the target size.
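// e.g. a <2 x i64> source queried with EltSizeInBits == 8 is repacked into
// 16 byte-sized elements; a target element is marked undef only if every
// source bit it covers was undef.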
6355 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
6356 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
6357 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
6358 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
6359 "Constant bit sizes don't match");
6361 // Don't split if we don't allow undef bits.
6362 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
6363 if (UndefSrcElts.getBoolValue() && !AllowUndefs)
6366 // If we're already the right size, don't bother bitcasting.
6367 if (NumSrcElts == NumElts) {
6368 UndefElts = UndefSrcElts;
6369 EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
6373 // Extract all the undef/constant element data and pack into single bitsets.
6374 APInt UndefBits(SizeInBits, 0);
6375 APInt MaskBits(SizeInBits, 0);
6377 for (unsigned i = 0; i != NumSrcElts; ++i) {
6378 unsigned BitOffset = i * SrcEltSizeInBits;
6379 if (UndefSrcElts[i])
6380 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
6381 MaskBits.insertBits(SrcEltBits[i], BitOffset);
6384 // Split the undef/constant single bitset data into the target elements.
6385 UndefElts = APInt(NumElts, 0);
6386 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
6388 for (unsigned i = 0; i != NumElts; ++i) {
6389 unsigned BitOffset = i * EltSizeInBits;
6390 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
6392 // Only treat an element as UNDEF if all bits are UNDEF.
6393 if (UndefEltBits.isAllOnesValue()) {
6394 if (!AllowWholeUndefs)
6396 UndefElts.setBit(i);
6400 // If only some bits are UNDEF then treat them as zero (or bail if not supported).
6402 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
6405 EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
6410 // Collect constant bits and insert into mask/undef bit masks.
6411 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
6412 unsigned UndefBitIndex) {
6415 if (isa<UndefValue>(Cst)) {
6416 Undefs.setBit(UndefBitIndex);
6419 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
6420 Mask = CInt->getValue();
6423 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
6424 Mask = CFP->getValueAPF().bitcastToAPInt();
6432 APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
6433 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
6434 return CastBitData(UndefSrcElts, SrcEltBits);
6437 // Extract scalar constant bits.
6438 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
6439 APInt UndefSrcElts = APInt::getNullValue(1);
6440 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
6441 return CastBitData(UndefSrcElts, SrcEltBits);
6443 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6444 APInt UndefSrcElts = APInt::getNullValue(1);
6445 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6446 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
6447 return CastBitData(UndefSrcElts, SrcEltBits);
6450 // Extract constant bits from build vector.
6451 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
6452 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6453 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6455 APInt UndefSrcElts(NumSrcElts, 0);
6456 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6457 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6458 const SDValue &Src = Op.getOperand(i);
6459 if (Src.isUndef()) {
6460 UndefSrcElts.setBit(i);
6463 auto *Cst = cast<ConstantSDNode>(Src);
6464 SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
6466 return CastBitData(UndefSrcElts, SrcEltBits);
6468 if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
6469 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6470 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6472 APInt UndefSrcElts(NumSrcElts, 0);
6473 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6474 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6475 const SDValue &Src = Op.getOperand(i);
6476 if (Src.isUndef()) {
6477 UndefSrcElts.setBit(i);
6480 auto *Cst = cast<ConstantFPSDNode>(Src);
6481 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6482 SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
6484 return CastBitData(UndefSrcElts, SrcEltBits);
6487 // Extract constant bits from constant pool vector.
6488 if (auto *Cst = getTargetConstantFromNode(Op)) {
6489 Type *CstTy = Cst->getType();
6490 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
6491 if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
6494 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
6495 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6497 APInt UndefSrcElts(NumSrcElts, 0);
6498 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6499 for (unsigned i = 0; i != NumSrcElts; ++i)
6500 if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
6504 return CastBitData(UndefSrcElts, SrcEltBits);
6507 // Extract constant bits from a broadcasted constant pool scalar.
6508 if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
6509 EltSizeInBits <= VT.getScalarSizeInBits()) {
6510 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
6511 if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
6514 SDValue Ptr = MemIntr->getBasePtr();
6515 if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
6516 unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
6517 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6519 APInt UndefSrcElts(NumSrcElts, 0);
6520 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6521 if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
6522 if (UndefSrcElts[0])
6523 UndefSrcElts.setBits(0, NumSrcElts);
6524 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6525 return CastBitData(UndefSrcElts, SrcEltBits);
6530 // Extract constant bits from a subvector broadcast.
6531 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
6532 SmallVector<APInt, 16> SubEltBits;
6533 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6534 UndefElts, SubEltBits, AllowWholeUndefs,
6535 AllowPartialUndefs)) {
6536 UndefElts = APInt::getSplat(NumElts, UndefElts);
6537 while (EltBits.size() < NumElts)
6538 EltBits.append(SubEltBits.begin(), SubEltBits.end());
6543 // Extract a rematerialized scalar constant insertion.
6544 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
6545 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
6546 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
6547 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6548 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6550 APInt UndefSrcElts(NumSrcElts, 0);
6551 SmallVector<APInt, 64> SrcEltBits;
6552 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
6553 SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
6554 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
6555 return CastBitData(UndefSrcElts, SrcEltBits);
6558 // Insert constant bits from base and subvector sources.
6559 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
6560 // TODO - support insert_subvector through bitcasts.
6561 if (EltSizeInBits != VT.getScalarSizeInBits())
6565 SmallVector<APInt, 32> EltSubBits;
6566 if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6567 UndefSubElts, EltSubBits,
6568 AllowWholeUndefs, AllowPartialUndefs) &&
6569 getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6570 UndefElts, EltBits, AllowWholeUndefs,
6571 AllowPartialUndefs)) {
6572 unsigned BaseIdx = Op.getConstantOperandVal(2);
6573 UndefElts.insertBits(UndefSubElts, BaseIdx);
6574 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
6575 EltBits[BaseIdx + i] = EltSubBits[i];
6580 // Extract constant bits from a subvector's source.
6581 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
6582 // TODO - support extract_subvector through bitcasts.
6583 if (EltSizeInBits != VT.getScalarSizeInBits())
6586 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6587 UndefElts, EltBits, AllowWholeUndefs,
6588 AllowPartialUndefs)) {
6589 EVT SrcVT = Op.getOperand(0).getValueType();
6590 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6591 unsigned NumSubElts = VT.getVectorNumElements();
6592 unsigned BaseIdx = Op.getConstantOperandVal(1);
6593 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
6594 if ((BaseIdx + NumSubElts) != NumSrcElts)
6595 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
6597 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
6602 // Extract constant bits from shuffle node sources.
6603 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
6604 // TODO - support shuffle through bitcasts.
6605 if (EltSizeInBits != VT.getScalarSizeInBits())
6608 ArrayRef<int> Mask = SVN->getMask();
6609 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
6610 llvm::any_of(Mask, [](int M) { return M < 0; }))
6613 APInt UndefElts0, UndefElts1;
6614 SmallVector<APInt, 32> EltBits0, EltBits1;
6615 if (isAnyInRange(Mask, 0, NumElts) &&
6616 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6617 UndefElts0, EltBits0, AllowWholeUndefs,
6618 AllowPartialUndefs))
6620 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
6621 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6622 UndefElts1, EltBits1, AllowWholeUndefs,
6623 AllowPartialUndefs))
6626 UndefElts = APInt::getNullValue(NumElts);
6627 for (int i = 0; i != (int)NumElts; ++i) {
6630 UndefElts.setBit(i);
6631 EltBits.push_back(APInt::getNullValue(EltSizeInBits));
6632 } else if (M < (int)NumElts) {
6634 UndefElts.setBit(i);
6635 EltBits.push_back(EltBits0[M]);
6637 if (UndefElts1[M - NumElts])
6638 UndefElts.setBit(i);
6639 EltBits.push_back(EltBits1[M - NumElts]);
6650 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
6652 SmallVector<APInt, 16> EltBits;
6653 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
6654 UndefElts, EltBits, true,
6655 AllowPartialUndefs)) {
6656 int SplatIndex = -1;
6657 for (int i = 0, e = EltBits.size(); i != e; ++i) {
6660 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
6666 if (0 <= SplatIndex) {
6667 SplatVal = EltBits[SplatIndex];
6677 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
6678 unsigned MaskEltSizeInBits,
6679 SmallVectorImpl<uint64_t> &RawMask,
6681 // Extract the raw target constant bits.
6682 SmallVector<APInt, 64> EltBits;
6683 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
6684 EltBits, /* AllowWholeUndefs */ true,
6685 /* AllowPartialUndefs */ false))
6688 // Insert the extracted elements into the mask.
6689 for (APInt Elt : EltBits)
6690 RawMask.push_back(Elt.getZExtValue());
6695 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
6696 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
6697 /// Note: This ignores saturation, so inputs must be checked first.
6698 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6699 bool Unary, unsigned NumStages = 1) {
6700 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6701 unsigned NumElts = VT.getVectorNumElements();
6702 unsigned NumLanes = VT.getSizeInBits() / 128;
6703 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
6704 unsigned Offset = Unary ? 0 : NumElts;
6705 unsigned Repetitions = 1u << (NumStages - 1);
6706 unsigned Increment = 1u << NumStages;
6707 assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
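// e.g. a single-stage binary v16i8 pack produces
// <0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30> over the concatenated inputs,
// i.e. the even (low) bytes of each 16-bit source element.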
6709 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
6710 for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
6711 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
6712 Mask.push_back(Elt + (Lane * NumEltsPerLane));
6713 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
6714 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
6719 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
6720 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
6721 APInt &DemandedLHS, APInt &DemandedRHS) {
6722 int NumLanes = VT.getSizeInBits() / 128;
6723 int NumElts = DemandedElts.getBitWidth();
6724 int NumInnerElts = NumElts / 2;
6725 int NumEltsPerLane = NumElts / NumLanes;
6726 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
6728 DemandedLHS = APInt::getNullValue(NumInnerElts);
6729 DemandedRHS = APInt::getNullValue(NumInnerElts);
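// e.g. for a v32i8 pack of two v16i16 operands: result bytes 0-7 demand LHS
// elements 0-7, bytes 8-15 demand RHS elements 0-7, bytes 16-23 demand LHS
// elements 8-15 and bytes 24-31 demand RHS elements 8-15.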
6731 // Map DemandedElts to the packed operands.
6732 for (int Lane = 0; Lane != NumLanes; ++Lane) {
6733 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
6734 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
6735 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
6736 if (DemandedElts[OuterIdx])
6737 DemandedLHS.setBit(InnerIdx);
6738 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
6739 DemandedRHS.setBit(InnerIdx);
6744 // Split the demanded elts of a HADD/HSUB node between its operands.
6745 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
6746 APInt &DemandedLHS, APInt &DemandedRHS) {
6747 int NumLanes = VT.getSizeInBits() / 128;
6748 int NumElts = DemandedElts.getBitWidth();
6749 int NumEltsPerLane = NumElts / NumLanes;
6750 int HalfEltsPerLane = NumEltsPerLane / 2;
6752 DemandedLHS = APInt::getNullValue(NumElts);
6753 DemandedRHS = APInt::getNullValue(NumElts);
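// e.g. for v8i32 HADD: result elements 0 and 1 are produced from LHS elements
// 0-3, elements 2 and 3 from RHS elements 0-3, and the same pattern repeats
// in the upper 128-bit lane.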
6755 // Map DemandedElts to the horizontal operands.
6756 for (int Idx = 0; Idx != NumElts; ++Idx) {
6757 if (!DemandedElts[Idx])
6759 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
6760 int LocalIdx = Idx % NumEltsPerLane;
6761 if (LocalIdx < HalfEltsPerLane) {
6762 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6763 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6765 LocalIdx -= HalfEltsPerLane;
6766 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6767 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6772 /// Calculates the shuffle mask corresponding to the target-specific opcode.
6773 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
6774 /// operands in \p Ops, and returns true.
6775 /// Sets \p IsUnary to true if only one source is used. Note that this will set
6776 /// IsUnary for shuffles which use a single input multiple times, and in those
6777 /// cases it will adjust the mask to only have indices within that single input.
6778 /// It is an error to call this with non-empty Mask/Ops vectors.
6779 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
6780 SmallVectorImpl<SDValue> &Ops,
6781 SmallVectorImpl<int> &Mask, bool &IsUnary) {
6782 unsigned NumElems = VT.getVectorNumElements();
6783 unsigned MaskEltSize = VT.getScalarSizeInBits();
6784 SmallVector<uint64_t, 32> RawMask;
6788 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
6789 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
6792 bool IsFakeUnary = false;
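// IsFakeUnary is set when both inputs are the same node; the decoded mask is
// remapped at the end so that every index refers to the first input only.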
6793 switch (N->getOpcode()) {
6794 case X86ISD::BLENDI:
6795 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6796 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6797 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6798 DecodeBLENDMask(NumElems, ImmN, Mask);
6799 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6802 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6803 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6804 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6805 DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
6806 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6808 case X86ISD::INSERTPS:
6809 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6810 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6811 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6812 DecodeINSERTPSMask(ImmN, Mask);
6813 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6815 case X86ISD::EXTRQI:
6816 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6817 if (isa<ConstantSDNode>(N->getOperand(1)) &&
6818 isa<ConstantSDNode>(N->getOperand(2))) {
6819 int BitLen = N->getConstantOperandVal(1);
6820 int BitIdx = N->getConstantOperandVal(2);
6821 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6825 case X86ISD::INSERTQI:
6826 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6827 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6828 if (isa<ConstantSDNode>(N->getOperand(2)) &&
6829 isa<ConstantSDNode>(N->getOperand(3))) {
6830 int BitLen = N->getConstantOperandVal(2);
6831 int BitIdx = N->getConstantOperandVal(3);
6832 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6833 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6836 case X86ISD::UNPCKH:
6837 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6838 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6839 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
6840 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6842 case X86ISD::UNPCKL:
6843 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6844 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6845 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
6846 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6848 case X86ISD::MOVHLPS:
6849 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6850 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6851 DecodeMOVHLPSMask(NumElems, Mask);
6852 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6854 case X86ISD::MOVLHPS:
6855 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6856 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6857 DecodeMOVLHPSMask(NumElems, Mask);
6858 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6860 case X86ISD::VALIGN:
6861 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
6862 "Only 32-bit and 64-bit elements are supported!");
6863 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6864 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6865 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6866 DecodeVALIGNMask(NumElems, ImmN, Mask);
6867 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6868 Ops.push_back(N->getOperand(1));
6869 Ops.push_back(N->getOperand(0));
6871 case X86ISD::PALIGNR:
6872 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6873 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6874 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6875 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6876 DecodePALIGNRMask(NumElems, ImmN, Mask);
6877 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6878 Ops.push_back(N->getOperand(1));
6879 Ops.push_back(N->getOperand(0));
6881 case X86ISD::VSHLDQ:
6882 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6883 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6884 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6885 DecodePSLLDQMask(NumElems, ImmN, Mask);
6888 case X86ISD::VSRLDQ:
6889 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6890 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6891 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6892 DecodePSRLDQMask(NumElems, ImmN, Mask);
6895 case X86ISD::PSHUFD:
6896 case X86ISD::VPERMILPI:
6897 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6898 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6899 DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
6902 case X86ISD::PSHUFHW:
6903 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6904 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6905 DecodePSHUFHWMask(NumElems, ImmN, Mask);
6908 case X86ISD::PSHUFLW:
6909 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6910 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6911 DecodePSHUFLWMask(NumElems, ImmN, Mask);
6914 case X86ISD::VZEXT_MOVL:
6915 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6916 DecodeZeroMoveLowMask(NumElems, Mask);
6919 case X86ISD::VBROADCAST:
6920 // We only decode broadcasts of same-sized vectors; peeking through to
6921 // extracted subvectors is likely to cause hasOneUse issues with
6922 // SimplifyDemandedBits etc.
6923 if (N->getOperand(0).getValueType() == VT) {
6924 DecodeVectorBroadcast(NumElems, Mask);
6929 case X86ISD::VPERMILPV: {
6930 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6932 SDValue MaskNode = N->getOperand(1);
6933 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6935 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
6940 case X86ISD::PSHUFB: {
6941 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6942 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6943 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6945 SDValue MaskNode = N->getOperand(1);
6946 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6947 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
6952 case X86ISD::VPERMI:
6953 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6954 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6955 DecodeVPERMMask(NumElems, ImmN, Mask);
6960 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6961 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6962 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
6964 case X86ISD::VPERM2X128:
6965 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6966 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6967 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6968 DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
6969 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6971 case X86ISD::SHUF128:
6972 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6973 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6974 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6975 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
6976 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6978 case X86ISD::MOVSLDUP:
6979 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6980 DecodeMOVSLDUPMask(NumElems, Mask);
6983 case X86ISD::MOVSHDUP:
6984 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6985 DecodeMOVSHDUPMask(NumElems, Mask);
6988 case X86ISD::MOVDDUP:
6989 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6990 DecodeMOVDDUPMask(NumElems, Mask);
6993 case X86ISD::VPERMIL2: {
6994 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6995 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6996 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6997 SDValue MaskNode = N->getOperand(2);
6998 SDValue CtrlNode = N->getOperand(3);
6999 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
7000 unsigned CtrlImm = CtrlOp->getZExtValue();
7001 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7003 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
7010 case X86ISD::VPPERM: {
7011 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7012 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7013 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7014 SDValue MaskNode = N->getOperand(2);
7015 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
7016 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
7021 case X86ISD::VPERMV: {
7022 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7024 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
7025 Ops.push_back(N->getOperand(1));
7026 SDValue MaskNode = N->getOperand(0);
7027 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7029 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
7034 case X86ISD::VPERMV3: {
7035 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7036 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
7037 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
7038 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
7039 Ops.push_back(N->getOperand(0));
7040 Ops.push_back(N->getOperand(2));
7041 SDValue MaskNode = N->getOperand(1);
7042 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7044 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
7049 default: llvm_unreachable("unknown target shuffle node");
7052 // Empty mask indicates the decode failed.
7056 // Check if we're getting a shuffle mask with zero'd elements.
7057 if (!AllowSentinelZero && isAnyZero(Mask))
7060 // If we have a fake unary shuffle, the shuffle mask is spread across two
7061 // inputs that are actually the same node. Re-map the mask to always point
7062 // into the first input.
7065 if (M >= (int)Mask.size())
7068 // If we didn't already add operands in the opcode-specific code, default to
7069 // adding 1 or 2 operands starting at 0.
7071 Ops.push_back(N->getOperand(0));
7072 if (!IsUnary || IsFakeUnary)
7073 Ops.push_back(N->getOperand(1));
7079 /// Compute whether each element of a shuffle is zeroable.
7081 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7082 /// Either it is an undef element in the shuffle mask, the element of the input
7083 /// referenced is undef, or the element of the input referenced is known to be
7084 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7085 /// as many lanes with this technique as possible to simplify the remaining shuffle.
7087 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
7088 SDValue V1, SDValue V2,
7089 APInt &KnownUndef, APInt &KnownZero) {
7090 int Size = Mask.size();
7091 KnownUndef = KnownZero = APInt::getNullValue(Size);
7093 V1 = peekThroughBitcasts(V1);
7094 V2 = peekThroughBitcasts(V2);
7096 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7097 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7099 int VectorSizeInBits = V1.getValueSizeInBits();
7100 int ScalarSizeInBits = VectorSizeInBits / Size;
7101 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
7103 for (int i = 0; i < Size; ++i) {
7105 // Handle the easy cases.
7107 KnownUndef.setBit(i);
7110 if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7111 KnownZero.setBit(i);
7115 // Determine shuffle input and normalize the mask.
7116 SDValue V = M < Size ? V1 : V2;
7119 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
7120 if (V.getOpcode() != ISD::BUILD_VECTOR)
7123 // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
7124 // the (larger) source element must be UNDEF/ZERO.
7125 if ((Size % V.getNumOperands()) == 0) {
7126 int Scale = Size / V->getNumOperands();
7127 SDValue Op = V.getOperand(M / Scale);
7129 KnownUndef.setBit(i);
7130 if (X86::isZeroNode(Op))
7131 KnownZero.setBit(i);
7132 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
7133 APInt Val = Cst->getAPIntValue();
7134 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
7136 KnownZero.setBit(i);
7137 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
7138 APInt Val = Cst->getValueAPF().bitcastToAPInt();
7139 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
7141 KnownZero.setBit(i);
7147 // If the BUILD_VECTOR has more elements, then all the (smaller) source
7147 // elements must be UNDEF or ZERO.
7148 if ((V.getNumOperands() % Size) == 0) {
7149 int Scale = V->getNumOperands() / Size;
7150 bool AllUndef = true;
7151 bool AllZero = true;
7152 for (int j = 0; j < Scale; ++j) {
7153 SDValue Op = V.getOperand((M * Scale) + j);
7154 AllUndef &= Op.isUndef();
7155 AllZero &= X86::isZeroNode(Op);
7158 KnownUndef.setBit(i);
7160 KnownZero.setBit(i);
7166 /// Decode a target shuffle mask and inputs and see if any values are
7167 /// known to be undef or zero from their inputs.
7168 /// Returns true if the target shuffle mask was decoded.
7169 /// FIXME: Merge this with computeZeroableShuffleElements?
7170 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
7171 SmallVectorImpl<SDValue> &Ops,
7172 APInt &KnownUndef, APInt &KnownZero) {
7174 if (!isTargetShuffle(N.getOpcode()))
7177 MVT VT = N.getSimpleValueType();
7178 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
7181 int Size = Mask.size();
7182 SDValue V1 = Ops[0];
7183 SDValue V2 = IsUnary ? V1 : Ops[1];
7184 KnownUndef = KnownZero = APInt::getNullValue(Size);
7186 V1 = peekThroughBitcasts(V1);
7187 V2 = peekThroughBitcasts(V2);
7189 assert((VT.getSizeInBits() % Size) == 0 &&
7190 "Illegal split of shuffle value type");
7191 unsigned EltSizeInBits = VT.getSizeInBits() / Size;
7193 // Extract known constant input data.
7194 APInt UndefSrcElts[2];
7195 SmallVector<APInt, 32> SrcEltBits[2];
7196 bool IsSrcConstant[2] = {
7197 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
7198 SrcEltBits[0], true, false),
7199 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
7200 SrcEltBits[1], true, false)};
7202 for (int i = 0; i < Size; ++i) {
7205 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
7207 assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
7208 if (SM_SentinelUndef == M)
7209 KnownUndef.setBit(i);
7210 if (SM_SentinelZero == M)
7211 KnownZero.setBit(i);
7215 // Determine shuffle input and normalize the mask.
7216 unsigned SrcIdx = M / Size;
7217 SDValue V = M < Size ? V1 : V2;
7220 // We are referencing an UNDEF input.
7222 KnownUndef.setBit(i);
7226 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
7227 // TODO: We currently only set UNDEF for integer types - floats use the same
7228 // registers as vectors and many of the scalar folded loads rely on the
7229 // SCALAR_TO_VECTOR pattern.
7230 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
7231 (Size % V.getValueType().getVectorNumElements()) == 0) {
7232 int Scale = Size / V.getValueType().getVectorNumElements();
7233 int Idx = M / Scale;
7234 if (Idx != 0 && !VT.isFloatingPoint())
7235 KnownUndef.setBit(i);
7236 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
7237 KnownZero.setBit(i);
7241 // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF vectors.
7243 if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
7244 SDValue Vec = V.getOperand(0);
7245 int NumVecElts = Vec.getValueType().getVectorNumElements();
7246 if (Vec.isUndef() && Size == NumVecElts) {
7247 int Idx = V.getConstantOperandVal(2);
7248 int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
7249 if (M < Idx || (Idx + NumSubElts) <= M)
7250 KnownUndef.setBit(i);
7255 // Attempt to extract from the source's constant bits.
7256 if (IsSrcConstant[SrcIdx]) {
7257 if (UndefSrcElts[SrcIdx][M])
7258 KnownUndef.setBit(i);
7259 else if (SrcEltBits[SrcIdx][M] == 0)
7260 KnownZero.setBit(i);
7264 assert(VT.getVectorNumElements() == (unsigned)Size &&
7265 "Different mask size from vector size!");
7269 // Replace target shuffle mask elements with known undef/zero sentinels.
7270 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
7271 const APInt &KnownUndef,
7272 const APInt &KnownZero,
7273 bool ResolveKnownZeros = true) {
7274 unsigned NumElts = Mask.size();
7275 assert(KnownUndef.getBitWidth() == NumElts &&
7276 KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
7278 for (unsigned i = 0; i != NumElts; ++i) {
7280 Mask[i] = SM_SentinelUndef;
7281 else if (ResolveKnownZeros && KnownZero[i])
7282 Mask[i] = SM_SentinelZero;
7286 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
7287 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
7290 unsigned NumElts = Mask.size();
7291 KnownUndef = KnownZero = APInt::getNullValue(NumElts);
7293 for (unsigned i = 0; i != NumElts; ++i) {
7295 if (SM_SentinelUndef == M)
7296 KnownUndef.setBit(i);
7297 if (SM_SentinelZero == M)
7298 KnownZero.setBit(i);
7302 // Forward declaration (for getFauxShuffleMask recursive check).
7303 // TODO: Use DemandedElts variant.
7304 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7305 SmallVectorImpl<int> &Mask,
7306 const SelectionDAG &DAG, unsigned Depth,
7307 bool ResolveKnownElts);
7309 // Attempt to decode ops that could be represented as a shuffle mask.
7310 // The decoded shuffle mask may contain a different number of elements than the
7311 // destination value type.
7312 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
7313 SmallVectorImpl<int> &Mask,
7314 SmallVectorImpl<SDValue> &Ops,
7315 const SelectionDAG &DAG, unsigned Depth,
7316 bool ResolveKnownElts) {
7320 MVT VT = N.getSimpleValueType();
7321 unsigned NumElts = VT.getVectorNumElements();
7322 unsigned NumSizeInBits = VT.getSizeInBits();
7323 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
7324 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
7326 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
7327 unsigned NumSizeInBytes = NumSizeInBits / 8;
7328 unsigned NumBytesPerElt = NumBitsPerElt / 8;
7330 unsigned Opcode = N.getOpcode();
7332 case ISD::VECTOR_SHUFFLE: {
7333 // We don't treat ISD::VECTOR_SHUFFLE as a target shuffle, so decode it here.
7334 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
7335 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
7336 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
7337 Ops.push_back(N.getOperand(0));
7338 Ops.push_back(N.getOperand(1));
7344 case X86ISD::ANDNP: {
7345 // Attempt to decode as a per-byte mask.
7347 SmallVector<APInt, 32> EltBits;
7348 SDValue N0 = N.getOperand(0);
7349 SDValue N1 = N.getOperand(1);
7350 bool IsAndN = (X86ISD::ANDNP == Opcode);
7351 uint64_t ZeroMask = IsAndN ? 255 : 0;
7352 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
7354 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
7356 Mask.push_back(SM_SentinelUndef);
7359 const APInt &ByteBits = EltBits[i];
7360 if (ByteBits != 0 && ByteBits != 255)
7362 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
7364 Ops.push_back(IsAndN ? N1 : N0);
7368 // Inspect each operand at the byte level. We can merge these into a
7369 // blend shuffle mask if for each byte at least one is masked out (zero).
7371 DAG.computeKnownBits(N.getOperand(0), DemandedElts, Depth + 1);
7373 DAG.computeKnownBits(N.getOperand(1), DemandedElts, Depth + 1);
7374 if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
7375 bool IsByteMask = true;
7376 APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
7377 APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
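// SelectMask marks bytes that must be taken from operand 1 (operand 0's byte
// is known zero); ZeroMask marks bytes where both operands are known zero.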
7378 for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
7379 unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
7380 unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
7381 if (LHS == 255 && RHS == 0)
7382 SelectMask.setBit(i);
7383 else if (LHS == 255 && RHS == 255)
7385 else if (!(LHS == 0 && RHS == 255))
7389 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
7390 for (unsigned j = 0; j != NumBytesPerElt; ++j) {
7391 unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
7392 int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
7393 Mask.push_back(Idx);
7396 Ops.push_back(N.getOperand(0));
7397 Ops.push_back(N.getOperand(1));
7402 // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
7403 // is a valid shuffle index.
7404 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
7405 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
7406 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
7408 SmallVector<int, 64> SrcMask0, SrcMask1;
7409 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
7410 if (!getTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG, Depth + 1,
7412 !getTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG, Depth + 1,
7416 // Shuffle inputs must be the same size as the result.
7417 if (llvm::any_of(SrcInputs0, [VT](SDValue Op) {
7418 return VT.getSizeInBits() != Op.getValueSizeInBits();
7421 if (llvm::any_of(SrcInputs1, [VT](SDValue Op) {
7422 return VT.getSizeInBits() != Op.getValueSizeInBits();
7426 size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
7427 SmallVector<int, 64> Mask0, Mask1;
7428 narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
7429 narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
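// Merge the two shuffle masks element by element: a slot is kept only if at
// least one side contributes zero there (OR with zero is the identity), or if
// both sides are undef.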
7430 for (size_t i = 0; i != MaskSize; ++i) {
7431 if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
7432 Mask.push_back(SM_SentinelUndef);
7433 else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
7434 Mask.push_back(SM_SentinelZero);
7435 else if (Mask1[i] == SM_SentinelZero)
7436 Mask.push_back(Mask0[i]);
7437 else if (Mask0[i] == SM_SentinelZero)
7438 Mask.push_back(Mask1[i] + (int)(MaskSize * SrcInputs0.size()));
7442 Ops.append(SrcInputs0.begin(), SrcInputs0.end());
7443 Ops.append(SrcInputs1.begin(), SrcInputs1.end());
7446 case ISD::INSERT_SUBVECTOR: {
7447 SDValue Src = N.getOperand(0);
7448 SDValue Sub = N.getOperand(1);
7449 EVT SubVT = Sub.getValueType();
7450 unsigned NumSubElts = SubVT.getVectorNumElements();
7451 if (!N->isOnlyUserOf(Sub.getNode()))
7453 uint64_t InsertIdx = N.getConstantOperandVal(2);
7454 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
7455 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7456 Sub.getOperand(0).getValueType() == VT) {
7457 uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
7458 for (int i = 0; i != (int)NumElts; ++i)
7460 for (int i = 0; i != (int)NumSubElts; ++i)
7461 Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
7463 Ops.push_back(Sub.getOperand(0));
7466 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
7467 SmallVector<int, 64> SubMask;
7468 SmallVector<SDValue, 2> SubInputs;
7469 if (!getTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
7470 SubMask, DAG, Depth + 1, ResolveKnownElts))
7473 // Subvector shuffle inputs must not be larger than the subvector.
7474 if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
7475 return SubVT.getSizeInBits() < SubInput.getValueSizeInBits();
7479 if (SubMask.size() != NumSubElts) {
7480 assert(((SubMask.size() % NumSubElts) == 0 ||
7481 (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
7482 if ((NumSubElts % SubMask.size()) == 0) {
7483 int Scale = NumSubElts / SubMask.size();
7484 SmallVector<int,64> ScaledSubMask;
7485 narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
7486 SubMask = ScaledSubMask;
7488 int Scale = SubMask.size() / NumSubElts;
7489 NumSubElts = SubMask.size();
7495 Ops.append(SubInputs.begin(), SubInputs.end());
7496 for (int i = 0; i != (int)NumElts; ++i)
7498 for (int i = 0; i != (int)NumSubElts; ++i) {
7501 int InputIdx = M / NumSubElts;
7502 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
7504 Mask[i + InsertIdx] = M;
7508 case X86ISD::PINSRB:
7509 case X86ISD::PINSRW:
7510 case ISD::SCALAR_TO_VECTOR:
7511 case ISD::INSERT_VECTOR_ELT: {
7512 // Match against an insert_vector_elt/scalar_to_vector of an extract from a
7513 // vector, for matching src/dst vector types.
7514 SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
7516 unsigned DstIdx = 0;
7517 if (Opcode != ISD::SCALAR_TO_VECTOR) {
7518 // Check we have an in-range constant insertion index.
7519 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
7520 N.getConstantOperandAPInt(2).uge(NumElts))
7522 DstIdx = N.getConstantOperandVal(2);
7524 // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
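// Illustrative example: inserting zero into element 2 of a v4i32 vector
// decodes to the single-input mask <0,1,Z,3>.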
7525 if (X86::isZeroNode(Scl)) {
7526 Ops.push_back(N.getOperand(0));
7527 for (unsigned i = 0; i != NumElts; ++i)
7528 Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
7533 // Peek through trunc/aext/zext.
7534 // TODO: aext shouldn't require SM_SentinelZero padding.
7535 // TODO: handle shift of scalars.
7536 unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
7537 while (Scl.getOpcode() == ISD::TRUNCATE ||
7538 Scl.getOpcode() == ISD::ANY_EXTEND ||
7539 Scl.getOpcode() == ISD::ZERO_EXTEND) {
7540 Scl = Scl.getOperand(0);
7542 std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
7544 if ((MinBitsPerElt % 8) != 0)
7547 // Attempt to find the source vector the scalar was extracted from.
7549 if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
7550 Scl.getOpcode() == X86ISD::PEXTRW ||
7551 Scl.getOpcode() == X86ISD::PEXTRB) &&
7552 Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
7555 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
7558 SDValue SrcVec = SrcExtract.getOperand(0);
7559 EVT SrcVT = SrcVec.getValueType();
7560 if (!SrcVT.getScalarType().isByteSized())
7562 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
7563 unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
7564 unsigned DstByte = DstIdx * NumBytesPerElt;
7566 std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
7568 // Create 'identity' byte level shuffle mask and then add inserted bytes.
7569 if (Opcode == ISD::SCALAR_TO_VECTOR) {
7570 Ops.push_back(SrcVec);
7571 Mask.append(NumSizeInBytes, SM_SentinelUndef);
7573 Ops.push_back(SrcVec);
7574 Ops.push_back(N.getOperand(0));
7575 for (int i = 0; i != (int)NumSizeInBytes; ++i)
7576 Mask.push_back(NumSizeInBytes + i);
7579 unsigned MinBytesPerElts = MinBitsPerElt / 8;
7580 MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
7581 for (unsigned i = 0; i != MinBytesPerElts; ++i)
7582 Mask[DstByte + i] = SrcByte + i;
7583 for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
7584 Mask[DstByte + i] = SM_SentinelZero;
7587 case X86ISD::PACKSS:
7588 case X86ISD::PACKUS: {
7589 SDValue N0 = N.getOperand(0);
7590 SDValue N1 = N.getOperand(1);
7591 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
7592 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
7593 "Unexpected input value type");
7595 APInt EltsLHS, EltsRHS;
7596 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
7598 // If we know input saturation won't happen we can treat this
7599 // as a truncation shuffle.
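// Illustrative example: PACKUSWB(v8i16 A, v8i16 B) with the upper byte of
// every source word known zero is the v16i8 shuffle
// <0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30> of the concatenation [A,B].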
7600 if (Opcode == X86ISD::PACKSS) {
7601 if ((!N0.isUndef() &&
7602 DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
7604 DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
7607 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
7608 if ((!N0.isUndef() &&
7609 !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
7611 !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
7615 bool IsUnary = (N0 == N1);
7621 createPackShuffleMask(VT, Mask, IsUnary);
7624 case X86ISD::VTRUNC: {
7625 SDValue Src = N.getOperand(0);
7626 EVT SrcVT = Src.getValueType();
7627 // Truncated source must be a simple vector.
7628 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7629 (SrcVT.getScalarSizeInBits() % 8) != 0)
7631 unsigned NumSrcElts = SrcVT.getVectorNumElements();
7632 unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
7633 unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
7634 assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
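// Illustrative example: truncating a v2i64 source into a v4i32 result gives
// Scale == 2 and builds the mask <0,2,Z,Z> (indices into the source viewed
// as v4i32).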
7635 for (unsigned i = 0; i != NumSrcElts; ++i)
7636 Mask.push_back(i * Scale);
7637 Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
7642 case X86ISD::VSRLI: {
7643 uint64_t ShiftVal = N.getConstantOperandVal(1);
7644 // Out of range bit shifts are guaranteed to be zero.
7645 if (NumBitsPerElt <= ShiftVal) {
7646 Mask.append(NumElts, SM_SentinelZero);
7650 // We can only decode 'whole byte' bit shifts as shuffles.
7651 if ((ShiftVal % 8) != 0)
7654 uint64_t ByteShift = ShiftVal / 8;
7655 Ops.push_back(N.getOperand(0));
7657 // Clear mask to all zeros and insert the shifted byte indices.
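// Illustrative example: VSHLI v2i64 by 16 bits has ByteShift == 2, so each
// 8-byte element decodes to the byte mask <Z,Z,0,1,2,3,4,5>.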
7658 Mask.append(NumSizeInBytes, SM_SentinelZero);
7660 if (X86ISD::VSHLI == Opcode) {
7661 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
7662 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7663 Mask[i + j] = i + j - ByteShift;
7665 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
7666 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7667 Mask[i + j - ByteShift] = i + j;
7671 case X86ISD::VROTLI:
7672 case X86ISD::VROTRI: {
7673 // We can only decode 'whole byte' bit rotates as shuffles.
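// Illustrative example: VROTLI v4i32 by 8 bits becomes Offset == 3 after the
// adjustment below, so each 4-byte element decodes to the byte mask <3,0,1,2>.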
7674 uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
7675 if ((RotateVal % 8) != 0)
7677 Ops.push_back(N.getOperand(0));
7678 int Offset = RotateVal / 8;
7679 Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
7680 for (int i = 0; i != (int)NumElts; ++i) {
7681 int BaseIdx = i * NumBytesPerElt;
7682 for (int j = 0; j != (int)NumBytesPerElt; ++j) {
7683 Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
7688 case X86ISD::VBROADCAST: {
7689 SDValue Src = N.getOperand(0);
7690 if (!Src.getSimpleValueType().isVector())
7693 Mask.append(NumElts, 0);
7696 case ISD::ZERO_EXTEND:
7697 case ISD::ANY_EXTEND:
7698 case ISD::ZERO_EXTEND_VECTOR_INREG:
7699 case ISD::ANY_EXTEND_VECTOR_INREG: {
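// Illustrative example: a zero_extend_vector_inreg from v16i8 to v8i16
// decodes to the mask <0,Z,1,Z,2,Z,3,Z,4,Z,5,Z,6,Z,7,Z> over the v16i8
// source (the any_extend cases use undef padding instead of zero).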
7700 SDValue Src = N.getOperand(0);
7701 EVT SrcVT = Src.getValueType();
7703 // Extended source must be a simple vector.
7704 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7705 (SrcVT.getScalarSizeInBits() % 8) != 0)
7709 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
7710 DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
7720 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
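/// Illustrative example: Inputs = [A, A] with Mask = <0,5,2,7> and a mask
/// width of 4 resolves to Inputs = [A] and Mask = <0,1,2,3>.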
7721 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
7722 SmallVectorImpl<int> &Mask) {
7723 int MaskWidth = Mask.size();
7724 SmallVector<SDValue, 16> UsedInputs;
7725 for (int i = 0, e = Inputs.size(); i < e; ++i) {
7726 int lo = UsedInputs.size() * MaskWidth;
7727 int hi = lo + MaskWidth;
7729 // Strip UNDEF input usage.
7730 if (Inputs[i].isUndef())
7732 if ((lo <= M) && (M < hi))
7733 M = SM_SentinelUndef;
7735 // Check for unused inputs.
7736 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
7743 // Check for repeated inputs.
7744 bool IsRepeat = false;
7745 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
7746 if (UsedInputs[j] != Inputs[i])
7750 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
7757 UsedInputs.push_back(Inputs[i]);
7759 Inputs = UsedInputs;
7762 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
7763 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
7764 /// Returns true if the target shuffle mask was decoded.
7765 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
7766 SmallVectorImpl<SDValue> &Inputs,
7767 SmallVectorImpl<int> &Mask,
7768 APInt &KnownUndef, APInt &KnownZero,
7769 const SelectionDAG &DAG, unsigned Depth,
7770 bool ResolveKnownElts) {
7771 EVT VT = Op.getValueType();
7772 if (!VT.isSimple() || !VT.isVector())
7775 if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
7776 if (ResolveKnownElts)
7777 resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
7780 if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
7781 ResolveKnownElts)) {
7782 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
7788 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7789 SmallVectorImpl<int> &Mask,
7790 const SelectionDAG &DAG, unsigned Depth = 0,
7791 bool ResolveKnownElts = true) {
7792 EVT VT = Op.getValueType();
7793 if (!VT.isSimple() || !VT.isVector())
7796 APInt KnownUndef, KnownZero;
7797 unsigned NumElts = Op.getValueType().getVectorNumElements();
7798 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
7799 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
7800 KnownZero, DAG, Depth, ResolveKnownElts);
7803 /// Returns the scalar element that will make up the i'th
7804 /// element of the result of the vector shuffle.
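/// Illustrative example: querying Index 1 of vector_shuffle<0,5,2,7>(A, B)
/// recurses into element 1 of B.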
7805 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
7806 SelectionDAG &DAG, unsigned Depth) {
7807 if (Depth >= SelectionDAG::MaxRecursionDepth)
7808 return SDValue(); // Limit search depth.
7810 EVT VT = Op.getValueType();
7811 unsigned Opcode = Op.getOpcode();
7812 unsigned NumElems = VT.getVectorNumElements();
7814 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
7815 if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
7816 int Elt = SV->getMaskElt(Index);
7819 return DAG.getUNDEF(VT.getVectorElementType());
7821 SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
7822 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
7825 // Recurse into target specific vector shuffles to find scalars.
7826 if (isTargetShuffle(Opcode)) {
7827 MVT ShufVT = VT.getSimpleVT();
7828 MVT ShufSVT = ShufVT.getVectorElementType();
7829 int NumElems = (int)ShufVT.getVectorNumElements();
7830 SmallVector<int, 16> ShuffleMask;
7831 SmallVector<SDValue, 16> ShuffleOps;
7834 if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
7835 ShuffleMask, IsUnary))
7838 int Elt = ShuffleMask[Index];
7839 if (Elt == SM_SentinelZero)
7840 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
7841 : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
7842 if (Elt == SM_SentinelUndef)
7843 return DAG.getUNDEF(ShufSVT);
7845 assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
7846 SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
7847 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
7850 // Recurse into insert_subvector base/sub vector to find scalars.
7851 if (Opcode == ISD::INSERT_SUBVECTOR) {
7852 SDValue Vec = Op.getOperand(0);
7853 SDValue Sub = Op.getOperand(1);
7854 uint64_t SubIdx = Op.getConstantOperandVal(2);
7855 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
7857 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
7858 return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
7859 return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
7862 // Recurse into concat_vectors sub vector to find scalars.
7863 if (Opcode == ISD::CONCAT_VECTORS) {
7864 EVT SubVT = Op.getOperand(0).getValueType();
7865 unsigned NumSubElts = SubVT.getVectorNumElements();
7866 uint64_t SubIdx = Index / NumSubElts;
7867 uint64_t SubElt = Index % NumSubElts;
7868 return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
7871 // Recurse into extract_subvector src vector to find scalars.
7872 if (Opcode == ISD::EXTRACT_SUBVECTOR) {
7873 SDValue Src = Op.getOperand(0);
7874 uint64_t SrcIdx = Op.getConstantOperandVal(1);
7875 return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
7878 // We only peek through bitcasts of the same vector width.
7879 if (Opcode == ISD::BITCAST) {
7880 SDValue Src = Op.getOperand(0);
7881 EVT SrcVT = Src.getValueType();
7882 if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
7883 return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
7887 // Actual nodes that may contain scalar elements
7889 // For insert_vector_elt - either return the index matching scalar or recurse
7890 // into the base vector.
7891 if (Opcode == ISD::INSERT_VECTOR_ELT &&
7892 isa<ConstantSDNode>(Op.getOperand(2))) {
7893 if (Op.getConstantOperandAPInt(2) == Index)
7894 return Op.getOperand(1);
7895 return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
7898 if (Opcode == ISD::SCALAR_TO_VECTOR)
7899 return (Index == 0) ? Op.getOperand(0)
7900 : DAG.getUNDEF(VT.getVectorElementType());
7902 if (Opcode == ISD::BUILD_VECTOR)
7903 return Op.getOperand(Index);
7908 // Use PINSRB/PINSRW/PINSRD to create a build vector.
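// Illustrative example: a v8i16 build_vector <x,0,y,0,0,0,0,0> becomes PINSRW
// insertions of x and y into a zero vector.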
7909 static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
7910 unsigned NumNonZero, unsigned NumZero,
7912 const X86Subtarget &Subtarget) {
7913 MVT VT = Op.getSimpleValueType();
7914 unsigned NumElts = VT.getVectorNumElements();
7915 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
7916 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
7917 "Illegal vector insertion");
7923 for (unsigned i = 0; i < NumElts; ++i) {
7924 bool IsNonZero = (NonZeros & (1 << i)) != 0;
7928 // If the build vector contains zeros or our first insertion is not the
7929 // first index, then insert into a zero vector to break any register
7930 // dependency; otherwise use SCALAR_TO_VECTOR.
7933 if (NumZero || 0 != i)
7934 V = getZeroVector(VT, Subtarget, DAG, dl);
7936 assert(0 == i && "Expected insertion into zero-index");
7937 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7938 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
7939 V = DAG.getBitcast(VT, V);
7943 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
7944 DAG.getIntPtrConstant(i, dl));
7950 /// Custom lower build_vector of v16i8.
7951 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
7952 unsigned NumNonZero, unsigned NumZero,
7954 const X86Subtarget &Subtarget) {
7955 if (NumNonZero > 8 && !Subtarget.hasSSE41())
7958 // SSE4.1 - use PINSRB to insert each byte directly.
7959 if (Subtarget.hasSSE41())
7960 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7966 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
7967 for (unsigned i = 0; i < 16; i += 2) {
7968 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
7969 bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
7970 if (!ThisIsNonZero && !NextIsNonZero)
7973 // FIXME: Investigate combining the first 4 bytes as an i32 instead.
7975 if (ThisIsNonZero) {
7976 if (NumZero || NextIsNonZero)
7977 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7979 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7982 if (NextIsNonZero) {
7983 SDValue NextElt = Op.getOperand(i + 1);
7984 if (i == 0 && NumZero)
7985 NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
7987 NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
7988 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
7989 DAG.getConstant(8, dl, MVT::i8));
7991 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
7996 // If our first insertion is not the first index or zeros are needed, then
7997 // insert into zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
7998 // elements undefined).
8000 if (i != 0 || NumZero)
8001 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
8003 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
8004 V = DAG.getBitcast(MVT::v8i16, V);
8008 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
8009 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
8010 DAG.getIntPtrConstant(i / 2, dl));
8013 return DAG.getBitcast(MVT::v16i8, V);
8016 /// Custom lower build_vector of v8i16.
8017 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
8018 unsigned NumNonZero, unsigned NumZero,
8020 const X86Subtarget &Subtarget) {
8021 if (NumNonZero > 4 && !Subtarget.hasSSE41())
8024 // Use PINSRW to insert each word directly.
8025 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
8029 /// Custom lower build_vector of v4i32 or v4f32.
8030 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
8031 const X86Subtarget &Subtarget) {
8032 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
8033 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
8034 // Because we're creating a less complicated build vector here, we may enable
8035 // further folding of the MOVDDUP via shuffle transforms.
8036 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
8037 Op.getOperand(0) == Op.getOperand(2) &&
8038 Op.getOperand(1) == Op.getOperand(3) &&
8039 Op.getOperand(0) != Op.getOperand(1)) {
8041 MVT VT = Op.getSimpleValueType();
8042 MVT EltVT = VT.getVectorElementType();
8043 // Create a new build vector with the first 2 elements followed by undef
8044 // padding, bitcast to v2f64, duplicate, and bitcast back.
8045 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
8046 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
8047 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
8048 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
8049 return DAG.getBitcast(VT, Dup);
8052 // Find all zeroable elements.
8053 std::bitset<4> Zeroable, Undefs;
8054 for (int i = 0; i < 4; ++i) {
8055 SDValue Elt = Op.getOperand(i);
8056 Undefs[i] = Elt.isUndef();
8057 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
8059 assert(Zeroable.size() - Zeroable.count() > 1 &&
8060 "We expect at least two non-zero elements!");
8062 // We only know how to deal with build_vector nodes where elements are either
8063 // zeroable or extract_vector_elt with constant index.
8064 SDValue FirstNonZero;
8065 unsigned FirstNonZeroIdx;
8066 for (unsigned i = 0; i < 4; ++i) {
8069 SDValue Elt = Op.getOperand(i);
8070 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8071 !isa<ConstantSDNode>(Elt.getOperand(1)))
8073 // Make sure that this node is extracting from a 128-bit vector.
8074 MVT VT = Elt.getOperand(0).getSimpleValueType();
8075 if (!VT.is128BitVector())
8077 if (!FirstNonZero.getNode()) {
8079 FirstNonZeroIdx = i;
8083 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
8084 SDValue V1 = FirstNonZero.getOperand(0);
8085 MVT VT = V1.getSimpleValueType();
8087 // See if this build_vector can be lowered as a blend with zero.
8089 unsigned EltMaskIdx, EltIdx;
8091 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
8092 if (Zeroable[EltIdx]) {
8093 // The zero vector will be on the right hand side.
8094 Mask[EltIdx] = EltIdx+4;
8098 Elt = Op->getOperand(EltIdx);
8099 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
8100 EltMaskIdx = Elt.getConstantOperandVal(1);
8101 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
8103 Mask[EltIdx] = EltIdx;
8107 // Let the shuffle legalizer deal with blend operations.
8108 SDValue VZeroOrUndef = (Zeroable == Undefs)
8110 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
8111 if (V1.getSimpleValueType() != VT)
8112 V1 = DAG.getBitcast(VT, V1);
8113 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
8116 // See if we can lower this build_vector to an INSERTPS.
8117 if (!Subtarget.hasSSE41())
8120 SDValue V2 = Elt.getOperand(0);
8121 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
8124 bool CanFold = true;
8125 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
8129 SDValue Current = Op->getOperand(i);
8130 SDValue SrcVector = Current->getOperand(0);
8133 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
8139 assert(V1.getNode() && "Expected at least two non-zero elements!");
8140 if (V1.getSimpleValueType() != MVT::v4f32)
8141 V1 = DAG.getBitcast(MVT::v4f32, V1);
8142 if (V2.getSimpleValueType() != MVT::v4f32)
8143 V2 = DAG.getBitcast(MVT::v4f32, V2);
8145 // Ok, we can emit an INSERTPS instruction.
8146 unsigned ZMask = Zeroable.to_ulong();
8148 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
8149 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8151 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8152 DAG.getIntPtrConstant(InsertPSMask, DL, true));
8153 return DAG.getBitcast(VT, Result);
8156 /// Return a vector logical shift node.
8157 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
8158 SelectionDAG &DAG, const TargetLowering &TLI,
8160 assert(VT.is128BitVector() && "Unknown type for VShift");
8161 MVT ShVT = MVT::v16i8;
8162 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
8163 SrcOp = DAG.getBitcast(ShVT, SrcOp);
8164 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
8165 SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
8166 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
8169 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
8170 SelectionDAG &DAG) {
8172 // Check if the scalar load can be widened into a vector load, and if
8173 // the address is "base + cst", see if the cst can be "absorbed" into
8174 // the shuffle mask.
8175 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
8176 SDValue Ptr = LD->getBasePtr();
8177 if (!ISD::isNormalLoad(LD) || !LD->isSimple())
8179 EVT PVT = LD->getValueType(0);
8180 if (PVT != MVT::i32 && PVT != MVT::f32)
8185 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
8186 FI = FINode->getIndex();
8188 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
8189 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
8190 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
8191 Offset = Ptr.getConstantOperandVal(1);
8192 Ptr = Ptr.getOperand(0);
8197 // FIXME: 256-bit vector instructions don't require strict alignment;
8198 // improve this code to support it better.
8199 Align RequiredAlign(VT.getSizeInBits() / 8);
8200 SDValue Chain = LD->getChain();
8201 // Make sure the stack object alignment is at least 16 or 32.
8202 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8203 MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
8204 if (!InferredAlign || *InferredAlign < RequiredAlign) {
8205 if (MFI.isFixedObjectIndex(FI)) {
8206 // Can't change the alignment. FIXME: It's possible to compute
8207 // the exact stack offset and reference FI + adjusted offset instead,
8208 // if someone *really* cares about this. That's the way to implement it.
8211 MFI.setObjectAlignment(FI, RequiredAlign);
8215 // (Offset % 16 or 32) must be a multiple of 4. The address is then
8216 // Ptr + (Offset & ~15).
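// Illustrative example: a v4i32 result with Offset == 20 gives
// StartOffset == 16 and EltNo == 1, i.e. load <4 x i32> from Ptr+16 and
// splat element 1.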
8219 if ((Offset % RequiredAlign.value()) & 3)
8221 int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
8224 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
8225 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
8228 int EltNo = (Offset - StartOffset) >> 2;
8229 unsigned NumElems = VT.getVectorNumElements();
8231 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
8232 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
8233 LD->getPointerInfo().getWithOffset(StartOffset));
8235 SmallVector<int, 8> Mask(NumElems, EltNo);
8237 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
8244 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
8244 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
8245 if (ISD::isNON_EXTLoad(Elt.getNode())) {
8246 auto *BaseLd = cast<LoadSDNode>(Elt);
8247 if (!BaseLd->isSimple())
8254 switch (Elt.getOpcode()) {
8257 case ISD::SCALAR_TO_VECTOR:
8258 return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
8260 if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
8261 uint64_t Idx = IdxC->getZExtValue();
8262 if ((Idx % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
8263 ByteOffset += Idx / 8;
8268 case ISD::EXTRACT_VECTOR_ELT:
8269 if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
8270 SDValue Src = Elt.getOperand(0);
8271 unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
8272 unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
8273 if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
8274 findEltLoadSrc(Src, Ld, ByteOffset)) {
8275 uint64_t Idx = IdxC->getZExtValue();
8276 ByteOffset += Idx * (SrcSizeInBits / 8);
8286 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
8287 /// elements can be replaced by a single large load which has the same value as
8288 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
8290 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
8291 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
8292 const SDLoc &DL, SelectionDAG &DAG,
8293 const X86Subtarget &Subtarget,
8294 bool isAfterLegalize) {
8295 if ((VT.getScalarSizeInBits() % 8) != 0)
8298 unsigned NumElems = Elts.size();
8300 int LastLoadedElt = -1;
8301 APInt LoadMask = APInt::getNullValue(NumElems);
8302 APInt ZeroMask = APInt::getNullValue(NumElems);
8303 APInt UndefMask = APInt::getNullValue(NumElems);
8305 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
8306 SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
8308 // For each element in the initializer, see if we've found a load, zero or an
8310 for (unsigned i = 0; i < NumElems; ++i) {
8311 SDValue Elt = peekThroughBitcasts(Elts[i]);
8314 if (Elt.isUndef()) {
8315 UndefMask.setBit(i);
8318 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
8323 // Each loaded element must be the correct fractional portion of the
8324 // requested vector load.
8325 unsigned EltSizeInBits = Elt.getValueSizeInBits();
8326 if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
8329 if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
8331 unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
8332 if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
8338 assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
8339 LoadMask.countPopulation()) == NumElems &&
8340 "Incomplete element masks");
8342 // Handle Special Cases - all undef or undef/zero.
8343 if (UndefMask.countPopulation() == NumElems)
8344 return DAG.getUNDEF(VT);
8346 // FIXME: Should we return this as a BUILD_VECTOR instead?
8347 if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
8348 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
8349 : DAG.getConstantFP(0.0, DL, VT);
8351 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8352 int FirstLoadedElt = LoadMask.countTrailingZeros();
8353 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
8354 EVT EltBaseVT = EltBase.getValueType();
8355 assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
8356 "Register/Memory size mismatch");
8357 LoadSDNode *LDBase = Loads[FirstLoadedElt];
8358 assert(LDBase && "Did not find base load for merging consecutive loads");
8359 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
8360 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
8361 int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
8362 assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
8364 // TODO: Support offsetting the base load.
8365 if (ByteOffsets[FirstLoadedElt] != 0)
8368 // Check to see if the element's load is consecutive to the base load
8369 // or offset from a previous (already checked) load.
8370 auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
8371 LoadSDNode *Ld = Loads[EltIdx];
8372 int64_t ByteOffset = ByteOffsets[EltIdx];
8373 if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
8374 int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
8375 return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
8376 Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
8378 return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
8379 EltIdx - FirstLoadedElt);
8382 // Consecutive loads can contain UNDEFs but not ZERO elements.
8383 // Consecutive loads with UNDEF and ZERO elements require an additional
8384 // shuffle stage to clear the ZERO elements.
8385 bool IsConsecutiveLoad = true;
8386 bool IsConsecutiveLoadWithZeros = true;
8387 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
8389 if (!CheckConsecutiveLoad(LDBase, i)) {
8390 IsConsecutiveLoad = false;
8391 IsConsecutiveLoadWithZeros = false;
8394 } else if (ZeroMask[i]) {
8395 IsConsecutiveLoad = false;
8399 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
8400 auto MMOFlags = LDBase->getMemOperand()->getFlags();
8401 assert(LDBase->isSimple() &&
8402 "Cannot merge volatile or atomic loads.");
8404 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
8405 LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
8407 for (auto *LD : Loads)
8409 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
8413 // Check if the base load is entirely dereferenceable.
8414 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
8415 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
8417 // LOAD - all consecutive load/undefs (must start/end with a load or be
8418 // entirely dereferenceable). If we have found an entire vector of loads and
8419 // undefs, then return a large load of the entire vector width starting at the
8420 // base pointer. If the vector contains zeros, then attempt to shuffle those
8422 if (FirstLoadedElt == 0 &&
8423 (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
8424 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
8425 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
8428 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
8429 // will lower to regular temporal loads and use the cache.
8430 if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
8431 VT.is256BitVector() && !Subtarget.hasInt256())
8435 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
8438 return CreateLoad(VT, LDBase);
8440 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
8441 // vector and a zero vector to clear out the zero elements.
8442 if (!isAfterLegalize && VT.isVector()) {
8443 unsigned NumMaskElts = VT.getVectorNumElements();
8444 if ((NumMaskElts % NumElems) == 0) {
8445 unsigned Scale = NumMaskElts / NumElems;
8446 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
8447 for (unsigned i = 0; i < NumElems; ++i) {
8450 int Offset = ZeroMask[i] ? NumMaskElts : 0;
8451 for (unsigned j = 0; j != Scale; ++j)
8452 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
8454 SDValue V = CreateLoad(VT, LDBase);
8455 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
8456 : DAG.getConstantFP(0.0, DL, VT);
8457 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
8462 // If the upper half of a ymm/zmm load is undef then just load the lower half.
8463 if (VT.is256BitVector() || VT.is512BitVector()) {
8464 unsigned HalfNumElems = NumElems / 2;
8465 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
8467 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
8469 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
8470 DAG, Subtarget, isAfterLegalize);
8472 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
8473 HalfLD, DAG.getIntPtrConstant(0, DL));
8477 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
8478 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
8479 (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
8480 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
8481 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
8482 : MVT::getIntegerVT(LoadSizeInBits);
8483 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
8484 // Allow v4f32 on SSE1 only targets.
8485 // FIXME: Add more isel patterns so we can just use VT directly.
8486 if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
8488 if (TLI.isTypeLegal(VecVT)) {
8489 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
8490 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
8491 SDValue ResNode = DAG.getMemIntrinsicNode(
8492 X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
8493 LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
8494 for (auto *LD : Loads)
8496 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
8497 return DAG.getBitcast(VT, ResNode);
8501 // BROADCAST - match the smallest possible repetition pattern, load that
8502 // scalar/subvector element and then broadcast to the entire vector.
8503 if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
8504 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
8505 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
8506 unsigned RepeatSize = SubElems * BaseSizeInBits;
8507 unsigned ScalarSize = std::min(RepeatSize, 64u);
8508 if (!Subtarget.hasAVX2() && ScalarSize < 32)
8512 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
8513 for (unsigned i = 0; i != NumElems && Match; ++i) {
8516 SDValue Elt = peekThroughBitcasts(Elts[i]);
8517 if (RepeatedLoads[i % SubElems].isUndef())
8518 RepeatedLoads[i % SubElems] = Elt;
8520 Match &= (RepeatedLoads[i % SubElems] == Elt);
8523 // We must have loads at both ends of the repetition.
8524 Match &= !RepeatedLoads.front().isUndef();
8525 Match &= !RepeatedLoads.back().isUndef();
8530 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
8531 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
8532 : EVT::getFloatingPointVT(ScalarSize);
8533 if (RepeatSize > ScalarSize)
8534 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
8535 RepeatSize / ScalarSize);
8537 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
8538 VT.getSizeInBits() / ScalarSize);
8539 if (TLI.isTypeLegal(BroadcastVT)) {
8540 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
8541 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
8542 unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
8543 : X86ISD::VBROADCAST;
8544 SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
8545 return DAG.getBitcast(VT, Broadcast);
8554 // Combine a vector op (shuffles etc.) that is equal to build_vector load1,
8555 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
8556 // are consecutive, non-overlapping, and in the right order.
8557 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
8559 const X86Subtarget &Subtarget,
8560 bool isAfterLegalize) {
8561 SmallVector<SDValue, 64> Elts;
8562 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8563 if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
8564 Elts.push_back(Elt);
8569 assert(Elts.size() == VT.getVectorNumElements());
8570 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
8574 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
8575 unsigned SplatBitSize, LLVMContext &C) {
8576 unsigned ScalarSize = VT.getScalarSizeInBits();
8577 unsigned NumElm = SplatBitSize / ScalarSize;
8579 SmallVector<Constant *, 32> ConstantVec;
8580 for (unsigned i = 0; i < NumElm; i++) {
8581 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
8583 if (VT.isFloatingPoint()) {
8584 if (ScalarSize == 32) {
8585 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
8587 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
8588 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
8591 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
8592 ConstantVec.push_back(Const);
8594 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
8597 static bool isFoldableUseOfShuffle(SDNode *N) {
8598 for (auto *U : N->uses()) {
8599 unsigned Opc = U->getOpcode();
8600 // VPERMV/VPERMV3 shuffles can never fold their index operands.
8601 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
8603 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
8605 if (isTargetShuffle(Opc))
8607 if (Opc == ISD::BITCAST) // Ignore bitcasts
8608 return isFoldableUseOfShuffle(U);
8615 // Check if the current node of the build vector is a zero-extended vector.
8616 // If so, return the value extended.
8617 // For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
8618 // NumElt - return the number of zero-extended identical values.
8619 // EltType - return the type of the value including the zero extend.
8620 static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
8621 unsigned &NumElt, MVT &EltType) {
8622 SDValue ExtValue = Op->getOperand(0);
8623 unsigned NumElts = Op->getNumOperands();
8624 unsigned Delta = NumElts;
8626 for (unsigned i = 1; i < NumElts; i++) {
8627 if (Op->getOperand(i) == ExtValue) {
8631 if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
8634 if (!isPowerOf2_32(Delta) || Delta == 1)
8637 for (unsigned i = Delta; i < NumElts; i++) {
8638 if (i % Delta == 0) {
8639 if (Op->getOperand(i) != ExtValue)
8641 } else if (!(isNullConstant(Op->getOperand(i)) ||
8642 Op->getOperand(i).isUndef()))
8645 unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
8646 unsigned ExtVTSize = EltSize * Delta;
8647 EltType = MVT::getIntegerVT(ExtVTSize);
8648 NumElt = NumElts / Delta;
8652 /// Attempt to use the vbroadcast instruction to generate a splat value
8653 /// from a splat BUILD_VECTOR which uses:
8654 /// a. A single scalar load, or a constant.
8655 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
8657 /// The VBROADCAST node is returned when a pattern is found,
8658 /// or SDValue() otherwise.
8659 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
8660 const X86Subtarget &Subtarget,
8661 SelectionDAG &DAG) {
8662 // VBROADCAST requires AVX.
8663 // TODO: Splats could be generated for non-AVX CPUs using SSE
8664 // instructions, but there's less potential gain for only 128-bit vectors.
8665 if (!Subtarget.hasAVX())
8668 MVT VT = BVOp->getSimpleValueType(0);
8671 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
8672 "Unsupported vector type for broadcast.");
8674 BitVector UndefElements;
8675 SDValue Ld = BVOp->getSplatValue(&UndefElements);
8677 // Attempt to use VBROADCASTM
8678 // From this pattern:
8679 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
8680 // b. t1 = (build_vector t0 t0)
8682 // Create (VBROADCASTM v2i1 X)
8683 if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
8684 MVT EltType = VT.getScalarType();
8685 unsigned NumElts = VT.getVectorNumElements();
8687 SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
8688 if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
8689 (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
8690 Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
8692 BOperand = ZeroExtended.getOperand(0);
8694 BOperand = Ld.getOperand(0).getOperand(0);
8695 MVT MaskVT = BOperand.getSimpleValueType();
8696 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
8697 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
8699 DAG.getNode(X86ISD::VBROADCASTM, dl,
8700 MVT::getVectorVT(EltType, NumElts), BOperand);
8701 return DAG.getBitcast(VT, Brdcst);
8706 unsigned NumElts = VT.getVectorNumElements();
8707 unsigned NumUndefElts = UndefElements.count();
8708 if (!Ld || (NumElts - NumUndefElts) <= 1) {
8709 APInt SplatValue, Undef;
8710 unsigned SplatBitSize;
8712 // Check if this is a repeated constant pattern suitable for broadcasting.
8713 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
8714 SplatBitSize > VT.getScalarSizeInBits() &&
8715 SplatBitSize < VT.getSizeInBits()) {
8716 // Avoid replacing with a broadcast when the build vector is used by a
8717 // shuffle that can fold it, to preserve the present custom lowering of shuffles.
8718 if (isFoldableUseOfShuffle(BVOp))
8720 // Replace BUILD_VECTOR with a broadcast of the repeated constants.
8721 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8722 LLVMContext *Ctx = DAG.getContext();
8723 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
8724 if (Subtarget.hasAVX()) {
8725 if (SplatBitSize == 32 || SplatBitSize == 64 ||
8726 (SplatBitSize < 32 && Subtarget.hasAVX2())) {
8727 // Splatted value can fit in one INTEGER constant in constant pool.
8728 // Load the constant and broadcast it.
8729 MVT CVT = MVT::getIntegerVT(SplatBitSize);
8730 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
8731 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
8732 SDValue CP = DAG.getConstantPool(C, PVT);
8733 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8735 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
8737 DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
8738 SDValue Ops[] = {DAG.getEntryNode(), CP};
8739 MachinePointerInfo MPI =
8740 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
8741 SDValue Brdcst = DAG.getMemIntrinsicNode(
8742 X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT, MPI, Alignment,
8743 MachineMemOperand::MOLoad);
8744 return DAG.getBitcast(VT, Brdcst);
8746 if (SplatBitSize > 64) {
8747 // Load the vector of constants and broadcast it.
8748 MVT CVT = VT.getScalarType();
8749 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
8751 SDValue VCP = DAG.getConstantPool(VecC, PVT);
8752 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
8753 Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
8755 MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
8756 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8758 SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
8759 return DAG.getBitcast(VT, Brdcst);
8764 // If we are moving a scalar into a vector (Ld must be set and all elements
8765 // but 1 are undef) and that operation is not obviously supported by
8766 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
8767 // That's better than general shuffling and may eliminate a load to GPR and
8768 // move from scalar to vector register.
8769 if (!Ld || NumElts - NumUndefElts != 1)
8771 unsigned ScalarSize = Ld.getValueSizeInBits();
8772 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
8776 bool ConstSplatVal =
8777 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
8778 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8780 // Make sure that all of the users of a non-constant load are from the
8781 // BUILD_VECTOR node.
8782 // FIXME: Is the use count needed for non-constant, non-load case?
8783 if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
8786 unsigned ScalarSize = Ld.getValueSizeInBits();
8787 bool IsGE256 = (VT.getSizeInBits() >= 256);
8789 // When optimizing for size, generate up to 5 extra bytes for a broadcast
8790 // instruction to save 8 or more bytes of constant pool data.
8791 // TODO: If multiple splats are generated to load the same constant,
8792 // it may be detrimental to overall size. There needs to be a way to detect
8793 // that condition to know if this is truly a size win.
8794 bool OptForSize = DAG.shouldOptForSize();
8796 // Handle broadcasting a single constant scalar from the constant pool
8798 // On Sandybridge (no AVX2), it is still better to load a constant vector
8799 // from the constant pool and not to broadcast it from a scalar.
8800 // But override that restriction when optimizing for size.
8801 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
8802 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
8803 EVT CVT = Ld.getValueType();
8804 assert(!CVT.isVector() && "Must not broadcast a vector type");
8806 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8807 // For size optimization, also splat v2f64 and v2i64, and for size opt
8808 // with AVX2, also splat i8 and i16.
8809 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8810 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8811 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8812 const Constant *C = nullptr;
8813 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8814 C = CI->getConstantIntValue();
8815 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8816 C = CF->getConstantFPValue();
8818 assert(C && "Invalid constant type");
8820 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8822 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8823 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
8825 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8826 SDValue Ops[] = {DAG.getEntryNode(), CP};
8827 MachinePointerInfo MPI =
8828 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
8829 return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
8830 MPI, Alignment, MachineMemOperand::MOLoad);
8834 // Handle AVX2 in-register broadcasts.
8835 if (!IsLoad && Subtarget.hasInt256() &&
8836 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8837 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8839 // The scalar source must be a normal load.
8843 // Make sure the non-chain result is only used by this build vector.
8844 if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
8847 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8848 (Subtarget.hasVLX() && ScalarSize == 64)) {
8849 auto *LN = cast<LoadSDNode>(Ld);
8850 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8851 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
8853 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
8854 LN->getMemoryVT(), LN->getMemOperand());
8855 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
8859 // The integer check is needed for the 64-bit into 128-bit case so it doesn't
8860 // match double, since there is no vbroadcastsd xmm.
8861 if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
8862 (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
8863 auto *LN = cast<LoadSDNode>(Ld);
8864 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8865 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
8867 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
8868 LN->getMemoryVT(), LN->getMemOperand());
8869 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
8873 // Unsupported broadcast.
8877 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
8878 /// underlying vector and index.
8880 /// Modifies \p ExtractedFromVec to the real vector and returns the real
8881 /// vector index.
8882 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8884 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8885 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8888 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already lowered
8890 //   (extract_vector_elt (v8f32 %1), Constant<6>)
8892 // to (extract_vector_elt (vector_shuffle<2,u,u,u>
8893 //                           (extract_subvector (v8f32 %0), Constant<4>),
8894 //                           undef), Constant<2>).
8896 // In this case the vector is the extract_subvector expression and the index
8897 // is 2, as specified by the shuffle.
8898 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8899 SDValue ShuffleVec = SVOp->getOperand(0);
8900 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8901 assert(ShuffleVecVT.getVectorElementType() ==
8902 ExtractedFromVec.getSimpleValueType().getVectorElementType());
8904 int ShuffleIdx = SVOp->getMaskElt(Idx);
8905 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8906 ExtractedFromVec = ShuffleVec;
8912 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8913 MVT VT = Op.getSimpleValueType();
8915 // Skip if insert_vec_elt is not supported.
8916 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8917 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8921 unsigned NumElems = Op.getNumOperands();
8925 SmallVector<unsigned, 4> InsertIndices;
8926 SmallVector<int, 8> Mask(NumElems, -1);
8928 for (unsigned i = 0; i != NumElems; ++i) {
8929 unsigned Opc = Op.getOperand(i).getOpcode();
8931 if (Opc == ISD::UNDEF)
8934 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8935 // Quit if more than 1 element needs inserting.
8936 if (InsertIndices.size() > 1)
8939 InsertIndices.push_back(i);
8943 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8944 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8946 // Quit if non-constant index.
8947 if (!isa<ConstantSDNode>(ExtIdx))
8949 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8951 // Quit if extracted from vector of different type.
8952 if (ExtractedFromVec.getValueType() != VT)
8955 if (!VecIn1.getNode())
8956 VecIn1 = ExtractedFromVec;
8957 else if (VecIn1 != ExtractedFromVec) {
8958 if (!VecIn2.getNode())
8959 VecIn2 = ExtractedFromVec;
8960 else if (VecIn2 != ExtractedFromVec)
8961 // Quit if more than 2 vectors to shuffle
8965 if (ExtractedFromVec == VecIn1)
8967 else if (ExtractedFromVec == VecIn2)
8968 Mask[i] = Idx + NumElems;
8971 if (!VecIn1.getNode())
8974 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8975 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8977 for (unsigned Idx : InsertIndices)
8978 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8979 DAG.getIntPtrConstant(Idx, DL));
8984 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
8985 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8986 const X86Subtarget &Subtarget) {
8988 MVT VT = Op.getSimpleValueType();
8989 assert((VT.getVectorElementType() == MVT::i1) &&
8990 "Unexpected type in LowerBUILD_VECTORvXi1!");
8993 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
8994 ISD::isBuildVectorAllOnes(Op.getNode()))
8997 uint64_t Immediate = 0;
8998 SmallVector<unsigned, 16> NonConstIdx;
8999 bool IsSplat = true;
9000 bool HasConstElts = false;
9002 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
9003 SDValue In = Op.getOperand(idx);
9006 if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
9007 Immediate |= (InC->getZExtValue() & 0x1) << idx;
9008 HasConstElts = true;
9010 NonConstIdx.push_back(idx);
9014 else if (In != Op.getOperand(SplatIdx))
9018 // For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
9020 // The build_vector allows the scalar element to be larger than the vector
9021 // element type. We need to mask it to use as a condition unless we know
9022 // the upper bits are zero.
9023 // FIXME: Use computeKnownBits instead of checking specific opcode?
9024 SDValue Cond = Op.getOperand(SplatIdx);
9025 assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
9026 if (Cond.getOpcode() != ISD::SETCC)
9027 Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
9028 DAG.getConstant(1, dl, MVT::i8));
9030 // Perform the select in the scalar domain so we can use cmov.
9031 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
9032 SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
9033 DAG.getAllOnesConstant(dl, MVT::i32),
9034 DAG.getConstant(0, dl, MVT::i32));
9035 Select = DAG.getBitcast(MVT::v32i1, Select);
9036 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
9038 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
9039 SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
9040 DAG.getAllOnesConstant(dl, ImmVT),
9041 DAG.getConstant(0, dl, ImmVT));
9042 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
9043 Select = DAG.getBitcast(VecVT, Select);
9044 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
9045 DAG.getIntPtrConstant(0, dl));
9049 // Insert elements one by one.
9052 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
9053 SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
9054 SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
9055 ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
9056 ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
9057 DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
9059 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
9060 SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
9061 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
9062 DstVec = DAG.getBitcast(VecVT, Imm);
9063 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
9064 DAG.getIntPtrConstant(0, dl));
9067 DstVec = DAG.getUNDEF(VT);
9069 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
9070 unsigned InsertIdx = NonConstIdx[i];
9071 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
9072 Op.getOperand(InsertIdx),
9073 DAG.getIntPtrConstant(InsertIdx, dl));
9078 /// This is a helper function of LowerToHorizontalOp().
9079 /// This function checks that the build_vector \p N in input implements a
9080 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
9081 /// may not match the layout of an x86 256-bit horizontal instruction.
9082 /// In other words, if this returns true, then some extraction/insertion will
9083 /// be required to produce a valid horizontal instruction.
9085 /// Parameter \p Opcode defines the kind of horizontal operation to match.
9086 /// For example, if \p Opcode is equal to ISD::ADD, then this function
9087 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
9088 /// is equal to ISD::SUB, then this function checks if this is a horizontal
9091 /// This function only analyzes elements of \p N whose indices are
9092 /// in range [BaseIdx, LastIdx).
9094 /// TODO: This function was originally used to match both real and fake partial
9095 /// horizontal operations, but the index-matching logic is incorrect for that.
9096 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
9097 /// code because it is only used for partial h-op matching now?
9098 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
9100 unsigned BaseIdx, unsigned LastIdx,
9101 SDValue &V0, SDValue &V1) {
9102 EVT VT = N->getValueType(0);
9103 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
9104 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
9105 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
9106 "Invalid Vector in input!");
9108 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
9109 bool CanFold = true;
9110 unsigned ExpectedVExtractIdx = BaseIdx;
9111 unsigned NumElts = LastIdx - BaseIdx;
9112 V0 = DAG.getUNDEF(VT);
9113 V1 = DAG.getUNDEF(VT);
9115 // Check if N implements a horizontal binop.
9116 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
9117 SDValue Op = N->getOperand(i + BaseIdx);
9120 if (Op->isUndef()) {
9121 // Update the expected vector extract index.
9122 if (i * 2 == NumElts)
9123 ExpectedVExtractIdx = BaseIdx;
9124 ExpectedVExtractIdx += 2;
9128 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
9133 SDValue Op0 = Op.getOperand(0);
9134 SDValue Op1 = Op.getOperand(1);
9136 // Try to match the following pattern:
9137 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
9138 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
9139 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
9140 Op0.getOperand(0) == Op1.getOperand(0) &&
9141 isa<ConstantSDNode>(Op0.getOperand(1)) &&
9142 isa<ConstantSDNode>(Op1.getOperand(1)));
9146 unsigned I0 = Op0.getConstantOperandVal(1);
9147 unsigned I1 = Op1.getConstantOperandVal(1);
9149 if (i * 2 < NumElts) {
9151 V0 = Op0.getOperand(0);
9152 if (V0.getValueType() != VT)
9157 V1 = Op0.getOperand(0);
9158 if (V1.getValueType() != VT)
9161 if (i * 2 == NumElts)
9162 ExpectedVExtractIdx = BaseIdx;
9165 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
9166 if (I0 == ExpectedVExtractIdx)
9167 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
9168 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
9169 // Try to match the following dag sequence:
9170 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
9171 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
9175 ExpectedVExtractIdx += 2;
9181 /// Emit a sequence of two 128-bit horizontal add/sub followed by
9182 /// a concat_vector.
9184 /// This is a helper function of LowerToHorizontalOp().
9185 /// This function expects two 256-bit vectors called V0 and V1.
9186 /// At first, each vector is split into two separate 128-bit vectors.
9187 /// Then, the resulting 128-bit vectors are used to implement two
9188 /// horizontal binary operations.
9190 /// The kind of horizontal binary operation is defined by \p X86Opcode.
9192 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
9193 /// the two new horizontal binops.
9194 /// When Mode is set, the first horizontal binop dag node would take as input
9195 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
9196 /// horizontal binop dag node would take as input the lower 128-bit of V1
9197 /// and the upper 128-bit of V1.
9199 /// HADD V0_LO, V0_HI
9200 /// HADD V1_LO, V1_HI
9202 /// Otherwise, the first horizontal binop dag node takes as input the lower
9203 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
9204 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
9206 /// HADD V0_LO, V1_LO
9207 /// HADD V0_HI, V1_HI
9209 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
9210 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
9211 /// the upper 128-bits of the result.
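/// In both modes, the two 128-bit results are rejoined with a CONCAT_VECTORS
/// node to form the final 256-bit value.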
9212 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
9213 const SDLoc &DL, SelectionDAG &DAG,
9214 unsigned X86Opcode, bool Mode,
9215 bool isUndefLO, bool isUndefHI) {
9216 MVT VT = V0.getSimpleValueType();
9217 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
9218 "Invalid nodes in input!");
9220 unsigned NumElts = VT.getVectorNumElements();
9221 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
9222 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
9223 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
9224 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
9225 MVT NewVT = V0_LO.getSimpleValueType();
9227 SDValue LO = DAG.getUNDEF(NewVT);
9228 SDValue HI = DAG.getUNDEF(NewVT);
9230 if (Mode) {
9231 // Don't emit a horizontal binop if the result is expected to be UNDEF.
9232 if (!isUndefLO && !V0->isUndef())
9233 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
9234 if (!isUndefHI && !V1->isUndef())
9235 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
9236 } else {
9237 // Don't emit a horizontal binop if the result is expected to be UNDEF.
9238 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
9239 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
9241 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
9242 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
9243 }
9245 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
9248 /// Returns true iff \p BV builds a vector with the result equivalent to
9249 /// the result of an ADDSUB/SUBADD operation.
9250 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
9251 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
9252 /// \p Opnd0 and \p Opnd1.
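/// For example, a v4f32 build_vector computing
///   <A0-B0, A1+B1, A2-B2, A3+B3>
/// matches ADDSUB (subtract in the even lanes, add in the odd lanes) with
/// \p Opnd0 = A and \p Opnd1 = B.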
9253 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
9254 const X86Subtarget &Subtarget, SelectionDAG &DAG,
9255 SDValue &Opnd0, SDValue &Opnd1,
9256 unsigned &NumExtracts,
9257 bool &IsSubAdd) {
9259 MVT VT = BV->getSimpleValueType(0);
9260 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
9263 unsigned NumElts = VT.getVectorNumElements();
9264 SDValue InVec0 = DAG.getUNDEF(VT);
9265 SDValue InVec1 = DAG.getUNDEF(VT);
9269 // Odd-numbered elements in the input build vector are obtained from
9270 // adding/subtracting two integer/float elements.
9271 // Even-numbered elements in the input build vector are obtained from
9272 // subtracting/adding two integer/float elements.
9273 unsigned Opc[2] = {0, 0};
9274 for (unsigned i = 0, e = NumElts; i != e; ++i) {
9275 SDValue Op = BV->getOperand(i);
9277 // Skip 'undef' values.
9278 unsigned Opcode = Op.getOpcode();
9279 if (Opcode == ISD::UNDEF)
9282 // Early exit if we found an unexpected opcode.
9283 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
9286 SDValue Op0 = Op.getOperand(0);
9287 SDValue Op1 = Op.getOperand(1);
9289 // Try to match the following pattern:
9290 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
9291 // Early exit if we cannot match that sequence.
9292 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9293 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9294 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9295 Op0.getOperand(1) != Op1.getOperand(1))
9298 unsigned I0 = Op0.getConstantOperandVal(1);
9302 // We found a valid add/sub node; make sure it's the same opcode as previous
9303 // elements for this parity.
9304 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
9306 Opc[i % 2] = Opcode;
9308 // Update InVec0 and InVec1.
9309 if (InVec0.isUndef()) {
9310 InVec0 = Op0.getOperand(0);
9311 if (InVec0.getSimpleValueType() != VT)
9314 if (InVec1.isUndef()) {
9315 InVec1 = Op1.getOperand(0);
9316 if (InVec1.getSimpleValueType() != VT)
9320 // Make sure that the operands of each add/sub node always
9321 // come from the same pair of vectors.
9322 if (InVec0 != Op0.getOperand(0)) {
9323 if (Opcode == ISD::FSUB)
9326 // FADD is commutable. Try to commute the operands
9327 // and then test again.
9328 std::swap(Op0, Op1);
9329 if (InVec0 != Op0.getOperand(0))
9333 if (InVec1 != Op1.getOperand(0))
9334 return false;
9336 // Increment the number of extractions done.
9337 ++NumExtracts;
9338 }
9340 // Ensure we have found an opcode for both parities and that they are
9341 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
9342 // inputs are undef.
9343 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
9344 InVec0.isUndef() || InVec1.isUndef())
9345 return false;
9347 IsSubAdd = Opc[0] == ISD::FADD;
9349 Opnd0 = InVec0;
9350 Opnd1 = InVec1;
9351 return true;
9352 }
9354 /// Returns true if it is possible to fold MUL and an idiom that has already been
9355 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
9356 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
9357 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
9359 /// Prior to calling this function it should be known that there is some
9360 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
9361 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
9362 /// before replacement of such SDNode with ADDSUB operation. Thus the number
9363 /// of \p Opnd0 uses is expected to be equal to 2.
9364 /// For example, this function may be called for the following IR:
9365 /// %AB = fmul fast <2 x double> %A, %B
9366 /// %Sub = fsub fast <2 x double> %AB, %C
9367 /// %Add = fadd fast <2 x double> %AB, %C
9368 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
9369 /// <2 x i32> <i32 0, i32 3>
9370 /// There is a def for %Addsub here, which potentially can be replaced by
9371 /// X86ISD::ADDSUB operation:
9372 /// %Addsub = X86ISD::ADDSUB %AB, %C
9373 /// and such ADDSUB can further be replaced with FMADDSUB:
9374 /// %Addsub = FMADDSUB %A, %B, %C.
9376 /// The main reason why this method is called before the replacement of the
9377 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
9378 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
9379 /// FMADDSUB is.
9380 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
9381 SelectionDAG &DAG,
9382 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
9383 unsigned ExpectedUses) {
9384 if (Opnd0.getOpcode() != ISD::FMUL ||
9385 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
9388 // FIXME: These checks must match the similar ones in
9389 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
9390 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
9391 // or MUL + ADDSUB to FMADDSUB.
9392 const TargetOptions &Options = DAG.getTarget().Options;
9393 bool AllowFusion =
9394 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
9395 if (!AllowFusion)
9396 return false;
9398 Opnd2 = Opnd1;
9399 Opnd1 = Opnd0.getOperand(1);
9400 Opnd0 = Opnd0.getOperand(0);
9402 return true;
9403 }
9405 /// Try to fold a build_vector that performs an 'addsub', 'fmaddsub', or
9406 /// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB, or
9407 /// X86ISD::FMSUBADD node, respectively.
9408 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
9409 const X86Subtarget &Subtarget,
9410 SelectionDAG &DAG) {
9411 SDValue Opnd0, Opnd1;
9412 unsigned NumExtracts;
9413 bool IsSubAdd;
9414 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
9415 IsSubAdd))
9416 return SDValue();
9418 MVT VT = BV->getSimpleValueType(0);
9419 SDLoc DL(BV);
9421 // Try to generate X86ISD::FMADDSUB node here.
9422 SDValue Opnd2;
9423 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
9424 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
9425 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
9428 // We only support ADDSUB.
9429 if (IsSubAdd)
9430 return SDValue();
9432 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
9433 // the ADDSUB idiom has been successfully recognized. There are no known
9434 // X86 targets with 512-bit ADDSUB instructions!
9435 // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
9436 // recognition!
9437 if (VT.is512BitVector())
9438 return SDValue();
9440 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
9443 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
9444 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
9445 // Initialize outputs to known values.
9446 MVT VT = BV->getSimpleValueType(0);
9447 HOpcode = ISD::DELETED_NODE;
9448 V0 = DAG.getUNDEF(VT);
9449 V1 = DAG.getUNDEF(VT);
9451 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
9452 // half of the result is calculated independently from the 128-bit halves of
9453 // the inputs, so that makes the index-checking logic below more complicated.
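// For example, a 256-bit v8i32 HADD of inputs A and B produces:
//   { A[0]+A[1], A[2]+A[3], B[0]+B[1], B[2]+B[3],
//     A[4]+A[5], A[6]+A[7], B[4]+B[5], B[6]+B[7] }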
9454 unsigned NumElts = VT.getVectorNumElements();
9455 unsigned GenericOpcode = ISD::DELETED_NODE;
9456 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
9457 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
9458 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
9459 for (unsigned i = 0; i != Num128BitChunks; ++i) {
9460 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
9461 // Ignore undef elements.
9462 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
9463 if (Op.isUndef())
9464 continue;
9466 // If there's an opcode mismatch, we're done.
9467 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
9470 // Initialize horizontal opcode.
9471 if (HOpcode == ISD::DELETED_NODE) {
9472 GenericOpcode = Op.getOpcode();
9473 switch (GenericOpcode) {
9474 case ISD::ADD: HOpcode = X86ISD::HADD; break;
9475 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
9476 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
9477 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
9478 default: return false;
9482 SDValue Op0 = Op.getOperand(0);
9483 SDValue Op1 = Op.getOperand(1);
9484 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9485 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9486 Op0.getOperand(0) != Op1.getOperand(0) ||
9487 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9488 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
9491 // The source vector is chosen based on which 64-bit half of the
9492 // destination vector is being calculated.
9493 if (j < NumEltsIn64Bits) {
9494 if (V0.isUndef())
9495 V0 = Op0.getOperand(0);
9496 } else {
9497 if (V1.isUndef())
9498 V1 = Op0.getOperand(0);
9499 }
9501 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
9502 if (SourceVec != Op0.getOperand(0))
9505 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
9506 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
9507 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
9508 unsigned ExpectedIndex = i * NumEltsIn128Bits +
9509 (j % NumEltsIn64Bits) * 2;
9510 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
9513 // If this is not a commutative op, this does not match.
9514 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
9517 // Addition is commutative, so try swapping the extract indexes.
9518 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
9519 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
9522 // Extract indexes do not match horizontal requirement.
9526 // We matched. Opcode and operands are returned by reference as arguments.
9530 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
9531 SelectionDAG &DAG, unsigned HOpcode,
9532 SDValue V0, SDValue V1) {
9533 // If either input vector is not the same size as the build vector,
9534 // extract/insert the low bits to the correct size.
9535 // This is free (examples: zmm --> xmm, xmm --> ymm).
9536 MVT VT = BV->getSimpleValueType(0);
9537 unsigned Width = VT.getSizeInBits();
9538 if (V0.getValueSizeInBits() > Width)
9539 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
9540 else if (V0.getValueSizeInBits() < Width)
9541 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
9543 if (V1.getValueSizeInBits() > Width)
9544 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
9545 else if (V1.getValueSizeInBits() < Width)
9546 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
9548 unsigned NumElts = VT.getVectorNumElements();
9549 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
9550 for (unsigned i = 0; i != NumElts; ++i)
9551 if (BV->getOperand(i).isUndef())
9552 DemandedElts.clearBit(i);
9554 // If we don't need the upper xmm, then perform as a xmm hop.
9555 unsigned HalfNumElts = NumElts / 2;
9556 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
9557 MVT HalfVT = VT.getHalfNumVectorElementsVT();
9558 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
9559 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
9560 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
9561 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
9564 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
9567 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
9568 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
9569 const X86Subtarget &Subtarget,
9570 SelectionDAG &DAG) {
9571 // We need at least 2 non-undef elements to make this worthwhile by default.
9572 unsigned NumNonUndefs =
9573 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
9574 if (NumNonUndefs < 2)
9577 // There are 4 sets of horizontal math operations distinguished by type:
9578 // int/FP at 128-bit/256-bit. Each type was introduced with a different
9579 // subtarget feature. Try to match those "native" patterns first.
9580 MVT VT = BV->getSimpleValueType(0);
9581 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
9582 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
9583 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
9584 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
9587 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
9588 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
9591 // Try harder to match 256-bit ops by using extract/concat.
9592 if (!Subtarget.hasAVX() || !VT.is256BitVector())
9595 // Count the number of UNDEF operands in the input build_vector.
9596 unsigned NumElts = VT.getVectorNumElements();
9597 unsigned Half = NumElts / 2;
9598 unsigned NumUndefsLO = 0;
9599 unsigned NumUndefsHI = 0;
9600 for (unsigned i = 0, e = Half; i != e; ++i)
9601 if (BV->getOperand(i)->isUndef())
9604 for (unsigned i = Half, e = NumElts; i != e; ++i)
9605 if (BV->getOperand(i)->isUndef())
9609 SDValue InVec0, InVec1;
9610 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
9611 SDValue InVec2, InVec3;
9613 bool CanFold = true;
9615 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
9616 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
9618 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9619 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9620 X86Opcode = X86ISD::HADD;
9621 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
9623 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
9625 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9626 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9627 X86Opcode = X86ISD::HSUB;
9632 // Do not try to expand this build_vector into a pair of horizontal
9633 // add/sub if we can emit a pair of scalar add/sub.
9634 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9637 // Convert this build_vector into a pair of horizontal binops followed by
9638 // a concat vector. We must adjust the outputs from the partial horizontal
9639 // matching calls above to account for undefined vector halves.
9640 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
9641 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
9642 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
9643 bool isUndefLO = NumUndefsLO == Half;
9644 bool isUndefHI = NumUndefsHI == Half;
9645 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
9650 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
9651 VT == MVT::v16i16) {
9653 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
9654 X86Opcode = X86ISD::HADD;
9655 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
9657 X86Opcode = X86ISD::HSUB;
9658 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
9660 X86Opcode = X86ISD::FHADD;
9661 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
9663 X86Opcode = X86ISD::FHSUB;
9667 // Don't try to expand this build_vector into a pair of horizontal add/sub
9668 // if we can simply emit a pair of scalar add/sub.
9669 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9672 // Convert this build_vector into two horizontal add/sub followed by
9673 // a concat vector.
9674 bool isUndefLO = NumUndefsLO == Half;
9675 bool isUndefHI = NumUndefsHI == Half;
9676 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
9677 isUndefLO, isUndefHI);
9683 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
9686 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
9687 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
9688 /// just apply the bit operation to the vectors.
9689 /// NOTE: It's not in our interest to start making a general-purpose vectorizer
9690 /// from this, but enough scalar bit operations are created by the later
9691 /// legalization + scalarization stages to need basic support.
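/// For example:
///   (build_vector (and x0, 1), (and x1, 2), (and x2, 4), (and x3, 8))
/// becomes
///   (and (build_vector x0, x1, x2, x3), (build_vector 1, 2, 4, 8))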
9692 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
9693 const X86Subtarget &Subtarget,
9694 SelectionDAG &DAG) {
9696 MVT VT = Op->getSimpleValueType(0);
9697 unsigned NumElems = VT.getVectorNumElements();
9698 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9700 // Check that all elements have the same opcode.
9701 // TODO: Should we allow UNDEFS and if so how many?
9702 unsigned Opcode = Op->getOperand(0).getOpcode();
9703 for (unsigned i = 1; i < NumElems; ++i)
9704 if (Opcode != Op->getOperand(i).getOpcode())
9707 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
9708 bool IsShift = false;
9720 // Don't do this if the buildvector is a splat - we'd replace one
9721 // constant with an entire vector.
9722 if (Op->getSplatValue())
9724 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
9729 SmallVector<SDValue, 4> LHSElts, RHSElts;
9730 for (SDValue Elt : Op->ops()) {
9731 SDValue LHS = Elt.getOperand(0);
9732 SDValue RHS = Elt.getOperand(1);
9734 // We expect the canonicalized RHS operand to be the constant.
9735 if (!isa<ConstantSDNode>(RHS))
9738 // Extend shift amounts.
9739 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
9742 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
9745 LHSElts.push_back(LHS);
9746 RHSElts.push_back(RHS);
9749 // Limit to shifts by uniform immediates.
9750 // TODO: Only accept vXi8/vXi64 special cases?
9751 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
9752 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
9755 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
9756 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
9757 SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
9762 // Immediately lower the shift to ensure the constant build vector doesn't
9763 // get converted to a constant pool before the shift is lowered.
9764 return LowerShift(Res, Subtarget, DAG);
9767 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
9768 /// functionality to do this, so it's all zeros, all ones, or some derivation
9769 /// that is cheap to calculate.
9770 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
9771 const X86Subtarget &Subtarget) {
9773 MVT VT = Op.getSimpleValueType();
9775 // Vectors containing all zeros can be matched by pxor and xorps.
9776 if (ISD::isBuildVectorAllZeros(Op.getNode()))
9779 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
9780 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
9781 // vpcmpeqd on 256-bit vectors.
9782 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
9783 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
9786 return getOnesVector(VT, DAG, DL);
9792 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
9793 /// from a vector of source values and a vector of extraction indices.
9794 /// The vectors might be manipulated to match the type of the permute op.
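/// For example, a v4i32 permute is emitted as X86ISD::VPERMILPV on AVX targets,
/// or as a v16i8 X86ISD::PSHUFB with byte-scaled indices on SSSE3-only targets.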
9795 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
9796 SDLoc &DL, SelectionDAG &DAG,
9797 const X86Subtarget &Subtarget) {
9799 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9800 unsigned NumElts = VT.getVectorNumElements();
9801 unsigned SizeInBits = VT.getSizeInBits();
9803 // Adjust IndicesVec to match VT size.
9804 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
9805 "Illegal variable permute mask size");
9806 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
9807 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
9808 NumElts * VT.getScalarSizeInBits());
9809 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9811 // Handle a SrcVec that doesn't match the VT type.
9812 if (SrcVec.getValueSizeInBits() != SizeInBits) {
9813 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9814 // Handle larger SrcVec by treating it as a larger permute.
9815 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9816 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9817 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9818 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9819 Subtarget, DAG, SDLoc(IndicesVec));
9821 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9823 return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
9825 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9826 // Widen smaller SrcVec to match VT.
9827 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9832 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9833 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9834 EVT SrcVT = Idx.getValueType();
9835 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9836 uint64_t IndexScale = 0;
9837 uint64_t IndexOffset = 0;
9839 // If we're scaling a smaller permute op, then we need to repeat the
9840 // indices, scaling and offsetting them as well.
9841 // e.g. v4i32 -> v16i8 (Scale = 4)
9842 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9843 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
9844 for (uint64_t i = 0; i != Scale; ++i) {
9845 IndexScale |= Scale << (i * NumDstBits);
9846 IndexOffset |= i << (i * NumDstBits);
9849 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9850 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9851 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9852 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9856 unsigned Opcode = 0;
9857 switch (VT.SimpleTy) {
9861 if (Subtarget.hasSSSE3())
9862 Opcode = X86ISD::PSHUFB;
9865 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9866 Opcode = X86ISD::VPERMV;
9867 else if (Subtarget.hasSSSE3()) {
9868 Opcode = X86ISD::PSHUFB;
9869 ShuffleVT = MVT::v16i8;
9874 if (Subtarget.hasAVX()) {
9875 Opcode = X86ISD::VPERMILPV;
9876 ShuffleVT = MVT::v4f32;
9877 } else if (Subtarget.hasSSSE3()) {
9878 Opcode = X86ISD::PSHUFB;
9879 ShuffleVT = MVT::v16i8;
9884 if (Subtarget.hasAVX()) {
9885 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
9886 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9887 Opcode = X86ISD::VPERMILPV;
9888 ShuffleVT = MVT::v2f64;
9889 } else if (Subtarget.hasSSE41()) {
9890 // SSE41 can compare v2i64 - select between indices 0 and 1.
9891 return DAG.getSelectCC(
9893 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9894 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9895 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9896 ISD::CondCode::SETEQ);
9900 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9901 Opcode = X86ISD::VPERMV;
9902 else if (Subtarget.hasXOP()) {
9903 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9904 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9905 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9906 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9908 ISD::CONCAT_VECTORS, DL, VT,
9909 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9910 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9911 } else if (Subtarget.hasAVX()) {
9912 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9913 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9914 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9915 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9916 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9917 ArrayRef<SDValue> Ops) {
9918 // Permute Lo and Hi and then select based on index range.
9919 // This works as PSHUFB uses bits[3:0] to permute elements and we don't
9920 // care about bit[7] as it's just an index vector.
9921 SDValue Idx = Ops[2];
9922 EVT VT = Idx.getValueType();
9923 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9924 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9925 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9926 ISD::CondCode::SETGT);
9928 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9929 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9934 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9935 Opcode = X86ISD::VPERMV;
9936 else if (Subtarget.hasAVX()) {
9937 // Scale to v32i8 and perform as v32i8.
9938 IndicesVec = ScaleIndices(IndicesVec, 2);
9939 return DAG.getBitcast(
9940 VT, createVariablePermute(
9941 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9942 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9947 if (Subtarget.hasAVX2())
9948 Opcode = X86ISD::VPERMV;
9949 else if (Subtarget.hasAVX()) {
9950 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9951 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9952 {0, 1, 2, 3, 0, 1, 2, 3});
9953 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9954 {4, 5, 6, 7, 4, 5, 6, 7});
9955 if (Subtarget.hasXOP())
9956 return DAG.getBitcast(
9957 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
9958 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9959 // Permute Lo and Hi and then select based on index range.
9960 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9961 SDValue Res = DAG.getSelectCC(
9962 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9963 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9964 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9965 ISD::CondCode::SETGT);
9966 return DAG.getBitcast(VT, Res);
9971 if (Subtarget.hasAVX512()) {
9972 if (!Subtarget.hasVLX()) {
9973 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9974 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9976 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9977 DAG, SDLoc(IndicesVec));
9978 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9980 return extract256BitVector(Res, 0, DAG, DL);
9982 Opcode = X86ISD::VPERMV;
9983 } else if (Subtarget.hasAVX()) {
9984 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9986 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9988 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9989 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
9990 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9991 if (Subtarget.hasXOP())
9992 return DAG.getBitcast(
9993 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
9994 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9995 // Permute Lo and Hi and then select based on index range.
9996 // This works as VPERMILPD only uses index bit[1] to permute elements.
9997 SDValue Res = DAG.getSelectCC(
9998 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
9999 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
10000 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
10001 ISD::CondCode::SETGT);
10002 return DAG.getBitcast(VT, Res);
10006 if (Subtarget.hasVBMI())
10007 Opcode = X86ISD::VPERMV;
10010 if (Subtarget.hasBWI())
10011 Opcode = X86ISD::VPERMV;
10017 if (Subtarget.hasAVX512())
10018 Opcode = X86ISD::VPERMV;
10024 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
10025 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
10026 "Illegal variable permute shuffle type");
10028 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
10030 IndicesVec = ScaleIndices(IndicesVec, Scale);
10032 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
10033 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
10035 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
10036 SDValue Res = Opcode == X86ISD::VPERMV
10037 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
10038 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
10039 return DAG.getBitcast(VT, Res);
10042 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
10043 // reasoned to be a permutation of a vector by indices in a non-constant vector.
10044 // (build_vector (extract_elt V, (extract_elt I, 0)),
10045 // (extract_elt V, (extract_elt I, 1)),
10050 // TODO: Handle undefs
10051 // TODO: Utilize pshufb and zero mask blending to support more efficient
10052 // construction of vectors with constant-0 elements.
10054 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
10055 const X86Subtarget &Subtarget) {
10056 SDValue SrcVec, IndicesVec;
10057 // Check for a match of the permute source vector and permute index elements.
10058 // This is done by checking that the i-th build_vector operand is of the form:
10059 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
10060 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
10061 SDValue Op = V.getOperand(Idx);
10062 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
10065 // If this is the first extract encountered in V, set the source vector,
10066 // otherwise verify the extract is from the previously defined source
10067 // vector.
10068 if (!SrcVec)
10069 SrcVec = Op.getOperand(0);
10070 else if (SrcVec != Op.getOperand(0))
10072 SDValue ExtractedIndex = Op->getOperand(1);
10073 // Peek through extends.
10074 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
10075 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
10076 ExtractedIndex = ExtractedIndex.getOperand(0);
10077 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
10080 // If this is the first extract from the index vector candidate, set the
10081 // indices vector, otherwise verify the extract is from the previously
10082 // defined indices vector.
10083 if (!IndicesVec)
10084 IndicesVec = ExtractedIndex.getOperand(0);
10085 else if (IndicesVec != ExtractedIndex.getOperand(0))
10088 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
10089 if (!PermIdx || PermIdx->getAPIntValue() != Idx)
10094 MVT VT = V.getSimpleValueType();
10095 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
10099 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
10102 MVT VT = Op.getSimpleValueType();
10103 MVT EltVT = VT.getVectorElementType();
10104 unsigned NumElems = Op.getNumOperands();
10106 // Generate vectors for predicate vectors.
10107 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
10108 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
10110 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
10111 return VectorConstant;
10113 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
10114 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
10116 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
10117 return HorizontalOp;
10118 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
10120 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
10123 unsigned EVTBits = EltVT.getSizeInBits();
10125 unsigned NumZero = 0;
10126 unsigned NumNonZero = 0;
10127 uint64_t NonZeros = 0;
10128 bool IsAllConstants = true;
10129 SmallSet<SDValue, 8> Values;
10130 unsigned NumConstants = NumElems;
10131 for (unsigned i = 0; i < NumElems; ++i) {
10132 SDValue Elt = Op.getOperand(i);
10135 Values.insert(Elt);
10136 if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
10137 IsAllConstants = false;
10140 if (X86::isZeroNode(Elt))
10143 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
10144 NonZeros |= ((uint64_t)1 << i);
10149 // All undef vector. Return an UNDEF. All zero vectors were handled above.
10150 if (NumNonZero == 0)
10151 return DAG.getUNDEF(VT);
10153 // If we are inserting one variable into a vector of non-zero constants, try
10154 // to avoid loading each constant element as a scalar. Load the constants as a
10155 // vector and then insert the variable scalar element. If insertion is not
10156 // supported, fall back to a shuffle to get the scalar blended with the
10157 // constants. Insertion into a zero vector is handled as a special-case
10158 // somewhere below here.
10159 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
10160 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
10161 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
10162 // Create an all-constant vector. The variable element in the old
10163 // build vector is replaced by undef in the constant vector. Save the
10164 // variable scalar element and its index for use in the insertelement.
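// For example, <4 x i32> <i32 1, i32 %x, i32 3, i32 4> becomes a constant
// pool load of <1, undef, 3, 4> followed by an insert of %x at index 1.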
10165 LLVMContext &Context = *DAG.getContext();
10166 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
10167 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
10170 for (unsigned i = 0; i != NumElems; ++i) {
10171 SDValue Elt = Op.getOperand(i);
10172 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
10173 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
10174 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
10175 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
10176 else if (!Elt.isUndef()) {
10177 assert(!VarElt.getNode() && !InsIndex.getNode() &&
10178 "Expected one variable element in this vector");
10180 InsIndex = DAG.getVectorIdxConstant(i, dl);
10183 Constant *CV = ConstantVector::get(ConstVecOps);
10184 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
10186 // The constants we just created may not be legal (e.g., floating point). We
10187 // must lower the vector right here because we cannot guarantee that we'll
10188 // legalize it before loading it. This is also why we could not just create
10189 // a new build vector here. If the build vector contains illegal constants,
10190 // it could get split back up into a series of insert elements.
10191 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
10192 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
10193 MachineFunction &MF = DAG.getMachineFunction();
10194 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
10195 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
10196 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
10197 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
10198 if (InsertC < NumEltsInLow128Bits)
10199 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
10201 // There's no good way to insert into the high elements of a >128-bit
10202 // vector, so use shuffles to avoid an extract/insert sequence.
10203 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
10204 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
10205 SmallVector<int, 8> ShuffleMask;
10206 unsigned NumElts = VT.getVectorNumElements();
10207 for (unsigned i = 0; i != NumElts; ++i)
10208 ShuffleMask.push_back(i == InsertC ? NumElts : i);
10209 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
10210 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
10213 // Special case for single non-zero, non-undef, element.
10214 if (NumNonZero == 1) {
10215 unsigned Idx = countTrailingZeros(NonZeros);
10216 SDValue Item = Op.getOperand(Idx);
10218 // If we have a constant or non-constant insertion into the low element of
10219 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
10220 // the rest of the elements. This will be matched as movd/movq/movss/movsd
10221 // depending on what the source datatype is.
10224 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10226 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
10227 (EltVT == MVT::i64 && Subtarget.is64Bit())) {
10228 assert((VT.is128BitVector() || VT.is256BitVector() ||
10229 VT.is512BitVector()) &&
10230 "Expected an SSE value type!");
10231 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10232 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
10233 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
10236 // We can't directly insert an i8 or i16 into a vector, so zero extend
10237 // it to i32 first.
10238 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
10239 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
10240 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
10241 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
10242 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
10243 return DAG.getBitcast(VT, Item);
10247 // Is it a vector logical left shift?
10248 if (NumElems == 2 && Idx == 1 &&
10249 X86::isZeroNode(Op.getOperand(0)) &&
10250 !X86::isZeroNode(Op.getOperand(1))) {
10251 unsigned NumBits = VT.getSizeInBits();
10252 return getVShift(true, VT,
10253 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
10254 VT, Op.getOperand(1)),
10255 NumBits/2, DAG, *this, dl);
10258 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
10261 // Otherwise, if this is a vector with i32 or f32 elements, and the element
10262 // is a non-constant being inserted into an element other than the low one,
10263 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
10264 // movd/movss) to move this into the low element, then shuffle it into
10265 // place.
10266 if (EVTBits == 32) {
10267 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10268 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
10272 // Splat is obviously ok. Let legalizer expand it to a shuffle.
10273 if (Values.size() == 1) {
10274 if (EVTBits == 32) {
10275 // Instead of a shuffle like this:
10276 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
10277 // Check if it's possible to issue this instead.
10278 // shuffle (vload ptr)), undef, <1, 1, 1, 1>
10279 unsigned Idx = countTrailingZeros(NonZeros);
10280 SDValue Item = Op.getOperand(Idx);
10281 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
10282 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
10287 // A vector full of immediates; various special cases are already
10288 // handled, so this is best done with a single constant-pool load.
10289 if (IsAllConstants)
10292 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
10295 // See if we can use a vector load to get all of the elements.
10297 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
10299 EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
10303 // If this is a splat of pairs of 32-bit elements, we can use a narrower
10304 // build_vector and broadcast it.
10305 // TODO: We could probably generalize this more.
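// For example, (build_vector X, Y, X, Y, X, Y, X, Y) is rebuilt as a narrow
// build_vector of the pair <X, Y> and then broadcast as a 64-bit element.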
10306 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
10307 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
10308 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
10309 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
10310 // Make sure all the even/odd operands match.
10311 for (unsigned i = 2; i != NumElems; ++i)
10312 if (Ops[i % 2] != Op.getOperand(i))
10316 if (CanSplat(Op, NumElems, Ops)) {
10317 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
10318 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
10319 // Create a new build vector and cast to v2i64/v2f64.
10320 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
10321 DAG.getBuildVector(NarrowVT, dl, Ops));
10322 // Broadcast from v2i64/v2f64 and cast to final VT.
10323 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
10324 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
10329 // For AVX-length vectors, build the individual 128-bit pieces and use
10330 // shuffles to put them in place.
10331 if (VT.getSizeInBits() > 128) {
10332 MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
10334 // Build both the lower and upper subvector.
10336 DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
10337 SDValue Upper = DAG.getBuildVector(
10338 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
10340 // Recreate the wider vector with the lower and upper part.
10341 return concatSubVectors(Lower, Upper, DAG, dl);
10344 // Let legalizer expand 2-wide build_vectors.
10345 if (EVTBits == 64) {
10346 if (NumNonZero == 1) {
10347 // One half is zero or undef.
10348 unsigned Idx = countTrailingZeros(NonZeros);
10349 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
10350 Op.getOperand(Idx));
10351 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
10356 // If element VT is < 32 bits, convert it to inserts into a zero vector.
10357 if (EVTBits == 8 && NumElems == 16)
10358 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
10362 if (EVTBits == 16 && NumElems == 8)
10363 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
10367 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
10368 if (EVTBits == 32 && NumElems == 4)
10369 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
10372 // If element VT is == 32 bits, turn it into a number of shuffles.
10373 if (NumElems == 4 && NumZero > 0) {
10374 SmallVector<SDValue, 8> Ops(NumElems);
10375 for (unsigned i = 0; i < 4; ++i) {
10376 bool isZero = !(NonZeros & (1ULL << i));
10378 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
10380 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10383 for (unsigned i = 0; i < 2; ++i) {
10384 switch ((NonZeros >> (i*2)) & 0x3) {
10385 default: llvm_unreachable("Unexpected NonZero count");
10387 Ops[i] = Ops[i*2]; // Must be a zero vector.
10390 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
10393 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10396 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10401 bool Reverse1 = (NonZeros & 0x3) == 2;
10402 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
10406 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
10407 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
10409 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
10412 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
10414 // Check for a build vector from mostly shuffle plus few inserting.
10415 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
10418 // For SSE 4.1, use insertps to put the high elements into the low element.
10419 if (Subtarget.hasSSE41()) {
10421 if (!Op.getOperand(0).isUndef())
10422 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
10424 Result = DAG.getUNDEF(VT);
10426 for (unsigned i = 1; i < NumElems; ++i) {
10427 if (Op.getOperand(i).isUndef()) continue;
10428 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
10429 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
10434 // Otherwise, expand into a number of unpckl*, start by extending each of
10435 // our (non-undef) elements to the full vector width with the element in the
10436 // bottom slot of the vector (which generates no code for SSE).
10437 SmallVector<SDValue, 8> Ops(NumElems);
10438 for (unsigned i = 0; i < NumElems; ++i) {
10439 if (!Op.getOperand(i).isUndef())
10440 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10442 Ops[i] = DAG.getUNDEF(VT);
10445 // Next, we iteratively mix elements, e.g. for v4f32:
10446 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
10447 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
10448 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
10449 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
10450 // Generate scaled UNPCKL shuffle mask.
10451 SmallVector<int, 16> Mask;
10452 for(unsigned i = 0; i != Scale; ++i)
10454 for (unsigned i = 0; i != Scale; ++i)
10455 Mask.push_back(NumElems+i);
10456 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
10458 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
10459 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
10464 // 256-bit AVX can use the vinsertf128 instruction
10465 // to create 256-bit vectors from two other 128-bit ones.
10466 // TODO: Detect subvector broadcast here instead of DAG combine?
10467 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
10468 const X86Subtarget &Subtarget) {
10470 MVT ResVT = Op.getSimpleValueType();
10472 assert((ResVT.is256BitVector() ||
10473 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
10475 unsigned NumOperands = Op.getNumOperands();
10476 unsigned NumZero = 0;
10477 unsigned NumNonZero = 0;
10478 unsigned NonZeros = 0;
10479 for (unsigned i = 0; i != NumOperands; ++i) {
10480 SDValue SubVec = Op.getOperand(i);
10481 if (SubVec.isUndef())
10483 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10486 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10487 NonZeros |= 1 << i;
10492 // If we have more than 2 non-zeros, build each half separately.
10493 if (NumNonZero > 2) {
10494 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10495 ArrayRef<SDUse> Ops = Op->ops();
10496 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10497 Ops.slice(0, NumOperands/2));
10498 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10499 Ops.slice(NumOperands/2));
10500 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10503 // Otherwise, build it up through insert_subvectors.
10504 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
10505 : DAG.getUNDEF(ResVT);
10507 MVT SubVT = Op.getOperand(0).getSimpleValueType();
10508 unsigned NumSubElems = SubVT.getVectorNumElements();
10509 for (unsigned i = 0; i != NumOperands; ++i) {
10510 if ((NonZeros & (1 << i)) == 0)
10513 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
10515 DAG.getIntPtrConstant(i * NumSubElems, dl));
10521 // Returns true if the given node is a type promotion (by concatenating i1
10522 // zeros) of the result of a node that already zeros all upper bits of
10523 // a k-register.
10524 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
10525 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
10526 const X86Subtarget &Subtarget,
10527 SelectionDAG & DAG) {
10529 MVT ResVT = Op.getSimpleValueType();
10530 unsigned NumOperands = Op.getNumOperands();
10532 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
10533 "Unexpected number of operands in CONCAT_VECTORS");
10535 uint64_t Zeros = 0;
10536 uint64_t NonZeros = 0;
10537 for (unsigned i = 0; i != NumOperands; ++i) {
10538 SDValue SubVec = Op.getOperand(i);
10539 if (SubVec.isUndef())
10541 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10542 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10543 Zeros |= (uint64_t)1 << i;
10545 NonZeros |= (uint64_t)1 << i;
10548 unsigned NumElems = ResVT.getVectorNumElements();
10550 // If we are inserting a non-zero vector and there are zeros in the LSBs and
10551 // undef in the MSBs, we need to emit a KSHIFTL. The generic lowering to
10552 // insert_subvector will give us two kshifts.
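// For example, concat_vectors(v2i1 zero, v2i1 X, v2i1 undef, v2i1 undef) can
// be emitted as a single KSHIFTL of X by 2 within a wider mask register.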
10553 if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
10554 Log2_64(NonZeros) != NumOperands - 1) {
10555 MVT ShiftVT = ResVT;
10556 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
10557 ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
10558 unsigned Idx = Log2_64(NonZeros);
10559 SDValue SubVec = Op.getOperand(Idx);
10560 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10561 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
10562 DAG.getUNDEF(ShiftVT), SubVec,
10563 DAG.getIntPtrConstant(0, dl));
10564 Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
10565 DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
10566 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
10567 DAG.getIntPtrConstant(0, dl));
10570 // If there are zero or one non-zeros we can handle this very simply.
10571 if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
10572 SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
10575 unsigned Idx = Log2_64(NonZeros);
10576 SDValue SubVec = Op.getOperand(Idx);
10577 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10578 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
10579 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
10582 if (NumOperands > 2) {
10583 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10584 ArrayRef<SDUse> Ops = Op->ops();
10585 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10586 Ops.slice(0, NumOperands/2));
10587 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10588 Ops.slice(NumOperands/2));
10589 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10592 assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");
10594 if (ResVT.getVectorNumElements() >= 16)
10595 return Op; // The operation is legal with KUNPCK
10597 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
10598 DAG.getUNDEF(ResVT), Op.getOperand(0),
10599 DAG.getIntPtrConstant(0, dl));
10600 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
10601 DAG.getIntPtrConstant(NumElems/2, dl));
10604 static SDValue LowerCONCAT_VECTORS(SDValue Op,
10605 const X86Subtarget &Subtarget,
10606 SelectionDAG &DAG) {
10607 MVT VT = Op.getSimpleValueType();
10608 if (VT.getVectorElementType() == MVT::i1)
10609 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
10611 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
10612 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
10613 Op.getNumOperands() == 4)));
10615 // AVX can use the vinsertf128 instruction to create 256-bit vectors
10616 // from two other 128-bit ones.
10618 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
10619 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
10622 //===----------------------------------------------------------------------===//
10623 // Vector shuffle lowering
10625 // This is an experimental code path for lowering vector shuffles on x86. It is
10626 // designed to handle arbitrary vector shuffles and blends, gracefully
10627 // degrading performance as necessary. It works hard to recognize idiomatic
10628 // shuffles and lower them to optimal instruction patterns without leaving
10629 // a framework that allows reasonably efficient handling of all vector shuffle
10630 // operations.
10631 //===----------------------------------------------------------------------===//
10633 /// Tiny helper function to identify a no-op mask.
10635 /// This is a somewhat boring predicate function. It checks whether the mask
10636 /// array input, which is assumed to be a single-input shuffle mask of the kind
10637 /// used by the X86 shuffle instructions (not a fully general
10638 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
10639 /// in-place shuffle are 'no-op's.
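/// For example, for a 4-element mask, <-1, 1, -1, 3> is a no-op, while
/// <0, 0, 2, 3> is not (element 1 changes position).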
10640 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
10641 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10642 assert(Mask[i] >= -1 && "Out of bound mask element!");
10643 if (Mask[i] >= 0 && Mask[i] != i)
10649 /// Test whether there are elements crossing LaneSizeInBits lanes in this
10652 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
10653 /// and we routinely test for these.
10654 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
10655 unsigned ScalarSizeInBits,
10656 ArrayRef<int> Mask) {
10657 assert(LaneSizeInBits && ScalarSizeInBits &&
10658 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
10659 "Illegal shuffle lane size");
10660 int LaneSize = LaneSizeInBits / ScalarSizeInBits;
10661 int Size = Mask.size();
10662 for (int i = 0; i < Size; ++i)
10663 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10668 /// Test whether there are elements crossing 128-bit lanes in this
10670 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
10671 return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
10674 /// Test whether a shuffle mask is equivalent within each sub-lane.
10676 /// This checks a shuffle mask to see if it is performing the same
10677 /// lane-relative shuffle in each sub-lane. This trivially implies
10678 /// that it is also not lane-crossing. It may however involve a blend from the
10679 /// same lane of a second vector.
10681 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
10682 /// non-trivial to compute in the face of undef lanes. The representation is
10683 /// suitable for use with existing 128-bit shuffles as entries from the second
10684 /// vector have been remapped to [LaneSize, 2*LaneSize).
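/// For example, the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats the same
/// pattern in both 128-bit lanes and yields the RepeatedMask <0, 5, 2, 7>.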
10685 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
10686 ArrayRef<int> Mask,
10687 SmallVectorImpl<int> &RepeatedMask) {
10688 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10689 RepeatedMask.assign(LaneSize, -1);
10690 int Size = Mask.size();
10691 for (int i = 0; i < Size; ++i) {
10692 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
10695 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10696 // This entry crosses lanes, so there is no way to model this shuffle.
10699 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10700 // Adjust second vector indices to start at LaneSize instead of Size.
10701 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
10702 : Mask[i] % LaneSize + LaneSize;
10703 if (RepeatedMask[i % LaneSize] < 0)
10704 // This is the first non-undef entry in this slot of a 128-bit lane.
10705 RepeatedMask[i % LaneSize] = LocalM;
10706 else if (RepeatedMask[i % LaneSize] != LocalM)
10707 // Found a mismatch with the repeated mask.
10713 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
10715 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10716 SmallVectorImpl<int> &RepeatedMask) {
10717 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10721 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
10722 SmallVector<int, 32> RepeatedMask;
10723 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10726 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
10728 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10729 SmallVectorImpl<int> &RepeatedMask) {
10730 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
10733 /// Test whether a target shuffle mask is equivalent within each sub-lane.
10734 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
10735 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
10736 ArrayRef<int> Mask,
10737 SmallVectorImpl<int> &RepeatedMask) {
10738 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10739 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
10740 int Size = Mask.size();
10741 for (int i = 0; i < Size; ++i) {
10742 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
10743 if (Mask[i] == SM_SentinelUndef)
10745 if (Mask[i] == SM_SentinelZero) {
10746 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
10748 RepeatedMask[i % LaneSize] = SM_SentinelZero;
10751 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10752 // This entry crosses lanes, so there is no way to model this shuffle.
10755 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10756 // Adjust second vector indices to start at LaneSize instead of Size.
10758 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
10759 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
10760 // This is the first non-undef entry in this slot of a 128-bit lane.
10761 RepeatedMask[i % LaneSize] = LocalM;
10762 else if (RepeatedMask[i % LaneSize] != LocalM)
10763 // Found a mismatch with the repeated mask.
10769 /// Checks whether a shuffle mask is equivalent to an explicit list of arguments.
10772 /// This is a fast way to test a shuffle mask against a fixed pattern:
10774 ///   if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
10776 /// It returns true if the mask is exactly as wide as the argument list, and
10777 /// each element of the mask is either -1 (signifying undef) or the value given
10778 /// in the argument.
10779 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
10780 ArrayRef<int> ExpectedMask) {
10781 if (Mask.size() != ExpectedMask.size())
10784 int Size = Mask.size();
10786 // If the values are build vectors, we can look through them to find
10787 // equivalent inputs that make the shuffles equivalent.
10788 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
10789 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
10791 for (int i = 0; i < Size; ++i) {
10792 assert(Mask[i] >= -1 && "Out of bound mask element!");
10793 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
10794 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10795 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10796 if (!MaskBV || !ExpectedBV ||
10797 MaskBV->getOperand(Mask[i] % Size) !=
10798 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10806 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
10808 /// The masks must be exactly the same width.
10810 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
10811 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
10813 /// SM_SentinelZero is accepted as a valid negative index but must match in both.
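/// For example, the mask <-1,1,6,-1> is equivalent to the expected mask
/// <0,1,6,7> because undef elements always match, whereas an SM_SentinelZero
/// element only matches another SM_SentinelZero.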
10815 static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
10816 ArrayRef<int> ExpectedMask,
10817 SDValue V1 = SDValue(),
10818 SDValue V2 = SDValue()) {
10819 int Size = Mask.size();
10820 if (Size != (int)ExpectedMask.size())
10822 assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
10823 "Illegal target shuffle mask");
10825 // Check for out-of-range target shuffle mask indices.
10826 if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
10829 // If the values are build vectors, we can look through them to find
10830 // equivalent inputs that make the shuffles equivalent.
10831 auto *BV1 = dyn_cast_or_null<BuildVectorSDNode>(V1);
10832 auto *BV2 = dyn_cast_or_null<BuildVectorSDNode>(V2);
10833 BV1 = ((BV1 && Size != (int)BV1->getNumOperands()) ? nullptr : BV1);
10834 BV2 = ((BV2 && Size != (int)BV2->getNumOperands()) ? nullptr : BV2);
10836 for (int i = 0; i < Size; ++i) {
10837 if (Mask[i] == SM_SentinelUndef || Mask[i] == ExpectedMask[i])
10839 if (0 <= Mask[i] && 0 <= ExpectedMask[i]) {
10840 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10841 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10842 if (MaskBV && ExpectedBV &&
10843 MaskBV->getOperand(Mask[i] % Size) ==
10844 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10847 // TODO - handle SM_Sentinel equivalences.
10853 // Attempt to create a shuffle mask from a VSELECT condition mask.
10854 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10856 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10859 unsigned Size = Cond.getValueType().getVectorNumElements();
10860 Mask.resize(Size, SM_SentinelUndef);
10862 for (int i = 0; i != (int)Size; ++i) {
10863 SDValue CondElt = Cond.getOperand(i);
10865 // Arbitrarily choose from the 2nd operand if the select condition element is undef.
10867 // TODO: Can we do better by matching patterns such as even/odd?
10868 if (CondElt.isUndef() || isNullConstant(CondElt))
10875 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd instructions.
10877 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10878 if (VT != MVT::v8i32 && VT != MVT::v8f32)
10881 SmallVector<int, 8> Unpcklwd;
10882 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10883 /* Unary = */ false);
10884 SmallVector<int, 8> Unpckhwd;
10885 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10886 /* Unary = */ false);
10887 bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10888 isTargetShuffleEquivalent(Mask, Unpckhwd));
10889 return IsUnpackwdMask;
10892 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10893 // Create 128-bit vector type based on mask size.
10894 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10895 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10897 // We can't assume a canonical shuffle mask, so try the commuted version too.
10898 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10899 ShuffleVectorSDNode::commuteMask(CommutedMask);
10901 // Match any of unary/binary or low/high.
10902 for (unsigned i = 0; i != 4; ++i) {
10903 SmallVector<int, 16> UnpackMask;
10904 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10905 if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10906 isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10912 /// Return true if a shuffle mask chooses elements identically in its top and
10913 /// bottom halves. For example, any splat mask has the same top and bottom
10914 /// halves. If an element is undefined in only one half of the mask, the halves
10915 /// are not considered identical.
10916 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10917 assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10918 unsigned HalfSize = Mask.size() / 2;
10919 for (unsigned i = 0; i != HalfSize; ++i) {
10920 if (Mask[i] != Mask[i + HalfSize])
10926 /// Get a 4-lane 8-bit shuffle immediate for a mask.
10928 /// This helper function produces an 8-bit shuffle immediate corresponding to
10929 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
10930 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for example.
10933 /// NB: We rely heavily on "undef" masks preserving the input lane.
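/// For example, the mask <3,2,1,0> yields the immediate 0x1B, while an
/// all-undef mask yields 0xE4 (the identity shuffle).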
10934 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10935 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10936 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10937 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10938 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10939 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10942 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10943 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10944 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10945 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10949 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10950 SelectionDAG &DAG) {
10951 return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10954 // The shuffle result has the form:
10955 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements appear in ascending order.
10956 // Each element of Zeroable corresponds to a particular element of Mask,
10957 // as described in the computeZeroableShuffleElements function.
10959 // The function looks for a sub-mask whose non-zero elements are in
10960 // increasing order; if such a sub-mask exists, the function returns true.
10961 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10962 ArrayRef<int> Mask, const EVT &VectorType,
10963 bool &IsZeroSideLeft) {
10964 int NextElement = -1;
10965 // Check if the Mask's nonzero elements are in increasing order.
10966 for (int i = 0, e = Mask.size(); i < e; i++) {
10967 // Check that the mask's zero elements are built only from zeros (i.e. are zeroable).
10968 assert(Mask[i] >= -1 && "Out of bound mask element!");
10973 // Find the lowest non zero element
10974 if (NextElement < 0) {
10975 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10976 IsZeroSideLeft = NextElement != 0;
10978 // Exit if the mask's non zero elements are not in increasing order.
10979 if (NextElement != Mask[i])
10986 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
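/// For example, a v4i32 <1,0,3,2> shuffle of V1 becomes a PSHUFB with the
/// byte mask <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11>.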
10987 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10988 ArrayRef<int> Mask, SDValue V1,
10989 SDValue V2, const APInt &Zeroable,
10990 const X86Subtarget &Subtarget,
10991 SelectionDAG &DAG) {
10992 int Size = Mask.size();
10993 int LaneSize = 128 / VT.getScalarSizeInBits();
10994 const int NumBytes = VT.getSizeInBits() / 8;
10995 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
10997 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
10998 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
10999 (Subtarget.hasBWI() && VT.is512BitVector()));
11001 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
11002 // Sign bit set in i8 mask means zero element.
11003 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
11006 for (int i = 0; i < NumBytes; ++i) {
11007 int M = Mask[i / NumEltBytes];
11009 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
11012 if (Zeroable[i / NumEltBytes]) {
11013 PSHUFBMask[i] = ZeroMask;
11017 // We can only use a single input of V1 or V2.
11018 SDValue SrcV = (M >= Size ? V2 : V1);
11019 if (V && V != SrcV)
11024 // PSHUFB can't cross lanes, ensure this doesn't happen.
11025 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
11029 M = M * NumEltBytes + (i % NumEltBytes);
11030 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
11032 assert(V && "Failed to find a source input");
11034 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
11035 return DAG.getBitcast(
11036 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
11037 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
11040 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
11041 const X86Subtarget &Subtarget, SelectionDAG &DAG,
11044 // X86 has dedicated shuffles that can be lowered to VEXPAND.
11045 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
11046 const APInt &Zeroable,
11047 ArrayRef<int> Mask, SDValue &V1,
11048 SDValue &V2, SelectionDAG &DAG,
11049 const X86Subtarget &Subtarget) {
11050 bool IsLeftZeroSide = true;
11051 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
11054 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
11056 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11057 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
11058 unsigned NumElts = VT.getVectorNumElements();
11059 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
11060 "Unexpected number of vector elements");
11061 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
11062 Subtarget, DAG, DL);
11063 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
11064 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
11065 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
11068 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
11069 unsigned &UnpackOpcode, bool IsUnary,
11070 ArrayRef<int> TargetMask, const SDLoc &DL,
11072 const X86Subtarget &Subtarget) {
11073 int NumElts = VT.getVectorNumElements();
11075 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
11076 for (int i = 0; i != NumElts; i += 2) {
11077 int M1 = TargetMask[i + 0];
11078 int M2 = TargetMask[i + 1];
11079 Undef1 &= (SM_SentinelUndef == M1);
11080 Undef2 &= (SM_SentinelUndef == M2);
11081 Zero1 &= isUndefOrZero(M1);
11082 Zero2 &= isUndefOrZero(M2);
11084 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
11085 "Zeroable shuffle detected");
11087 // Attempt to match the target mask against the unpack lo/hi mask patterns.
11088 SmallVector<int, 64> Unpckl, Unpckh;
11089 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
11090 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
11091 UnpackOpcode = X86ISD::UNPCKL;
11092 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
11093 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
11097 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
11098 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
11099 UnpackOpcode = X86ISD::UNPCKH;
11100 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
11101 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
11105 // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
11106 if (IsUnary && (Zero1 || Zero2)) {
11107 // Don't bother if we can blend instead.
11108 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
11109 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
11112 bool MatchLo = true, MatchHi = true;
11113 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
11114 int M = TargetMask[i];
11116 // Ignore if the input is known to be zero or the index is undef.
11117 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
11118 (M == SM_SentinelUndef))
11121 MatchLo &= (M == Unpckl[i]);
11122 MatchHi &= (M == Unpckh[i]);
11125 if (MatchLo || MatchHi) {
11126 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11127 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
11128 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
11133 // If a binary shuffle, commute and try again.
11135 ShuffleVectorSDNode::commuteMask(Unpckl);
11136 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
11137 UnpackOpcode = X86ISD::UNPCKL;
11142 ShuffleVectorSDNode::commuteMask(Unpckh);
11143 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
11144 UnpackOpcode = X86ISD::UNPCKH;
11153 // X86 has dedicated unpack instructions that can handle specific blend
11154 // operations: UNPCKH and UNPCKL.
11155 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
11156 ArrayRef<int> Mask, SDValue V1, SDValue V2,
11157 SelectionDAG &DAG) {
11158 SmallVector<int, 8> Unpckl;
11159 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
11160 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
11161 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
11163 SmallVector<int, 8> Unpckh;
11164 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
11165 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
11166 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
11168 // Commute and try again.
11169 ShuffleVectorSDNode::commuteMask(Unpckl);
11170 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
11171 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
11173 ShuffleVectorSDNode::commuteMask(Unpckh);
11174 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
11175 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
11180 /// Check if the mask can be mapped to a preliminary shuffle (vperm 64-bit)
11181 /// followed by unpack 256-bit.
11182 static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
11183 ArrayRef<int> Mask, SDValue V1,
11184 SDValue V2, SelectionDAG &DAG) {
11185 SmallVector<int, 32> Unpckl, Unpckh;
11186 createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
11187 createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);
11189 unsigned UnpackOpcode;
11190 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
11191 UnpackOpcode = X86ISD::UNPCKL;
11192 else if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
11193 UnpackOpcode = X86ISD::UNPCKH;
11197 // This is a "natural" unpack operation (rather than the 128-bit sectored
11198 // operation implemented by AVX). We need to rearrange 64-bit chunks of the
11199 // input in order to use the x86 instruction.
11200 V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
11201 DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
11202 V1 = DAG.getBitcast(VT, V1);
11203 return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
11206 // Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
11207 // source into the lower elements and zeroing the upper elements.
11208 // TODO: Merge with matchShuffleAsVPMOV.
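// For instance (given the required AVX512 features), a v16i8 mask
// <0,2,4,...,14> whose upper eight elements are zeroable matches with
// SrcVT = v8i16 and DstVT = v16i8.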
11209 static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
11210 ArrayRef<int> Mask, const APInt &Zeroable,
11211 const X86Subtarget &Subtarget) {
11212 if (!VT.is512BitVector() && !Subtarget.hasVLX())
11215 unsigned NumElts = Mask.size();
11216 unsigned EltSizeInBits = VT.getScalarSizeInBits();
11217 unsigned MaxScale = 64 / EltSizeInBits;
11219 for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
11220 unsigned SrcEltBits = EltSizeInBits * Scale;
11221 if (SrcEltBits < 32 && !Subtarget.hasBWI())
11223 unsigned NumSrcElts = NumElts / Scale;
11224 if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
11226 unsigned UpperElts = NumElts - NumSrcElts;
11227 if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnesValue())
11229 SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
11230 SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
11231 DstVT = MVT::getIntegerVT(EltSizeInBits);
11232 if ((NumSrcElts * EltSizeInBits) >= 128) {
11234 DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
11237 DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
11245 static bool matchShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
11247 int Size = (int)Mask.size();
11248 int Split = Size / Delta;
11249 int TruncatedVectorStart = SwappedOps ? Size : 0;
11251 // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
11252 if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
11255 // The rest of the mask should not refer to the truncated vector's elements.
11256 if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
11257 TruncatedVectorStart + Size))
11263 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
11265 // An example is the following:
11267 // t0: ch = EntryToken
11268 // t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
11269 // t25: v4i32 = truncate t2
11270 // t41: v8i16 = bitcast t25
11271 // t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
11272 // Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
11273 // t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
11274 // t18: v2i64 = bitcast t51
11276 // Without avx512vl, this is lowered to:
11278 // vpmovqd %zmm0, %ymm0
11279 // vpshufb {{.*#+}} xmm0 =
11280 // xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
11282 // But when avx512vl is available, one can just use a single vpmovdw instruction.
11284 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
11285 MVT VT, SDValue V1, SDValue V2,
11287 const X86Subtarget &Subtarget) {
11288 if (VT != MVT::v16i8 && VT != MVT::v8i16)
11291 if (Mask.size() != VT.getVectorNumElements())
11294 bool SwappedOps = false;
11296 if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
11297 if (!ISD::isBuildVectorAllZeros(V1.getNode()))
11306 // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
11307 // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
11309 // and similar ones.
11310 if (V1.getOpcode() != ISD::BITCAST)
11312 if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
11315 SDValue Src = V1.getOperand(0).getOperand(0);
11316 MVT SrcVT = Src.getSimpleValueType();
11318 // The vptrunc** instructions truncating 128 bit and 256 bit vectors
11319 // are only available with avx512vl.
11320 if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
11323 // Down Convert Word to Byte is only available with avx512bw. The case with
11324 // 256-bit output doesn't contain a shuffle and is therefore not handled here.
11325 if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
11326 !Subtarget.hasBWI())
11329 // The first half/quarter of the mask should refer to every second/fourth
11330 // element of the vector truncated and bitcasted.
11331 if (!matchShuffleAsVPMOV(Mask, SwappedOps, 2) &&
11332 !matchShuffleAsVPMOV(Mask, SwappedOps, 4))
11335 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
11338 /// Check whether a compaction lowering can be done by dropping even
11339 /// elements and compute how many times even elements must be dropped.
11341 /// This handles shuffles which take every Nth element where N is a power of
11342 /// two. Example shuffle masks:
11344 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
11345 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
11346 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
11347 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
11348 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
11349 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
11351 /// Any of these lanes can of course be undef.
11353 /// This routine only supports N <= 3.
11354 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
11357 /// \returns N above, or the number of times even elements must be dropped if
11358 /// there is such a number. Otherwise returns zero.
11359 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
11360 bool IsSingleInput) {
11361 // The modulus for the shuffle vector entries is based on whether this is
11362 // a single input or not.
11363 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
11364 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
11365 "We should only be called with masks with a power-of-2 size!");
11367 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
11369 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
11370 // and 2^3 simultaneously. This is because we may have ambiguity with
11371 // partially undef inputs.
11372 bool ViableForN[3] = {true, true, true};
11374 for (int i = 0, e = Mask.size(); i < e; ++i) {
11375 // Ignore undef lanes, we'll optimistically collapse them to the pattern we want.
11380 bool IsAnyViable = false;
11381 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
11382 if (ViableForN[j]) {
11383 uint64_t N = j + 1;
11385 // The shuffle mask must be equal to (i * 2^N) % M.
11386 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
11387 IsAnyViable = true;
11389 ViableForN[j] = false;
11391 // Early exit if we exhaust the possible powers of two.
11396 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
11400 // Return 0 as there is no viable power of two.
11404 // X86 has dedicated pack instructions that can handle specific truncation
11405 // operations: PACKSS and PACKUS.
11406 // Checks for compaction shuffle masks if MaxStages > 1.
11407 // TODO: Add support for matching multiple PACKSS/PACKUS stages.
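// For reference, a single binary PACK stage on v16i8 roughly corresponds to
// the compaction mask <0,2,...,14, 16,18,...,30>, i.e. the low byte of every
// word from both inputs, assuming the discarded bits are zero/sign bits.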
11408 static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
11409 unsigned &PackOpcode, ArrayRef<int> TargetMask,
11411 const X86Subtarget &Subtarget,
11412 unsigned MaxStages = 1) {
11413 unsigned NumElts = VT.getVectorNumElements();
11414 unsigned BitSize = VT.getScalarSizeInBits();
11415 assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
11416 "Illegal maximum compaction");
11418 auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
11419 unsigned NumSrcBits = PackVT.getScalarSizeInBits();
11420 unsigned NumPackedBits = NumSrcBits - BitSize;
11421 SDValue VV1 = DAG.getBitcast(PackVT, N1);
11422 SDValue VV2 = DAG.getBitcast(PackVT, N2);
11423 if (Subtarget.hasSSE41() || BitSize == 8) {
11424 APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
11425 if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
11426 (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
11430 PackOpcode = X86ISD::PACKUS;
11434 if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > NumPackedBits) &&
11435 (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > NumPackedBits)) {
11439 PackOpcode = X86ISD::PACKSS;
11445 // Attempt to match against wider and wider compaction patterns.
11446 for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
11447 MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
11448 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
11450 // Try binary shuffle.
11451 SmallVector<int, 32> BinaryMask;
11452 createPackShuffleMask(VT, BinaryMask, false, NumStages);
11453 if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
11454 if (MatchPACK(V1, V2, PackVT))
11457 // Try unary shuffle.
11458 SmallVector<int, 32> UnaryMask;
11459 createPackShuffleMask(VT, UnaryMask, true, NumStages);
11460 if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
11461 if (MatchPACK(V1, V1, PackVT))
11468 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
11469 SDValue V1, SDValue V2, SelectionDAG &DAG,
11470 const X86Subtarget &Subtarget) {
11472 unsigned PackOpcode;
11473 unsigned SizeBits = VT.getSizeInBits();
11474 unsigned EltBits = VT.getScalarSizeInBits();
11475 unsigned MaxStages = Log2_32(64 / EltBits);
11476 if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
11477 Subtarget, MaxStages))
11480 unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
11481 unsigned NumStages = Log2_32(CurrentEltBits / EltBits);
11483 // Don't lower multi-stage packs on AVX512, truncation is better.
11484 if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
11487 // Pack to the largest type possible:
11488 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
11489 unsigned MaxPackBits = 16;
11490 if (CurrentEltBits > 16 &&
11491 (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
11494 // Repeatedly pack down to the target size.
11496 for (unsigned i = 0; i != NumStages; ++i) {
11497 unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
11498 unsigned NumSrcElts = SizeBits / SrcEltBits;
11499 MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
11500 MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
11501 MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
11502 MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
11503 Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
11504 DAG.getBitcast(SrcVT, V2));
11506 CurrentEltBits /= 2;
11508 assert(Res && Res.getValueType() == VT &&
11509 "Failed to lower compaction shuffle");
11513 /// Try to emit a bitmask instruction for a shuffle.
11515 /// This handles cases where we can model a blend exactly as a bitmask due to
11516 /// one of the inputs being zeroable.
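/// For example, a v4i32 mask <0,5,2,7> where both selected elements of V2 are
/// zeroable can be lowered as V1 & <-1,0,-1,0>.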
11517 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
11518 SDValue V2, ArrayRef<int> Mask,
11519 const APInt &Zeroable,
11520 const X86Subtarget &Subtarget,
11521 SelectionDAG &DAG) {
11523 MVT EltVT = VT.getVectorElementType();
11524 SDValue Zero, AllOnes;
11525 // Use f64 if i64 isn't legal.
11526 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
11528 MaskVT = MVT::getVectorVT(EltVT, Mask.size());
11532 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
11533 Zero = DAG.getConstantFP(0.0, DL, EltVT);
11534 APFloat AllOnesValue = APFloat::getAllOnesValue(
11535 SelectionDAG::EVTToAPFloatSemantics(EltVT), EltVT.getSizeInBits());
11536 AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
11538 MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
11540 Zero = DAG.getConstant(0, DL, EltVT);
11541 AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11544 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
11546 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11549 if (Mask[i] % Size != i)
11550 return SDValue(); // Not a blend.
11552 V = Mask[i] < Size ? V1 : V2;
11553 else if (V != (Mask[i] < Size ? V1 : V2))
11554 return SDValue(); // Can only let one input through the mask.
11556 VMaskOps[i] = AllOnes;
11559 return SDValue(); // No non-zeroable elements!
11561 SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
11562 VMask = DAG.getBitcast(LogicVT, VMask);
11563 V = DAG.getBitcast(LogicVT, V);
11564 SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
11565 return DAG.getBitcast(VT, And);
11568 /// Try to emit a blend instruction for a shuffle using bit math.
11570 /// This is used as a fallback approach when first class blend instructions are
11571 /// unavailable. Currently it is only suitable for integer vectors, but could
11572 /// be generalized for floating point vectors if desirable.
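/// For example, a v4i32 mask <0,5,2,7> becomes
/// (V1 & <-1,0,-1,0>) | (ANDNP(<-1,0,-1,0>, V2)).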
11573 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
11574 SDValue V2, ArrayRef<int> Mask,
11575 SelectionDAG &DAG) {
11576 assert(VT.isInteger() && "Only supports integer vector types!");
11577 MVT EltVT = VT.getVectorElementType();
11578 SDValue Zero = DAG.getConstant(0, DL, EltVT);
11579 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11580 SmallVector<SDValue, 16> MaskOps;
11581 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11582 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
11583 return SDValue(); // Shuffled input!
11584 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
11587 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
11588 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
11589 V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
11590 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
11593 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
11594 SDValue PreservedSrc,
11595 const X86Subtarget &Subtarget,
11596 SelectionDAG &DAG);
11598 static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
11599 MutableArrayRef<int> Mask,
11600 const APInt &Zeroable, bool &ForceV1Zero,
11601 bool &ForceV2Zero, uint64_t &BlendMask) {
11602 bool V1IsZeroOrUndef =
11603 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
11604 bool V2IsZeroOrUndef =
11605 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
11608 ForceV1Zero = false, ForceV2Zero = false;
11609 assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
11611 // Attempt to generate the binary blend mask. If an input is zero then
11612 // we can use any lane.
11613 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11615 if (M == SM_SentinelUndef)
11619 if (M == i + Size) {
11620 BlendMask |= 1ull << i;
11624 if (V1IsZeroOrUndef) {
11625 ForceV1Zero = true;
11629 if (V2IsZeroOrUndef) {
11630 ForceV2Zero = true;
11631 BlendMask |= 1ull << i;
11632 Mask[i] = i + Size;
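// Widen each bit of a blend mask by Scale; e.g. BlendMask 0b0101 with Size 4
// and Scale 2 becomes 0b00110011.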
11641 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
11643 uint64_t ScaledMask = 0;
11644 for (int i = 0; i != Size; ++i)
11645 if (BlendMask & (1ull << i))
11646 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
11650 /// Try to emit a blend instruction for a shuffle.
11652 /// This doesn't do any checks for the availability of instructions for blending
11653 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
11654 /// be matched in the backend with the type given. What it does check for is
11655 /// that the shuffle mask is a blend, or convertible into a blend with zero.
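/// For example, a v4f32 mask <0,5,2,7> is a blend with BlendMask 0b1010,
/// where a set bit selects the corresponding lane from V2.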
11656 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
11657 SDValue V2, ArrayRef<int> Original,
11658 const APInt &Zeroable,
11659 const X86Subtarget &Subtarget,
11660 SelectionDAG &DAG) {
11661 uint64_t BlendMask = 0;
11662 bool ForceV1Zero = false, ForceV2Zero = false;
11663 SmallVector<int, 64> Mask(Original.begin(), Original.end());
11664 if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11668 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
11670 V1 = getZeroVector(VT, Subtarget, DAG, DL);
11672 V2 = getZeroVector(VT, Subtarget, DAG, DL);
11674 switch (VT.SimpleTy) {
11677 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
11681 assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
11688 assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
11689 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
11690 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11691 case MVT::v16i16: {
11692 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
11693 SmallVector<int, 8> RepeatedMask;
11694 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
11695 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
11696 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
11698 for (int i = 0; i < 8; ++i)
11699 if (RepeatedMask[i] >= 8)
11700 BlendMask |= 1ull << i;
11701 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11702 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11704 // Use PBLENDW for lower/upper lanes and then blend lanes.
11705 // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
11706 // merge to VSELECT where useful.
11707 uint64_t LoMask = BlendMask & 0xFF;
11708 uint64_t HiMask = (BlendMask >> 8) & 0xFF;
11709 if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
11710 SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11711 DAG.getTargetConstant(LoMask, DL, MVT::i8));
11712 SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11713 DAG.getTargetConstant(HiMask, DL, MVT::i8));
11714 return DAG.getVectorShuffle(
11715 MVT::v16i16, DL, Lo, Hi,
11716 {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
11721 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
11724 assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
11726 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
11727 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11731 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
11733 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11734 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11735 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11738 // If we have VPTERNLOG, we can use that as a bit blend.
11739 if (Subtarget.hasVLX())
11740 if (SDValue BitBlend =
11741 lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
11744 // Scale the blend by the number of bytes per element.
11745 int Scale = VT.getScalarSizeInBits() / 8;
11747 // This form of blend is always done on bytes. Compute the byte vector type.
11749 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11751 // x86 allows load folding with blendvb from the 2nd source operand. But
11752 // we are still using LLVM select here (see comment below), so that's V1.
11753 // If V2 can be load-folded and V1 cannot be load-folded, then commute to
11754 // allow that load-folding possibility.
11755 if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
11756 ShuffleVectorSDNode::commuteMask(Mask);
11760 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
11761 // mix of LLVM's code generator and the x86 backend. We tell the code
11762 // generator that boolean values in the elements of an x86 vector register
11763 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
11764 // mapping a select to operand #1, and 'false' mapping to operand #2. The
11765 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
11766 // of the element (the remaining are ignored) and 0 in that high bit would
11767 // mean operand #1 while 1 in the high bit would mean operand #2. So while
11768 // the LLVM model for boolean values in vector elements gets the relevant
11769 // bit set, it is set backwards and over constrained relative to x86's modeling.
11771 SmallVector<SDValue, 32> VSELECTMask;
11772 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11773 for (int j = 0; j < Scale; ++j)
11774 VSELECTMask.push_back(
11775 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
11776 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
11779 V1 = DAG.getBitcast(BlendVT, V1);
11780 V2 = DAG.getBitcast(BlendVT, V2);
11781 return DAG.getBitcast(
11783 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
11792 // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
11793 bool OptForSize = DAG.shouldOptForSize();
11795 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11800 // Otherwise load an immediate into a GPR, cast to k-register, and use a masked move.
11803 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11804 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11805 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11808 llvm_unreachable("Not a supported integer vector type!");
11812 /// Try to lower as a blend of elements from two inputs followed by
11813 /// a single-input permutation.
11815 /// This matches the pattern where we can blend elements from two inputs and
11816 /// then reduce the shuffle to a single-input permutation.
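/// For example, a v4i32 mask <2,7,1,4> can be lowered as the in-place blend
/// <4,1,2,7> followed by the single-input permute <2,3,1,0>.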
11817 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
11818 SDValue V1, SDValue V2,
11819 ArrayRef<int> Mask,
11821 bool ImmBlends = false) {
11822 // We build up the blend mask while checking whether a blend is a viable way
11823 // to reduce the shuffle.
11824 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11825 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
11827 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11831 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
11833 if (BlendMask[Mask[i] % Size] < 0)
11834 BlendMask[Mask[i] % Size] = Mask[i];
11835 else if (BlendMask[Mask[i] % Size] != Mask[i])
11836 return SDValue(); // Can't blend in the needed input!
11838 PermuteMask[i] = Mask[i] % Size;
11841 // If only immediate blends, then bail if the blend mask can't be widened to i16 blends.
11843 unsigned EltSize = VT.getScalarSizeInBits();
11844 if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
11847 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11848 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
11851 /// Try to lower as an unpack of elements from two inputs followed by
11852 /// a single-input permutation.
11854 /// This matches the pattern where we can unpack elements from two inputs and
11855 /// then reduce the shuffle to a single-input (wider) permutation.
11856 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
11857 SDValue V1, SDValue V2,
11858 ArrayRef<int> Mask,
11859 SelectionDAG &DAG) {
11860 int NumElts = Mask.size();
11861 int NumLanes = VT.getSizeInBits() / 128;
11862 int NumLaneElts = NumElts / NumLanes;
11863 int NumHalfLaneElts = NumLaneElts / 2;
11865 bool MatchLo = true, MatchHi = true;
11866 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
11868 // Determine UNPCKL/UNPCKH type and operand order.
11869 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11870 for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
11871 int M = Mask[Lane + Elt];
11875 SDValue &Op = Ops[Elt & 1];
11876 if (M < NumElts && (Op.isUndef() || Op == V1))
11878 else if (NumElts <= M && (Op.isUndef() || Op == V2))
11883 int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
11884 MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
11885 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
11886 MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
11887 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
11888 if (!MatchLo && !MatchHi)
11892 assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
11894 // Now check that each pair of elts comes from the same unpack pair
11895 // and set the permute mask based on each pair.
11896 // TODO - Investigate cases where we permute individual elements.
11897 SmallVector<int, 32> PermuteMask(NumElts, -1);
11898 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11899 for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
11900 int M0 = Mask[Lane + Elt + 0];
11901 int M1 = Mask[Lane + Elt + 1];
11902 if (0 <= M0 && 0 <= M1 &&
11903 (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
11906 PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
11908 PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
11912 unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11913 SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
11914 return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
11917 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
11918 /// permuting the elements of the result in place.
11919 static SDValue lowerShuffleAsByteRotateAndPermute(
11920 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11921 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11922 if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
11923 (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
11924 (VT.is512BitVector() && !Subtarget.hasBWI()))
11927 // We don't currently support lane crossing permutes.
11928 if (is128BitLaneCrossingShuffleMask(VT, Mask))
11931 int Scale = VT.getScalarSizeInBits() / 8;
11932 int NumLanes = VT.getSizeInBits() / 128;
11933 int NumElts = VT.getVectorNumElements();
11934 int NumEltsPerLane = NumElts / NumLanes;
11936 // Determine range of mask elts.
11937 bool Blend1 = true;
11938 bool Blend2 = true;
11939 std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11940 std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11941 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11942 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11943 int M = Mask[Lane + Elt];
11947 Blend1 &= (M == (Lane + Elt));
11948 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11949 M = M % NumEltsPerLane;
11950 Range1.first = std::min(Range1.first, M);
11951 Range1.second = std::max(Range1.second, M);
11954 Blend2 &= (M == (Lane + Elt));
11955 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11956 M = M % NumEltsPerLane;
11957 Range2.first = std::min(Range2.first, M);
11958 Range2.second = std::max(Range2.second, M);
11963 // Bail if we don't need both elements.
11964 // TODO - it might be worth doing this for unary shuffles if the permute
11966 if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11967 !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11970 if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11973 // Rotate the 2 ops so we can access both ranges, then permute the result.
11974 auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11975 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11976 SDValue Rotate = DAG.getBitcast(
11977 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11978 DAG.getBitcast(ByteVT, Lo),
11979 DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
11980 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11981 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11982 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11983 int M = Mask[Lane + Elt];
11987 PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11989 PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
11992 return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
11995 // Check if the ranges are small enough to rotate from either direction.
11996 if (Range2.second < Range1.first)
11997 return RotateAndPermute(V1, V2, Range1.first, 0);
11998 if (Range1.second < Range2.first)
11999 return RotateAndPermute(V2, V1, Range2.first, NumElts);
12003 /// Generic routine to decompose a shuffle and blend into independent
12004 /// blends and permutes.
12006 /// This matches the extremely common pattern for handling combined
12007 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
12008 /// operations. It will try to pick the best arrangement of shuffles and blends.
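/// For example, a v4i32 mask <3,7,0,4> decomposes into V1 shuffled by
/// <3,-1,0,-1>, V2 shuffled by <-1,3,-1,0>, and the blend <0,5,2,7>.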
12010 static SDValue lowerShuffleAsDecomposedShuffleBlend(
12011 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12012 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12013 // Shuffle the input elements into the desired positions in V1 and V2 and
12014 // blend them together.
12015 SmallVector<int, 32> V1Mask(Mask.size(), -1);
12016 SmallVector<int, 32> V2Mask(Mask.size(), -1);
12017 SmallVector<int, 32> BlendMask(Mask.size(), -1);
12018 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12019 if (Mask[i] >= 0 && Mask[i] < Size) {
12020 V1Mask[i] = Mask[i];
12022 } else if (Mask[i] >= Size) {
12023 V2Mask[i] = Mask[i] - Size;
12024 BlendMask[i] = i + Size;
12027 // Try to lower with the simpler initial blend/unpack/rotate strategies unless
12028 // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
12029 // the shuffle may be able to fold with a load or other benefit. However, when
12030 // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
12031 // pre-shuffle first is a better strategy.
12032 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
12033 // Only prefer immediate blends to unpack/rotate.
12034 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
12037 if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
12040 if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
12041 DL, VT, V1, V2, Mask, Subtarget, DAG))
12043 // Unpack/rotate failed - try again with variable blends.
12044 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
12049 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
12050 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
12051 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
12054 /// Try to lower a vector shuffle as a bit rotation.
12056 /// Look for a repeated rotation pattern in each sub-group.
12057 /// Returns an ISD::ROTL element rotation amount, or -1 on failure.
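/// For example, with NumSubElts == 4, a v16i8 mask that repeats <3,0,1,2> in
/// every group of four bytes is a rotation by one element (an i32 ROTL by 8
/// bits once scaled by the caller).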
12058 static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
12059 int NumElts = Mask.size();
12060 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
12062 int RotateAmt = -1;
12063 for (int i = 0; i != NumElts; i += NumSubElts) {
12064 for (int j = 0; j != NumSubElts; ++j) {
12065 int M = Mask[i + j];
12068 if (!isInRange(M, i, i + NumSubElts))
12070 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
12071 if (0 <= RotateAmt && Offset != RotateAmt)
12073 RotateAmt = Offset;
12079 static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
12080 const X86Subtarget &Subtarget,
12081 ArrayRef<int> Mask) {
12082 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
12083 assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");
12085 // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
12086 int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
12087 int MaxSubElts = 64 / EltSizeInBits;
12088 for (int NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
12089 int RotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
12093 int NumElts = Mask.size();
12094 MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
12095 RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
12096 return RotateAmt * EltSizeInBits;
12102 /// Lower shuffle using X86ISD::VROTLI rotations.
12103 static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
12104 ArrayRef<int> Mask,
12105 const X86Subtarget &Subtarget,
12106 SelectionDAG &DAG) {
12107 // Only XOP + AVX512 targets have bit rotation instructions.
12108 // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
12110 (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
12111 if (!IsLegal && Subtarget.hasSSE3())
12115 int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
12120 // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
12121 // expanded to OR(SRL,SHL), will be more efficient, but if they can
12122 // widen to vXi16 or more then the existing lowering should be better.
12124 if ((RotateAmt % 16) == 0)
12126 // TODO: Use getTargetVShiftByConstNode.
12127 unsigned ShlAmt = RotateAmt;
12128 unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
12129 V1 = DAG.getBitcast(RotateVT, V1);
12130 SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
12131 DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
12132 SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
12133 DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
12134 SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
12135 return DAG.getBitcast(VT, Rot);
12139 DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
12140 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
12141 return DAG.getBitcast(VT, Rot);
12144 /// Try to match a vector shuffle as an element rotation.
12146 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
12147 static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
12148 ArrayRef<int> Mask) {
12149 int NumElts = Mask.size();
12151 // We need to detect various ways of spelling a rotation:
12152 // [11, 12, 13, 14, 15, 0, 1, 2]
12153 // [-1, 12, 13, 14, -1, -1, 1, -1]
12154 // [-1, -1, -1, -1, -1, -1, 1, 2]
12155 // [ 3, 4, 5, 6, 7, 8, 9, 10]
12156 // [-1, 4, 5, 6, -1, -1, 9, -1]
12157 // [-1, 4, 5, 6, -1, -1, -1, -1]
12160 for (int i = 0; i < NumElts; ++i) {
12162 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
12163 "Unexpected mask index.");
12167 // Determine where a rotated vector would have started.
12168 int StartIdx = i - (M % NumElts);
12170 // The identity rotation isn't interesting, stop.
12173 // If we found the tail of a vector the rotation must be the missing
12174 // front. If we found the head of a vector, it must be how many of its leading elements remain at the end (NumElts - StartIdx).
12176 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
12179 Rotation = CandidateRotation;
12180 else if (Rotation != CandidateRotation)
12181 // The rotations don't match, so we can't match this mask.
12184 // Compute which value this mask is pointing at.
12185 SDValue MaskV = M < NumElts ? V1 : V2;
12187 // Compute which of the two target values this index should be assigned
12188 // to. This reflects whether the high elements are remaining or the low
12189 // elements are remaining.
12190 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
12192 // Either set up this value if we've not encountered it before, or check
12193 // that it remains consistent.
12196 else if (TargetV != MaskV)
12197 // This may be a rotation, but it pulls from the inputs in some
12198 // unsupported interleaving.
12202 // Check that we successfully analyzed the mask, and normalize the results.
12203 assert(Rotation != 0 && "Failed to locate a viable rotation!");
12204 assert((Lo || Hi) && "Failed to find a rotated input vector!");
12216 /// Try to lower a vector shuffle as a byte rotation.
12218 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
12219 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
12220 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
12221 /// try to generically lower a vector shuffle through such a pattern. It
12222 /// does not check for the profitability of lowering either as PALIGNR or
12223 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
12224 /// This matches shuffle vectors that look like:
12226 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
12228 /// Essentially it concatenates V1 and V2, shifts right by some number of
12229 /// elements, and takes the low elements as the result. Note that while this is
12230 /// specified as a *right shift* because x86 is little-endian, it is a *left
12231 /// rotate* of the vector lanes.
12232 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
12233 ArrayRef<int> Mask) {
12234 // Don't accept any shuffles with zero elements.
12235 if (isAnyZero(Mask))
12238 // PALIGNR works on 128-bit lanes.
12239 SmallVector<int, 16> RepeatedMask;
12240 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
12243 int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
12247 // PALIGNR rotates bytes, so we need to scale the
12248 // rotation based on how many bytes are in the vector lane.
12249 int NumElts = RepeatedMask.size();
12250 int Scale = 16 / NumElts;
12251 return Rotation * Scale;
12254 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
12255 SDValue V2, ArrayRef<int> Mask,
12256 const X86Subtarget &Subtarget,
12257 SelectionDAG &DAG) {
12258 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
12260 SDValue Lo = V1, Hi = V2;
12261 int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
12262 if (ByteRotation <= 0)
12265 // Cast the inputs to i8 vector of correct length to match PALIGNR or the PSLLDQ/PSRLDQ fallback.
12267 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
12268 Lo = DAG.getBitcast(ByteVT, Lo);
12269 Hi = DAG.getBitcast(ByteVT, Hi);
12271 // SSSE3 targets can use the palignr instruction.
12272 if (Subtarget.hasSSSE3()) {
12273 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
12274 "512-bit PALIGNR requires BWI instructions");
12275 return DAG.getBitcast(
12276 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
12277 DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
12280 assert(VT.is128BitVector() &&
12281 "Rotate-based lowering only supports 128-bit lowering!");
12282 assert(Mask.size() <= 16 &&
12283 "Can shuffle at most 16 bytes in a 128-bit vector!");
12284 assert(ByteVT == MVT::v16i8 &&
12285 "SSE2 rotate lowering only needed for v16i8!");
12287 // Default SSE2 implementation
12288 int LoByteShift = 16 - ByteRotation;
12289 int HiByteShift = ByteRotation;
12292 DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
12293 DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
12295 DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
12296 DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
12297 return DAG.getBitcast(VT,
12298 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
12301 /// Try to lower a vector shuffle as a dword/qword rotation.
12303 /// AVX512 has a VALIGND/VALIGNQ instructions that will do an arbitrary
12304 /// rotation of the concatenation of two vectors; this routine will
12305 /// try to generically lower a vector shuffle through such a pattern.
12307 /// Essentially it concatenates V1 and V2, shifts right by some number of
12308 /// elements, and takes the low elements as the result. Note that while this is
12309 /// specified as a *right shift* because x86 is little-endian, it is a *left
12310 /// rotate* of the vector lanes.
12311 static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
12312 SDValue V2, ArrayRef<int> Mask,
12313 const X86Subtarget &Subtarget,
12314 SelectionDAG &DAG) {
12315 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
12316 "Only 32-bit and 64-bit elements are supported!");
12318 // 128/256-bit vectors are only supported with VLX.
12319 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
12320 && "VLX required for 128/256-bit vectors");
12322 SDValue Lo = V1, Hi = V2;
12323 int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
12327 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
12328 DAG.getTargetConstant(Rotation, DL, MVT::i8));
12331 /// Try to lower a vector shuffle as a byte shift sequence.
12332 static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
12333 SDValue V2, ArrayRef<int> Mask,
12334 const APInt &Zeroable,
12335 const X86Subtarget &Subtarget,
12336 SelectionDAG &DAG) {
12337 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
12338 assert(VT.is128BitVector() && "Only 128-bit vectors supported");
12340 // We need a shuffle that has zeros at one/both ends and a sequential
12341 // shuffle from one source within.
12342 unsigned ZeroLo = Zeroable.countTrailingOnes();
12343 unsigned ZeroHi = Zeroable.countLeadingOnes();
12344 if (!ZeroLo && !ZeroHi)
12347 unsigned NumElts = Mask.size();
12348 unsigned Len = NumElts - (ZeroLo + ZeroHi);
12349 if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
12352 unsigned Scale = VT.getScalarSizeInBits() / 8;
12353 ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
12354 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
12355 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
12358 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
12359 Res = DAG.getBitcast(MVT::v16i8, Res);
12361 // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
12362 // inner sequential set of elements, possibly offset:
12363 // 01234567 --> zzzzzz01 --> 1zzzzzzz
12364 // 01234567 --> 4567zzzz --> zzzzz456
12365 // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
12367 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
12368 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
12369 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
12370 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
12371 DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
12372 } else if (ZeroHi == 0) {
12373 unsigned Shift = Mask[ZeroLo] % NumElts;
12374 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
12375 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
12376 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
12377 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
12378 } else if (!Subtarget.hasSSSE3()) {
12379 // If we don't have PSHUFB then it's worth avoiding an AND constant mask
12380 // by performing 3 byte shifts. Shuffle combining can kick in above that.
12381 // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
12382 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
12383 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
12384 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
12385 Shift += Mask[ZeroLo] % NumElts;
12386 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
12387 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
12388 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
12389 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
12393 return DAG.getBitcast(VT, Res);
12396 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
12398 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
12399 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
12400 /// matches elements from one of the input vectors shuffled to the left or
12401 /// right with zeroable elements 'shifted in'. It handles both the strictly
12402 /// bit-wise element shifts and the byte shift across an entire 128-bit double
12403 /// quad word lane.
12405 /// PSLL : (little-endian) left bit shift.
12406 /// [ zz, 0, zz, 2 ]
12407 /// [ -1, 4, zz, -1 ]
12408 /// PSRL : (little-endian) right bit shift.
12409 /// [ 1, zz, 3, zz]
12410 /// [ -1, -1, 7, zz]
12411 /// PSLLDQ : (little-endian) left byte shift
12412 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
12413 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
12414 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
12415 /// PSRLDQ : (little-endian) right byte shift
12416 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
12417 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
12418 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
12419 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
12420 unsigned ScalarSizeInBits, ArrayRef<int> Mask,
12421 int MaskOffset, const APInt &Zeroable,
12422 const X86Subtarget &Subtarget) {
12423 int Size = Mask.size();
12424 unsigned SizeInBits = Size * ScalarSizeInBits;
12426 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
12427 for (int i = 0; i < Size; i += Scale)
12428 for (int j = 0; j < Shift; ++j)
12429 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
12435 auto MatchShift = [&](int Shift, int Scale, bool Left) {
12436 for (int i = 0; i != Size; i += Scale) {
12437 unsigned Pos = Left ? i + Shift : i;
12438 unsigned Low = Left ? i : i + Shift;
12439 unsigned Len = Scale - Shift;
12440 if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
12444 int ShiftEltBits = ScalarSizeInBits * Scale;
12445 bool ByteShift = ShiftEltBits > 64;
12446 Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
12447 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
12448 int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
12450 // Normalize the scale for byte shifts to still produce an i64 element
12452 Scale = ByteShift ? Scale / 2 : Scale;
12454 // We need to round trip through the appropriate type for the shift.
12455 MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
12456 ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
12457 : MVT::getVectorVT(ShiftSVT, Size / Scale);
12458 return (int)ShiftAmt;
12461 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
12462 // keep doubling the size of the integer elements up to that. We can
12463 // then shift the elements of the integer vector by whole multiples of
12464 // their width within the elements of the larger integer vector. Test each
12465 // multiple to see if we can find a match with the moved element indices
12466 // and that the shifted in elements are all zeroable.
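// For example (a rough sketch): a v8i16 mask of the form
// [ zz, 0, zz, 2, zz, 4, zz, 6 ] can be matched by treating the vector as
// v4i32 and shifting every i32 element left by 16 bits (Scale == 2,
// Shift == 1), i.e. a single VPSLLD by 16.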
12467 unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
12468 for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
12469 for (int Shift = 1; Shift != Scale; ++Shift)
12470 for (bool Left : {true, false})
12471 if (CheckZeros(Shift, Scale, Left)) {
12472 int ShiftAmt = MatchShift(Shift, Scale, Left);
12481 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
12482 SDValue V2, ArrayRef<int> Mask,
12483 const APInt &Zeroable,
12484 const X86Subtarget &Subtarget,
12485 SelectionDAG &DAG) {
12486 int Size = Mask.size();
12487 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12493 // Try to match shuffle against V1 shift.
12494 int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
12495 Mask, 0, Zeroable, Subtarget);
12497 // If V1 failed, try to match shuffle against V2 shift.
12498 if (ShiftAmt < 0) {
12499 ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
12500 Mask, Size, Zeroable, Subtarget);
12507 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
12508 "Illegal integer vector type");
12509 V = DAG.getBitcast(ShiftVT, V);
12510 V = DAG.getNode(Opcode, DL, ShiftVT, V,
12511 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
12512 return DAG.getBitcast(VT, V);
12515 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
12516 // Remainder of lower half result is zero and upper half is all undef.
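// For example (a sketch, v8i16): the mask [ 2, 3, zz, zz, u, u, u, u ]
// extracts Len = 2 elements starting at Idx = 2, i.e. EXTRQI with
// BitLen = 32 and BitIdx = 32.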
12517 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
12518 ArrayRef<int> Mask, uint64_t &BitLen,
12519 uint64_t &BitIdx, const APInt &Zeroable) {
12520 int Size = Mask.size();
12521 int HalfSize = Size / 2;
12522 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12523 assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
12525 // Upper half must be undefined.
12526 if (!isUndefUpperHalf(Mask))
12529 // Determine the extraction length from the part of the
12530 // lower half that isn't zeroable.
12531 int Len = HalfSize;
12532 for (; Len > 0; --Len)
12533 if (!Zeroable[Len - 1])
12535 assert(Len > 0 && "Zeroable shuffle mask");
12537 // Attempt to match first Len sequential elements from the lower half.
12540 for (int i = 0; i != Len; ++i) {
12542 if (M == SM_SentinelUndef)
12544 SDValue &V = (M < Size ? V1 : V2);
12547 // The extracted elements must start at a valid index and all mask
12548 // elements must be in the lower half.
12549 if (i > M || M >= HalfSize)
12552 if (Idx < 0 || (Src == V && Idx == (M - i))) {
12560 if (!Src || Idx < 0)
12563 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
12564 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
12565 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
12570 // INSERTQ: Extract lowest Len elements from lower half of second source and
12571 // insert over first source, starting at Idx.
12572 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
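// For example (a sketch, v8i16): the mask [ 0, 8, 9, 3, u, u, u, u ] keeps
// A[0] and A[3] and inserts B[0..1] at Idx = 1, i.e. INSERTQI with
// BitLen = 32 and BitIdx = 16.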
12573 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
12574 ArrayRef<int> Mask, uint64_t &BitLen,
12575 uint64_t &BitIdx) {
12576 int Size = Mask.size();
12577 int HalfSize = Size / 2;
12578 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12580 // Upper half must be undefined.
12581 if (!isUndefUpperHalf(Mask))
12584 for (int Idx = 0; Idx != HalfSize; ++Idx) {
12587 // Attempt to match first source from mask before insertion point.
12588 if (isUndefInRange(Mask, 0, Idx)) {
12590 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
12592 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
12598 // Extend the extraction length looking to match both the insertion of
12599 // the second source and the remaining elements of the first.
12600 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
12602 int Len = Hi - Idx;
12604 // Match insertion.
12605 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
12607 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
12613 // Match the remaining elements of the lower half.
12614 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
12616 } else if ((!Base || (Base == V1)) &&
12617 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
12619 } else if ((!Base || (Base == V2)) &&
12620 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
12627 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
12628 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
12638 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
12639 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
12640 SDValue V2, ArrayRef<int> Mask,
12641 const APInt &Zeroable, SelectionDAG &DAG) {
12642 uint64_t BitLen, BitIdx;
12643 if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
12644 return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
12645 DAG.getTargetConstant(BitLen, DL, MVT::i8),
12646 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
12648 if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
12649 return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
12650 V2 ? V2 : DAG.getUNDEF(VT),
12651 DAG.getTargetConstant(BitLen, DL, MVT::i8),
12652 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
12657 /// Lower a vector shuffle as a zero or any extension.
12659 /// Given a specific number of elements, element bit width, and extension
12660 /// stride, produce either a zero or any extension based on the available
12661 /// features of the subtarget. The extended elements are consecutive and
12662 /// can start from an offset element index in the input; to
12663 /// avoid excess shuffling, the offset must either be in the bottom lane
12664 /// or at the start of a higher lane. All extended elements must be from
12666 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
12667 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
12668 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12669 assert(Scale > 1 && "Need a scale to extend.");
12670 int EltBits = VT.getScalarSizeInBits();
12671 int NumElements = VT.getVectorNumElements();
12672 int NumEltsPerLane = 128 / EltBits;
12673 int OffsetLane = Offset / NumEltsPerLane;
12674 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
12675 "Only 8, 16, and 32 bit elements can be extended.");
12676 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
12677 assert(0 <= Offset && "Extension offset must be positive.");
12678 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
12679 "Extension offset must be in the first lane or start an upper lane.");
12681 // Check that an index is in same lane as the base offset.
12682 auto SafeOffset = [&](int Idx) {
12683 return OffsetLane == (Idx / NumEltsPerLane);
12686 // Shift along an input so that the offset base moves to the first element.
12687 auto ShuffleOffset = [&](SDValue V) {
12691 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12692 for (int i = 0; i * Scale < NumElements; ++i) {
12693 int SrcIdx = i + Offset;
12694 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
12696 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
12699 // Found a valid a/zext mask! Try various lowering strategies based on the
12700 // input type and available ISA extensions.
12701 if (Subtarget.hasSSE41()) {
12702 // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
12703 // PUNPCK will catch this in a later shuffle match.
12704 if (Offset && Scale == 2 && VT.is128BitVector())
12706 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
12707 NumElements / Scale);
12708 InputV = ShuffleOffset(InputV);
12709 InputV = getExtendInVec(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND, DL,
12710 ExtVT, InputV, DAG);
12711 return DAG.getBitcast(VT, InputV);
12714 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
12716 // For any extends we can cheat for larger element sizes and use shuffle
12717 // instructions that can fold with a load and/or copy.
12718 if (AnyExt && EltBits == 32) {
12719 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
12721 return DAG.getBitcast(
12722 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12723 DAG.getBitcast(MVT::v4i32, InputV),
12724 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
12726 if (AnyExt && EltBits == 16 && Scale > 2) {
12727 int PSHUFDMask[4] = {Offset / 2, -1,
12728 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
12729 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12730 DAG.getBitcast(MVT::v4i32, InputV),
12731 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
12732 int PSHUFWMask[4] = {1, -1, -1, -1};
12733 unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
12734 return DAG.getBitcast(
12735 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
12736 DAG.getBitcast(MVT::v8i16, InputV),
12737 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
12740 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
12742 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
12743 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
12744 assert(VT.is128BitVector() && "Unexpected vector width!");
12746 int LoIdx = Offset * EltBits;
12747 SDValue Lo = DAG.getBitcast(
12748 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12749 DAG.getTargetConstant(EltBits, DL, MVT::i8),
12750 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
12752 if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
12753 return DAG.getBitcast(VT, Lo);
12755 int HiIdx = (Offset + 1) * EltBits;
12756 SDValue Hi = DAG.getBitcast(
12757 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12758 DAG.getTargetConstant(EltBits, DL, MVT::i8),
12759 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
12760 return DAG.getBitcast(VT,
12761 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
12764 // If this would require more than 2 unpack instructions to expand, use
12765 // pshufb when available. We can only use more than 2 unpack instructions
12766 // when zero extending i8 elements which also makes it easier to use pshufb.
12767 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
12768 assert(NumElements == 16 && "Unexpected byte vector width!");
12769 SDValue PSHUFBMask[16];
12770 for (int i = 0; i < 16; ++i) {
12771 int Idx = Offset + (i / Scale);
12772 if ((i % Scale == 0 && SafeOffset(Idx))) {
12773 PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
12777 AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
12779 InputV = DAG.getBitcast(MVT::v16i8, InputV);
12780 return DAG.getBitcast(
12781 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
12782 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
12785 // If we are extending from an offset, ensure we start on a boundary that
12786 // we can unpack from.
12787 int AlignToUnpack = Offset % (NumElements / Scale);
12788 if (AlignToUnpack) {
12789 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12790 for (int i = AlignToUnpack; i < NumElements; ++i)
12791 ShMask[i - AlignToUnpack] = i;
12792 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
12793 Offset -= AlignToUnpack;
12796 // Otherwise emit a sequence of unpacks.
12798 unsigned UnpackLoHi = X86ISD::UNPCKL;
12799 if (Offset >= (NumElements / 2)) {
12800 UnpackLoHi = X86ISD::UNPCKH;
12801 Offset -= (NumElements / 2);
12804 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
12805 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
12806 : getZeroVector(InputVT, Subtarget, DAG, DL);
12807 InputV = DAG.getBitcast(InputVT, InputV);
12808 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
12812 } while (Scale > 1);
12813 return DAG.getBitcast(VT, InputV);
12816 /// Try to lower a vector shuffle as a zero extension on any microarch.
12818 /// This routine will try to do everything in its power to cleverly lower
12819 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
12820 /// check for the profitability of this lowering; it tries to aggressively
12821 /// match this pattern. It will use all of the micro-architectural details it
12822 /// can to emit an efficient lowering. It handles both blends with all-zero
12823 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
12824 /// masking out later).
12826 /// The reason we have dedicated lowering for zext-style shuffles is that they
12827 /// are both incredibly common and often quite performance sensitive.
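///
/// For example (a sketch): the v8i16 mask [ 0, zz, 1, zz, 2, zz, 3, zz ] is a
/// zero extension of the low four i16 elements to i32 and can be lowered with
/// PMOVZXWD on SSE4.1, or with a PUNPCKLWD against an all-zeros vector
/// otherwise.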
12828 static SDValue lowerShuffleAsZeroOrAnyExtend(
12829 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12830 const APInt &Zeroable, const X86Subtarget &Subtarget,
12831 SelectionDAG &DAG) {
12832 int Bits = VT.getSizeInBits();
12833 int NumLanes = Bits / 128;
12834 int NumElements = VT.getVectorNumElements();
12835 int NumEltsPerLane = NumElements / NumLanes;
12836 assert(VT.getScalarSizeInBits() <= 32 &&
12837 "Exceeds 32-bit integer zero extension limit");
12838 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
12840 // Define a helper function to check a particular ext-scale and lower to it if
12842 auto Lower = [&](int Scale) -> SDValue {
12844 bool AnyExt = true;
12847 for (int i = 0; i < NumElements; ++i) {
12850 continue; // Valid anywhere but doesn't tell us anything.
12851 if (i % Scale != 0) {
12852 // Each of the extended elements need to be zeroable.
12856 // We no longer are in the anyext case.
12861 // Each of the base elements needs to be consecutive indices into the
12862 // same input vector.
12863 SDValue V = M < NumElements ? V1 : V2;
12864 M = M % NumElements;
12867 Offset = M - (i / Scale);
12868 } else if (InputV != V)
12869 return SDValue(); // Flip-flopping inputs.
12871 // Offset must start in the lowest 128-bit lane or at the start of an
12873 // FIXME: Is it ever worth allowing a negative base offset?
12874 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
12875 (Offset % NumEltsPerLane) == 0))
12878 // If we are offsetting, all referenced entries must come from the same
12880 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
12883 if ((M % NumElements) != (Offset + (i / Scale)))
12884 return SDValue(); // Non-consecutive strided elements.
12888 // If we fail to find an input, we have a zero-shuffle which should always
12889 // have already been handled.
12890 // FIXME: Maybe handle this here in case during blending we end up with one?
12894 // If we are offsetting, don't extend if we only match a single input; we
12895 // can always do better by using a basic PSHUF or PUNPCK.
12896 if (Offset != 0 && Matches < 2)
12899 return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
12900 InputV, Mask, Subtarget, DAG);
12903 // The widest scale possible for extending is to a 64-bit integer.
12904 assert(Bits % 64 == 0 &&
12905 "The number of bits in a vector must be divisible by 64 on x86!");
12906 int NumExtElements = Bits / 64;
12908 // Each iteration, try extending the elements half as much, but into twice as
12910 for (; NumExtElements < NumElements; NumExtElements *= 2) {
12911 assert(NumElements % NumExtElements == 0 &&
12912 "The input vector size must be divisible by the extended size.");
12913 if (SDValue V = Lower(NumElements / NumExtElements))
12917 // General extends failed, but 128-bit vectors may be able to use MOVQ.
12921 // Returns one of the source operands if the shuffle can be reduced to a
12922 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
12923 auto CanZExtLowHalf = [&]() {
12924 for (int i = NumElements / 2; i != NumElements; ++i)
12927 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12929 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12934 if (SDValue V = CanZExtLowHalf()) {
12935 V = DAG.getBitcast(MVT::v2i64, V);
12936 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12937 return DAG.getBitcast(VT, V);
12940 // No viable ext lowering found.
12944 /// Try to get a scalar value for a specific element of a vector.
12946 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
12947 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12948 SelectionDAG &DAG) {
12949 MVT VT = V.getSimpleValueType();
12950 MVT EltVT = VT.getVectorElementType();
12951 V = peekThroughBitcasts(V);
12953 // If the bitcasts shift the element size, we can't extract an equivalent
12954 // element from it.
12955 MVT NewVT = V.getSimpleValueType();
12956 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12959 if (V.getOpcode() == ISD::BUILD_VECTOR ||
12960 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12961 // Ensure the scalar operand is the same size as the destination.
12962 // FIXME: Add support for scalar truncation where possible.
12963 SDValue S = V.getOperand(Idx);
12964 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12965 return DAG.getBitcast(EltVT, S);
12971 /// Helper to test for a load that can be folded with x86 shuffles.
12973 /// This is particularly important because the set of instructions varies
12974 /// significantly based on whether the operand is a load or not.
12975 static bool isShuffleFoldableLoad(SDValue V) {
12976 V = peekThroughBitcasts(V);
12977 return ISD::isNON_EXTLoad(V.getNode());
12980 /// Try to lower insertion of a single element into a zero vector.
12982 /// This is a common pattern for which we have especially efficient lowerings
12983 /// across all subtarget feature sets.
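///
/// For example (a sketch): for a plain vector V2, the v4i32 mask
/// [ 4, zz, zz, zz ] (V2's low element into lane 0, all other lanes zero)
/// lowers to a single VZEXT_MOVL node, i.e. a MOVD/MOVQ-style move that
/// zeroes the upper lanes.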
12984 static SDValue lowerShuffleAsElementInsertion(
12985 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12986 const APInt &Zeroable, const X86Subtarget &Subtarget,
12987 SelectionDAG &DAG) {
12989 MVT EltVT = VT.getVectorElementType();
12992 find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12994 bool IsV1Zeroable = true;
12995 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12996 if (i != V2Index && !Zeroable[i]) {
12997 IsV1Zeroable = false;
13001 // Check for a single input from a SCALAR_TO_VECTOR node.
13002 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
13003 // all the smarts here sunk into that routine. However, the current
13004 // lowering of BUILD_VECTOR makes that nearly impossible until the old
13005 // vector shuffle lowering is dead.
13006 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
13008 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
13009 // We need to zext the scalar if it is smaller than an i32.
13010 V2S = DAG.getBitcast(EltVT, V2S);
13011 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
13012 // Using zext to expand a narrow element won't work for non-zero
13017 // Zero-extend directly to i32.
13018 ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
13019 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
13021 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
13022 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
13023 EltVT == MVT::i16) {
13024 // Either not inserting from the low element of the input or the input
13025 // element size is too small to use VZEXT_MOVL to clear the high bits.
13029 if (!IsV1Zeroable) {
13030 // If V1 can't be treated as a zero vector we have fewer options to lower
13031 // this. We can't support integer vectors or non-zero targets cheaply, and
13032 // the V1 elements can't be permuted in any way.
13033 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
13034 if (!VT.isFloatingPoint() || V2Index != 0)
13036 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
13037 V1Mask[V2Index] = -1;
13038 if (!isNoopShuffleMask(V1Mask))
13040 if (!VT.is128BitVector())
13043 // Otherwise, use MOVSD or MOVSS.
13044 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
13045 "Only two types of floating point element types to handle!");
13046 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
13050 // This lowering only works for the low element with floating point vectors.
13051 if (VT.isFloatingPoint() && V2Index != 0)
13054 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
13056 V2 = DAG.getBitcast(VT, V2);
13058 if (V2Index != 0) {
13059 // If we have 4 or fewer lanes we can cheaply shuffle the element into
13060 // the desired position. Otherwise it is more efficient to do a vector
13061 // shift left. We know that we can do a vector shift left because all
13062 // the inputs are zero.
13063 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
13064 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
13065 V2Shuffle[V2Index] = 0;
13066 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
13068 V2 = DAG.getBitcast(MVT::v16i8, V2);
13069 V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
13070 DAG.getTargetConstant(
13071 V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
13072 V2 = DAG.getBitcast(VT, V2);
13078 /// Try to lower a broadcast of a single (truncated) integer element,
13079 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
13081 /// This assumes we have AVX2.
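///
/// For example (a sketch): if \p V0 is a v4i32 BUILD_VECTOR and we broadcast
/// element 2 of a v8i16 shuffle, the i32 operand 1 of \p V0 is truncated to
/// i16 and broadcast with VPBROADCASTW; a non-zero sub-element offset is
/// first handled with an SRL of the scalar.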
13082 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
13084 const X86Subtarget &Subtarget,
13085 SelectionDAG &DAG) {
13086 assert(Subtarget.hasAVX2() &&
13087 "We can only lower integer broadcasts with AVX2!");
13089 MVT EltVT = VT.getVectorElementType();
13090 MVT V0VT = V0.getSimpleValueType();
13092 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
13093 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
13095 MVT V0EltVT = V0VT.getVectorElementType();
13096 if (!V0EltVT.isInteger())
13099 const unsigned EltSize = EltVT.getSizeInBits();
13100 const unsigned V0EltSize = V0EltVT.getSizeInBits();
13102 // This is only a truncation if the original element type is larger.
13103 if (V0EltSize <= EltSize)
13106 assert(((V0EltSize % EltSize) == 0) &&
13107 "Scalar type sizes must all be powers of 2 on x86!");
13109 const unsigned V0Opc = V0.getOpcode();
13110 const unsigned Scale = V0EltSize / EltSize;
13111 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
13113 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
13114 V0Opc != ISD::BUILD_VECTOR)
13117 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
13119 // If we're extracting non-least-significant bits, shift so we can truncate.
13120 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
13121 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
13122 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
13123 if (const int OffsetIdx = BroadcastIdx % Scale)
13124 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
13125 DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
13127 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
13128 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
13131 /// Test whether this can be lowered with a single SHUFPS instruction.
13133 /// This is used to disable more specialized lowerings when the shufps lowering
13134 /// will happen to be efficient.
13135 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
13136 // This routine only handles 128-bit shufps.
13137 assert(Mask.size() == 4 && "Unsupported mask size!");
13138 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
13139 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
13140 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
13141 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
13143 // To lower with a single SHUFPS we need to have the low half and high half
13144 // each requiring a single input.
13145 if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
13147 if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
13153 /// If we are extracting two 128-bit halves of a vector and shuffling the
13154 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
13155 /// multi-shuffle lowering.
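///
/// For example (a sketch, assuming AVX2): with N0 = extract_subvector(X, 0)
/// and N1 = extract_subvector(X, 4) of a v8f32 X, the v4f32 mask
/// [ 0, 4, 2, 6 ] is neither a single SHUFPS nor an unpack, so it becomes a
/// VPERMPS of X with mask [ 0, 4, 2, 6, u, u, u, u ] followed by a free
/// extract of the low 128 bits.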
13156 static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
13157 SDValue N1, ArrayRef<int> Mask,
13158 SelectionDAG &DAG) {
13159 MVT VT = N0.getSimpleValueType();
13160 assert((VT.is128BitVector() &&
13161 (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
13162 "VPERM* family of shuffles requires 32-bit or 64-bit elements");
13164 // Check that both sources are extracts of the same source vector.
13165 if (!N0.hasOneUse() || !N1.hasOneUse() ||
13166 N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13167 N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13168 N0.getOperand(0) != N1.getOperand(0))
13171 SDValue WideVec = N0.getOperand(0);
13172 MVT WideVT = WideVec.getSimpleValueType();
13173 if (!WideVT.is256BitVector())
13176 // Match extracts of each half of the wide source vector. Commute the shuffle
13177 // if the extract of the low half is N1.
13178 unsigned NumElts = VT.getVectorNumElements();
13179 SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
13180 const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
13181 const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
13182 if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
13183 ShuffleVectorSDNode::commuteMask(NewMask);
13184 else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
13187 // Final bailout: if the mask is simple, we are better off using an extract
13188 // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
13189 // because that avoids a constant load from memory.
13190 if (NumElts == 4 &&
13191 (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
13194 // Extend the shuffle mask with undef elements.
13195 NewMask.append(NumElts, -1);
13197 // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
13198 SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
13200 // This is free: ymm -> xmm.
13201 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
13202 DAG.getIntPtrConstant(0, DL));
13205 /// Try to lower broadcast of a single element.
13207 /// For convenience, this code also bundles all of the subtarget feature set
13208 /// filtering. While a little annoying to re-dispatch on type here, there isn't
13209 /// a convenient way to factor it out.
13210 static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
13211 SDValue V2, ArrayRef<int> Mask,
13212 const X86Subtarget &Subtarget,
13213 SelectionDAG &DAG) {
13214 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
13215 (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
13216 (Subtarget.hasAVX2() && VT.isInteger())))
13219 // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
13220 // we can only broadcast from a register with AVX2.
13221 unsigned NumEltBits = VT.getScalarSizeInBits();
13222 unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
13224 : X86ISD::VBROADCAST;
13225 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
13227 // Check that the mask is a broadcast.
13228 int BroadcastIdx = getSplatIndex(Mask);
13229 if (BroadcastIdx < 0)
13231 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
13232 "a sorted mask where the broadcast "
13235 // Go up the chain of (vector) values to find a scalar load that we can
13236 // combine with the broadcast.
13237 // TODO: Combine this logic with findEltLoadSrc() used by
13238 // EltsFromConsecutiveLoads().
13239 int BitOffset = BroadcastIdx * NumEltBits;
13242 switch (V.getOpcode()) {
13243 case ISD::BITCAST: {
13244 V = V.getOperand(0);
13247 case ISD::CONCAT_VECTORS: {
13248 int OpBitWidth = V.getOperand(0).getValueSizeInBits();
13249 int OpIdx = BitOffset / OpBitWidth;
13250 V = V.getOperand(OpIdx);
13251 BitOffset %= OpBitWidth;
13254 case ISD::EXTRACT_SUBVECTOR: {
13255 // The extraction index adds to the existing offset.
13256 unsigned EltBitWidth = V.getScalarValueSizeInBits();
13257 unsigned Idx = V.getConstantOperandVal(1);
13258 unsigned BeginOffset = Idx * EltBitWidth;
13259 BitOffset += BeginOffset;
13260 V = V.getOperand(0);
13263 case ISD::INSERT_SUBVECTOR: {
13264 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
13265 int EltBitWidth = VOuter.getScalarValueSizeInBits();
13266 int Idx = (int)V.getConstantOperandVal(2);
13267 int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
13268 int BeginOffset = Idx * EltBitWidth;
13269 int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
13270 if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
13271 BitOffset -= BeginOffset;
13281 assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
13282 BroadcastIdx = BitOffset / NumEltBits;
13284 // Do we need to bitcast the source to retrieve the original broadcast index?
13285 bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
13287 // Check if this is a broadcast of a scalar. We special case lowering
13288 // for scalars so that we can more effectively fold with loads.
13289 // If the original value has a larger element type than the shuffle, the
13290 // broadcast element is in essence truncated. Make that explicit to ease
13292 if (BitCastSrc && VT.isInteger())
13293 if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
13294 DL, VT, V, BroadcastIdx, Subtarget, DAG))
13295 return TruncBroadcast;
13297 // Also check the simpler case, where we can directly reuse the scalar.
13299 ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
13300 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
13301 V = V.getOperand(BroadcastIdx);
13303 // If we can't broadcast from a register, check that the input is a load.
13304 if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
13306 } else if (ISD::isNormalLoad(V.getNode()) &&
13307 cast<LoadSDNode>(V)->isSimple()) {
13308 // We do not check for one-use of the vector load because a broadcast load
13309 // is expected to be a win for code size, register pressure, and possibly
13310 // uops even if the original vector load is not eliminated.
13312 // Reduce the vector load and shuffle to a broadcasted scalar load.
13313 LoadSDNode *Ld = cast<LoadSDNode>(V);
13314 SDValue BaseAddr = Ld->getOperand(1);
13315 MVT SVT = VT.getScalarType();
13316 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
13317 assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
13318 SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
13320 // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
13322 // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
13323 if (Opcode == X86ISD::VBROADCAST) {
13324 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
13325 SDValue Ops[] = {Ld->getChain(), NewAddr};
13326 V = DAG.getMemIntrinsicNode(
13327 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
13328 DAG.getMachineFunction().getMachineMemOperand(
13329 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
13330 DAG.makeEquivalentMemoryOrdering(Ld, V);
13331 return DAG.getBitcast(VT, V);
13333 assert(SVT == MVT::f64 && "Unexpected VT!");
13334 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
13335 DAG.getMachineFunction().getMachineMemOperand(
13336 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
13337 DAG.makeEquivalentMemoryOrdering(Ld, V);
13338 } else if (!BroadcastFromReg) {
13339 // We can't broadcast from a vector register.
13341 } else if (BitOffset != 0) {
13342 // We can only broadcast from the zero-element of a vector register,
13343 // but it can be advantageous to broadcast from the zero-element of a
13345 if (!VT.is256BitVector() && !VT.is512BitVector())
13348 // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
13349 if (VT == MVT::v4f64 || VT == MVT::v4i64)
13352 // Only broadcast the zero-element of a 128-bit subvector.
13353 if ((BitOffset % 128) != 0)
13356 assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
13357 "Unexpected bit-offset");
13358 assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
13359 "Unexpected vector size");
13360 unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
13361 V = extract128BitVector(V, ExtractIdx, DAG, DL);
13364 if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
13365 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
13366 DAG.getBitcast(MVT::f64, V));
13368 // If this is a scalar, do the broadcast on this type and bitcast.
13369 if (!V.getValueType().isVector()) {
13370 assert(V.getScalarValueSizeInBits() == NumEltBits &&
13371 "Unexpected scalar size");
13372 MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
13373 VT.getVectorNumElements());
13374 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
13377 // We only support broadcasting from 128-bit vectors to minimize the
13378 // number of patterns we need to deal with in isel. So extract down to
13379 // 128-bits, removing as many bitcasts as possible.
13380 if (V.getValueSizeInBits() > 128)
13381 V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
13383 // Otherwise cast V to a vector with the same element type as VT, but
13384 // possibly narrower than VT. Then perform the broadcast.
13385 unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
13386 MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
13387 return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
13390 // Check for whether we can use INSERTPS to perform the shuffle. We only use
13391 // INSERTPS when the V1 elements are already in the correct locations
13392 // because otherwise we can just always use two SHUFPS instructions which
13393 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
13394 // perform INSERTPS if a single V1 element is out of place and all V2
13395 // elements are zeroable.
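// For example (a sketch): the v4f32 mask [ 4, 1, 2, zz ] keeps V1 elements 1
// and 2 in place, inserts V2 element 0 into lane 0 and zeroes lane 3, giving
// an INSERTPS immediate of (0 << 6) | (0 << 4) | 0b1000 = 0x08.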
13396 static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
13397 unsigned &InsertPSMask,
13398 const APInt &Zeroable,
13399 ArrayRef<int> Mask, SelectionDAG &DAG) {
13400 assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
13401 assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
13402 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13404 // Attempt to match INSERTPS with one element from VA or VB being
13405 // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
13407 auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
13408 ArrayRef<int> CandidateMask) {
13409 unsigned ZMask = 0;
13410 int VADstIndex = -1;
13411 int VBDstIndex = -1;
13412 bool VAUsedInPlace = false;
13414 for (int i = 0; i < 4; ++i) {
13415 // Synthesize a zero mask from the zeroable elements (includes undefs).
13421 // Flag if we use any VA inputs in place.
13422 if (i == CandidateMask[i]) {
13423 VAUsedInPlace = true;
13427 // We can only insert a single non-zeroable element.
13428 if (VADstIndex >= 0 || VBDstIndex >= 0)
13431 if (CandidateMask[i] < 4) {
13432 // VA input out of place for insertion.
13435 // VB input for insertion.
13440 // Don't bother if we have no (non-zeroable) element for insertion.
13441 if (VADstIndex < 0 && VBDstIndex < 0)
13444 // Determine element insertion src/dst indices. The src index is from the
13445 // start of the inserted vector, not the start of the concatenated vector.
13446 unsigned VBSrcIndex = 0;
13447 if (VADstIndex >= 0) {
13448 // If we have a VA input out of place, we use VA as the V2 element
13449 // insertion and don't use the original V2 at all.
13450 VBSrcIndex = CandidateMask[VADstIndex];
13451 VBDstIndex = VADstIndex;
13454 VBSrcIndex = CandidateMask[VBDstIndex] - 4;
13457 // If no V1 inputs are used in place, then the result is created only from
13458 // the zero mask and the V2 insertion - so remove V1 dependency.
13459 if (!VAUsedInPlace)
13460 VA = DAG.getUNDEF(MVT::v4f32);
13462 // Update V1, V2 and InsertPSMask accordingly.
13466 // Insert the V2 element into the desired position.
13467 InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
13468 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
13472 if (matchAsInsertPS(V1, V2, Mask))
13475 // Commute and try again.
13476 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
13477 ShuffleVectorSDNode::commuteMask(CommutedMask);
13478 if (matchAsInsertPS(V2, V1, CommutedMask))
13484 static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
13485 ArrayRef<int> Mask, const APInt &Zeroable,
13486 SelectionDAG &DAG) {
13487 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13488 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13490 // Attempt to match the insertps pattern.
13491 unsigned InsertPSMask;
13492 if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
13495 // Insert the V2 element into the desired position.
13496 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
13497 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
13500 /// Try to lower a shuffle as a permute of the inputs followed by an
13501 /// UNPCK instruction.
13503 /// This specifically targets cases where we end up alternating between
13504 /// the two inputs, and so can permute them into something that feeds a single
13505 /// UNPCK instruction. Note that this routine only targets integer vectors
13506 /// because for floating point vectors we have a generalized SHUFPS lowering
13507 /// strategy that handles everything that doesn't *exactly* match an unpack,
13508 /// making this clever lowering unnecessary.
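///
/// For example (a sketch): the v8i16 mask [ 0, 8, 2, 10, 4, 12, 6, 14 ]
/// alternates between V1 and V2; permuting each input's even elements into
/// its low half ([ 0, 2, 4, 6, u, u, u, u ]) lets a single PUNPCKLWD
/// interleave them into the final result.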
13509 static SDValue lowerShuffleAsPermuteAndUnpack(
13510 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13511 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13512 assert(!VT.isFloatingPoint() &&
13513 "This routine only supports integer vectors.");
13514 assert(VT.is128BitVector() &&
13515 "This routine only works on 128-bit vectors.");
13516 assert(!V2.isUndef() &&
13517 "This routine should only be used when blending two inputs.");
13518 assert(Mask.size() >= 2 && "Single element masks are invalid.");
13520 int Size = Mask.size();
13523 count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
13525 count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
13527 bool UnpackLo = NumLoInputs >= NumHiInputs;
13529 auto TryUnpack = [&](int ScalarSize, int Scale) {
13530 SmallVector<int, 16> V1Mask((unsigned)Size, -1);
13531 SmallVector<int, 16> V2Mask((unsigned)Size, -1);
13533 for (int i = 0; i < Size; ++i) {
13537 // Each element of the unpack contains Scale elements from this mask.
13538 int UnpackIdx = i / Scale;
13540 // We only handle the case where V1 feeds the first slots of the unpack.
13541 // We rely on canonicalization to ensure this is the case.
13542 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
13545 // Setup the mask for this input. The indexing is tricky as we have to
13546 // handle the unpack stride.
13547 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
13548 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
13552 // If we will have to shuffle both inputs to use the unpack, check whether
13553 // we can just unpack first and shuffle the result. If so, skip this unpack.
13554 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
13555 !isNoopShuffleMask(V2Mask))
13558 // Shuffle the inputs into place.
13559 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
13560 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
13562 // Cast the inputs to the type we will use to unpack them.
13563 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
13564 V1 = DAG.getBitcast(UnpackVT, V1);
13565 V2 = DAG.getBitcast(UnpackVT, V2);
13567 // Unpack the inputs and cast the result back to the desired type.
13568 return DAG.getBitcast(
13569 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
13570 UnpackVT, V1, V2));
13573 // We try each unpack from the largest to the smallest to try and find one
13574 // that fits this mask.
13575 int OrigScalarSize = VT.getScalarSizeInBits();
13576 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
13577 if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
13580 // If we're shuffling with a zero vector then we're better off not doing
13581 // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
13582 if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
13583 ISD::isBuildVectorAllZeros(V2.getNode()))
13586 // If none of the unpack-rooted lowerings worked (or were profitable) try an
13588 if (NumLoInputs == 0 || NumHiInputs == 0) {
13589 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
13590 "We have to have *some* inputs!");
13591 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
13593 // FIXME: We could consider the total complexity of the permute of each
13594 // possible unpacking. Or at the least we should consider how many
13595 // half-crossings are created.
13596 // FIXME: We could consider commuting the unpacks.
13598 SmallVector<int, 32> PermMask((unsigned)Size, -1);
13599 for (int i = 0; i < Size; ++i) {
13603 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
13606 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
13608 return DAG.getVectorShuffle(
13609 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
13611 DAG.getUNDEF(VT), PermMask);
13617 /// Handle lowering of 2-lane 64-bit floating point shuffles.
13619 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
13620 /// support for floating point shuffles but not integer shuffles. These
13621 /// instructions will incur a domain crossing penalty on some chips, though, so
13622 /// it is better to avoid lowering through this for integer vectors where
13624 static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13625 const APInt &Zeroable, SDValue V1, SDValue V2,
13626 const X86Subtarget &Subtarget,
13627 SelectionDAG &DAG) {
13628 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
13629 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
13630 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
13632 if (V2.isUndef()) {
13633 // Check for being able to broadcast a single element.
13634 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
13635 Mask, Subtarget, DAG))
13638 // Straight shuffle of a single input vector. Simulate this by using the
13639 // single input as both of the "inputs" to this instruction.
13640 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
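// For example, the single-input mask <1, 0> gives SHUFPDMask = 0b01, i.e.
// result[0] = V1[1] and result[1] = V1[0].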
13642 if (Subtarget.hasAVX()) {
13643 // If we have AVX, we can use VPERMILPD, which will allow folding a load
13644 // into the shuffle.
13645 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
13646 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13649 return DAG.getNode(
13650 X86ISD::SHUFP, DL, MVT::v2f64,
13651 Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
13652 Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
13653 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13655 assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
13656 assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
13657 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13658 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13660 if (Subtarget.hasAVX2())
13661 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13664 // When loading a scalar and then shuffling it into a vector we can often do
13665 // the insertion cheaply.
13666 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13667 DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13669 // Try inverting the insertion since for v2 masks it is easy to do and we
13670 // can't reliably sort the mask one way or the other.
13671 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
13672 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
13673 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13674 DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13677 // Try to use one of the special instruction patterns to handle two common
13678 // blend patterns if a zero-blend above didn't work.
13679 if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
13680 isShuffleEquivalent(V1, V2, Mask, {1, 3}))
13681 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
13682 // We can either use a special instruction to load over the low double or
13683 // to move just the low double.
13684 return DAG.getNode(
13685 X86ISD::MOVSD, DL, MVT::v2f64, V2,
13686 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
13688 if (Subtarget.hasSSE41())
13689 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
13690 Zeroable, Subtarget, DAG))
13693 // Use dedicated unpack instructions for masks that match their pattern.
13694 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
13697 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
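// For example, the mask <1, 3> gives SHUFPDMask = 0b11: V1[1] into the low
// lane and V2[1] into the high lane.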
13698 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
13699 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13702 /// Handle lowering of 2-lane 64-bit integer shuffles.
13704 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
13705 /// the integer unit to minimize domain crossing penalties. However, for blends
13706 /// it falls back to the floating point shuffle operation with appropriate bit
13708 static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13709 const APInt &Zeroable, SDValue V1, SDValue V2,
13710 const X86Subtarget &Subtarget,
13711 SelectionDAG &DAG) {
13712 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
13713 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
13714 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
13716 if (V2.isUndef()) {
13717 // Check for being able to broadcast a single element.
13718 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
13719 Mask, Subtarget, DAG))
13722 // Straight shuffle of a single input vector. For everything from SSE2
13723 // onward this has a single fast instruction with no scary immediates.
13724 // We have to map the mask as it is actually a v4i32 shuffle instruction.
13725 V1 = DAG.getBitcast(MVT::v4i32, V1);
13726 int WidenedMask[4] = {
13727 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
13728 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
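// For example, the v2i64 mask <1, 0> widens to the v4i32 PSHUFD mask
// <2, 3, 0, 1>.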
13729 return DAG.getBitcast(
13731 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13732 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
13734 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
13735 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
13736 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13737 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13739 if (Subtarget.hasAVX2())
13740 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13743 // Try to use shift instructions.
13744 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
13745 Zeroable, Subtarget, DAG))
13748 // When loading a scalar and then shuffling it into a vector we can often do
13749 // the insertion cheaply.
13750 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13751 DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13753 // Try inverting the insertion since for v2 masks it is easy to do and we
13754 // can't reliably sort the mask one way or the other.
13755 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
13756 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13757 DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13760 // We have different paths for blend lowering, but they all must use the
13761 // *exact* same predicate.
13762 bool IsBlendSupported = Subtarget.hasSSE41();
13763 if (IsBlendSupported)
13764 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
13765 Zeroable, Subtarget, DAG))
13768 // Use dedicated unpack instructions for masks that match their pattern.
13769 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
13772 // Try to use byte rotation instructions.
13773 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13774 if (Subtarget.hasSSSE3()) {
13775 if (Subtarget.hasVLX())
13776 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
13780 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
13785 // If we have direct support for blends, we should lower by decomposing into
13786 // a permute. That will be faster than the domain cross.
13787 if (IsBlendSupported)
13788 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
13791 // We implement this with SHUFPD which is pretty lame because it will likely
13792 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
13793 // However, all the alternatives are still more cycles and newer chips don't
13794 // have this problem. It would be really nice if x86 had better shuffles here.
13795 V1 = DAG.getBitcast(MVT::v2f64, V1);
13796 V2 = DAG.getBitcast(MVT::v2f64, V2);
13797 return DAG.getBitcast(MVT::v2i64,
13798 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
13801 /// Lower a vector shuffle using the SHUFPS instruction.
13803 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
13804 /// It makes no assumptions about whether this is the *best* lowering; it simply
13806 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
13807 ArrayRef<int> Mask, SDValue V1,
13808 SDValue V2, SelectionDAG &DAG) {
13809 SDValue LowV = V1, HighV = V2;
13810 SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
13811 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13813 if (NumV2Elements == 1) {
13814 int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
13816 // Compute the index adjacent to V2Index and in the same half by toggling
13818 int V2AdjIndex = V2Index ^ 1;
13820 if (Mask[V2AdjIndex] < 0) {
13821 // Handles all the cases where we have a single V2 element and an undef.
13822 // This will only ever happen in the high lanes because we commute the
13823 // vector otherwise.
13825 std::swap(LowV, HighV);
13826 NewMask[V2Index] -= 4;
13828 // Handle the case where the V2 element ends up adjacent to a V1 element.
13829 // To make this work, blend them together as the first step.
13830 int V1Index = V2AdjIndex;
13831 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
13832 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
13833 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13835 // Now proceed to reconstruct the final blend as we have the necessary
13836 // high or low half formed.
13843 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
13844 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
13846 } else if (NumV2Elements == 2) {
13847 if (Mask[0] < 4 && Mask[1] < 4) {
13848 // Handle the easy case where we have V1 in the low lanes and V2 in the
13852 } else if (Mask[2] < 4 && Mask[3] < 4) {
13853 // We also handle the reversed case because this utility may get called
13854 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
13855 // arrange things in the right direction.
13861 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
13862 // trying to place elements directly, just blend them and set up the final
13863 // shuffle to place them.
13865 // The first two blend mask elements are for V1, the second two are for
13867 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
13868 Mask[2] < 4 ? Mask[2] : Mask[3],
13869 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
13870 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
13871 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
13872 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13874 // Now we do a normal shuffle of V1 by giving V1 as both operands to
13877 NewMask[0] = Mask[0] < 4 ? 0 : 2;
13878 NewMask[1] = Mask[0] < 4 ? 2 : 0;
13879 NewMask[2] = Mask[2] < 4 ? 1 : 3;
13880 NewMask[3] = Mask[2] < 4 ? 3 : 1;
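// For example, for the mask <0, 4, 2, 6> the first SHUFP blends to
// <V1[0], V1[2], V2[0], V2[2]> and the final shuffle reorders that with
// <0, 2, 1, 3>.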
13883 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
13884 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
13887 /// Lower 4-lane 32-bit floating point shuffles.
13889 /// Uses instructions exclusively from the floating point unit to minimize
13890 /// domain crossing penalties, as these are sufficient to implement all v4f32 shuffles.
13892 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13893 const APInt &Zeroable, SDValue V1, SDValue V2,
13894 const X86Subtarget &Subtarget,
13895 SelectionDAG &DAG) {
13896 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13897 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13898 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13900 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13902 if (NumV2Elements == 0) {
13903 // Check for being able to broadcast a single element.
13904 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
13905 Mask, Subtarget, DAG))
13908 // Use even/odd duplicate instructions for masks that match their pattern.
13909 if (Subtarget.hasSSE3()) {
13910 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
13911 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
13912 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
13913 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
13916 if (Subtarget.hasAVX()) {
13917 // If we have AVX, we can use VPERMILPS which will allow folding a load
13918 // into the shuffle.
13919 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
13920 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13923 // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
13924 // in SSE1 because otherwise they are widened to v2f64 and never get here.
13925 if (!Subtarget.hasSSE2()) {
13926 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
13927 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
13928 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
13929 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
13932 // Otherwise, use a straight shuffle of a single input vector. We pass the
13933 // input vector to both operands to simulate this with a SHUFPS.
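// For example (illustrative), the reversal mask <3, 2, 1, 0> becomes
//   shufps $0x1b, %xmm0, %xmm0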
13934 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
13935 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13938 if (Subtarget.hasAVX2())
13939 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13942 // There are special ways we can lower some single-element blends. However, we
13943 // have custom ways we can lower more complex single-element blends below that
13944 // we defer to if both this and BLENDPS fail to match, so restrict this to
13945 // when the V2 input is targeting element 0 of the mask -- that is the fast case here.
13947 if (NumV2Elements == 1 && Mask[0] >= 4)
13948 if (SDValue V = lowerShuffleAsElementInsertion(
13949 DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13952 if (Subtarget.hasSSE41()) {
13953 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
13954 Zeroable, Subtarget, DAG))
13957 // Use INSERTPS if we can complete the shuffle efficiently.
13958 if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13961 if (!isSingleSHUFPSMask(Mask))
13962 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13967 // Use low/high mov instructions. These are only valid in SSE1 because
13968 // otherwise they are widened to v2f64 and never get here.
13969 if (!Subtarget.hasSSE2()) {
13970 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
13971 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13972 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
13973 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13976 // Use dedicated unpack instructions for masks that match their pattern.
13977 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13980 // Otherwise fall back to a SHUFPS lowering strategy.
13981 return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13984 /// Lower 4-lane i32 vector shuffles.
13986 /// We try to handle these with integer-domain shuffles where we can, but for
13987 /// blends we use the floating point domain blend instructions.
13988 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13989 const APInt &Zeroable, SDValue V1, SDValue V2,
13990 const X86Subtarget &Subtarget,
13991 SelectionDAG &DAG) {
13992 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13993 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13994 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13996 // Whenever we can lower this as a zext, that instruction is strictly faster
13997 // than any alternative. It also allows us to fold memory operands into the
13998 // shuffle in many cases.
13999 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
14000 Zeroable, Subtarget, DAG))
14003 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
14005 if (NumV2Elements == 0) {
14006 // Try to use broadcast unless the mask only has one non-undef element.
14007 if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
14008 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
14009 Mask, Subtarget, DAG))
14013 // Straight shuffle of a single input vector. For everything from SSE2
14014 // onward this has a single fast instruction with no scary immediates.
14015 // We coerce the shuffle pattern to be compatible with UNPCK instructions
14016 // but we aren't actually going to use the UNPCK instruction because doing
14017 // so prevents folding a load into this instruction or making a copy.
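// For example (illustrative), <0, 0, 1, 1> encodes as 'pshufd $0x50' and
// <2, 2, 3, 3> as 'pshufd $0xfa'.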
14018 const int UnpackLoMask[] = {0, 0, 1, 1};
14019 const int UnpackHiMask[] = {2, 2, 3, 3};
14020 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
14021 Mask = UnpackLoMask;
14022 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
14023 Mask = UnpackHiMask;
14025 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
14026 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
14029 if (Subtarget.hasAVX2())
14030 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
14033 // Try to use shift instructions.
14034 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
14035 Zeroable, Subtarget, DAG))
14038 // There are special ways we can lower some single-element blends.
14039 if (NumV2Elements == 1)
14040 if (SDValue V = lowerShuffleAsElementInsertion(
14041 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
14044 // We have different paths for blend lowering, but they all must use the
14045 // *exact* same predicate.
14046 bool IsBlendSupported = Subtarget.hasSSE41();
14047 if (IsBlendSupported)
14048 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
14049 Zeroable, Subtarget, DAG))
14052 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
14053 Zeroable, Subtarget, DAG))
14056 // Use dedicated unpack instructions for masks that match their pattern.
14057 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
14060 // Try to use byte rotation instructions.
14061 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
14062 if (Subtarget.hasSSSE3()) {
14063 if (Subtarget.hasVLX())
14064 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
14068 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
14073 // Assume that a single SHUFPS is faster than an alternative sequence of
14074 // multiple instructions (even if the CPU has a domain penalty).
14075 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
14076 if (!isSingleSHUFPSMask(Mask)) {
14077 // If we have direct support for blends, we should lower by decomposing into
14078 // a permute. That will be faster than the domain cross.
14079 if (IsBlendSupported)
14080 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
14083 // Try to lower by permuting the inputs into an unpack instruction.
14084 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
14085 Mask, Subtarget, DAG))
14089 // We implement this with SHUFPS because it can blend from two vectors.
14090 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
14091 // up the inputs, bypassing domain shift penalties that we would incur if we
14092 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't a concern.
14094 SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
14095 SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
14096 SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
14097 return DAG.getBitcast(MVT::v4i32, ShufPS);
14100 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
14101 /// shuffle lowering, and the most complex part.
14103 /// The lowering strategy is to try to form pairs of input lanes which are
14104 /// targeted at the same half of the final vector, and then use a dword shuffle
14105 /// to place them onto the right half, and finally unpack the paired lanes into
14106 /// their final position.
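/// For example (illustrative), the mask <2, 3, 6, 7, 0, 1, 4, 5> already has
/// its word pairs formed, so a single dword shuffle (PSHUFD with dword mask
/// <1, 3, 0, 2>) is enough to place them.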
14108 /// The exact breakdown of how to form these dword pairs and align them on the
14109 /// correct sides is really tricky. See the comments within the function for
14110 /// more of the details.
14112 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
14113 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
14114 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
14115 /// vector, form the analogous 128-bit 8-element Mask.
14116 static SDValue lowerV8I16GeneralSingleInputShuffle(
14117 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
14118 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14119 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
14120 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
14122 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
14123 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
14124 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
14126 // Attempt to directly match PSHUFLW or PSHUFHW.
14127 if (isUndefOrInRange(LoMask, 0, 4) &&
14128 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
14129 return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14130 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
14132 if (isUndefOrInRange(HiMask, 4, 8) &&
14133 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
14134 for (int i = 0; i != 4; ++i)
14135 HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
14136 return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14137 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
14140 SmallVector<int, 4> LoInputs;
14141 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
14142 array_pod_sort(LoInputs.begin(), LoInputs.end());
14143 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
14144 SmallVector<int, 4> HiInputs;
14145 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
14146 array_pod_sort(HiInputs.begin(), HiInputs.end());
14147 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
14148 int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
14149 int NumHToL = LoInputs.size() - NumLToL;
14150 int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
14151 int NumHToH = HiInputs.size() - NumLToH;
14152 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
14153 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
14154 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
14155 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
14157 // If we are shuffling values from one half - check how many different DWORD
14158 // pairs we need to create. If only 1 or 2 then we can perform this as a
14159 // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
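// For example (illustrative), the mask <0, 1, 0, 1, 2, 3, 2, 3> needs only the
// dword pairs (0,1) and (2,3): the PSHUFLW half-mask is the identity and a
// PSHUFD with dword mask <0, 0, 1, 1> then places the pairs.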
14160 auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
14161 ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
14162 V = DAG.getNode(ShufWOp, DL, VT, V,
14163 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
14164 V = DAG.getBitcast(PSHUFDVT, V);
14165 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
14166 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
14167 return DAG.getBitcast(VT, V);
14170 if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
14171 int PSHUFDMask[4] = { -1, -1, -1, -1 };
14172 SmallVector<std::pair<int, int>, 4> DWordPairs;
14173 int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
14175 // Collect the different DWORD pairs.
14176 for (int DWord = 0; DWord != 4; ++DWord) {
14177 int M0 = Mask[2 * DWord + 0];
14178 int M1 = Mask[2 * DWord + 1];
14179 M0 = (M0 >= 0 ? M0 % 4 : M0);
14180 M1 = (M1 >= 0 ? M1 % 4 : M1);
14181 if (M0 < 0 && M1 < 0)
14184 bool Match = false;
14185 for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
14186 auto &DWordPair = DWordPairs[j];
14187 if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
14188 (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
14189 DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
14190 DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
14191 PSHUFDMask[DWord] = DOffset + j;
14197 PSHUFDMask[DWord] = DOffset + DWordPairs.size();
14198 DWordPairs.push_back(std::make_pair(M0, M1));
14202 if (DWordPairs.size() <= 2) {
14203 DWordPairs.resize(2, std::make_pair(-1, -1));
14204 int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
14205 DWordPairs[1].first, DWordPairs[1].second};
14206 if ((NumHToL + NumHToH) == 0)
14207 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
14208 if ((NumLToL + NumLToH) == 0)
14209 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
14213 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
14214 // such inputs we can swap two of the dwords across the half mark and end up
14215 // with <=2 inputs to each half from each half. Once there, we can fall through
14216 // to the generic code below. For example:
14218 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
14219 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
14221 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
14222 // and an existing 2-into-2 on the other half. In this case we may have to
14223 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
14224 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
14225 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
14226 // because any other situation (including a 3-into-1 or 1-into-3 in the other
14227 // half than the one we target for fixing) will be fixed when we re-enter this
14228 // path. Any sequence of PSHUFD instructions that results will also be combined
14229 // away into a single instruction. Here is an example of the tricky case:
14231 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
14232 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
14234 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
14236 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
14237 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
14239 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
14240 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
14242 // The result is fine to be handled by the generic logic.
14243 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
14244 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
14245 int AOffset, int BOffset) {
14246 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
14247 "Must call this with A having 3 or 1 inputs from the A half.");
14248 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
14249 "Must call this with B having 1 or 3 inputs from the B half.");
14250 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
14251 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
14253 bool ThreeAInputs = AToAInputs.size() == 3;
14255 // Compute the index of dword with only one word among the three inputs in
14256 // a half by taking the sum of the half with three inputs and subtracting
14257 // the sum of the actual three inputs. The difference is the remaining word, the one that is not an input.
14259 int ADWord = 0, BDWord = 0;
14260 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
14261 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
14262 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
14263 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
14264 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
14265 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
14266 int TripleNonInputIdx =
14267 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
14268 TripleDWord = TripleNonInputIdx / 2;
14270 // We use xor with one to compute the adjacent DWord to whichever one the OneInput word is in.
14272 OneInputDWord = (OneInput / 2) ^ 1;
14274 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
14275 // and BToA inputs. If there is also such a problem with the BToB and AToB
14276 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
14277 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
14278 // is essential that we don't *create* a 3<-1 as then we might oscillate.
14279 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
14280 // Compute how many inputs will be flipped by swapping these DWords. We need
14282 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
14284 int NumFlippedAToBInputs =
14285 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
14286 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
14287 int NumFlippedBToBInputs =
14288 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
14289 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
14290 if ((NumFlippedAToBInputs == 1 &&
14291 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
14292 (NumFlippedBToBInputs == 1 &&
14293 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
14294 // We choose whether to fix the A half or B half based on whether that
14295 // half has zero flipped inputs. At zero, we may not be able to fix it
14296 // with that half. We also bias towards fixing the B half because that
14297 // will more commonly be the high half, and we have to bias one way.
14298 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
14299 ArrayRef<int> Inputs) {
14300 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
14301 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
14302 // Determine whether the free index is in the flipped dword or the
14303 // unflipped dword based on where the pinned index is. We use this bit
14304 // in an xor to conditionally select the adjacent dword.
14305 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
14306 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
14307 if (IsFixIdxInput == IsFixFreeIdxInput)
14309 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
14310 assert(IsFixIdxInput != IsFixFreeIdxInput &&
14311 "We need to be changing the number of flipped inputs!");
14312 int PSHUFHalfMask[] = {0, 1, 2, 3};
14313 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
14315 FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
14316 MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
14317 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
14319 for (int &M : Mask)
14320 if (M >= 0 && M == FixIdx)
14322 else if (M >= 0 && M == FixFreeIdx)
14325 if (NumFlippedBToBInputs != 0) {
14327 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
14328 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
14330 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
14331 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
14332 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
14337 int PSHUFDMask[] = {0, 1, 2, 3};
14338 PSHUFDMask[ADWord] = BDWord;
14339 PSHUFDMask[BDWord] = ADWord;
14340 V = DAG.getBitcast(
14342 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
14343 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14345 // Adjust the mask to match the new locations of A and B.
14346 for (int &M : Mask)
14347 if (M >= 0 && M/2 == ADWord)
14348 M = 2 * BDWord + M % 2;
14349 else if (M >= 0 && M/2 == BDWord)
14350 M = 2 * ADWord + M % 2;
14352 // Recurse back into this routine to re-compute state now that this isn't
14353 // a 3 and 1 problem.
14354 return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
14356 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
14357 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
14358 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
14359 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
14361 // At this point there are at most two inputs to the low and high halves from
14362 // each half. That means the inputs can always be grouped into dwords and
14363 // those dwords can then be moved to the correct half with a dword shuffle.
14364 // We use at most one low and one high word shuffle to collect these paired
14365 // inputs into dwords, and finally a dword shuffle to place them.
14366 int PSHUFLMask[4] = {-1, -1, -1, -1};
14367 int PSHUFHMask[4] = {-1, -1, -1, -1};
14368 int PSHUFDMask[4] = {-1, -1, -1, -1};
14370 // First fix the masks for all the inputs that are staying in their
14371 // original halves. This will then dictate the targets of the cross-half shuffles.
14373 auto fixInPlaceInputs =
14374 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
14375 MutableArrayRef<int> SourceHalfMask,
14376 MutableArrayRef<int> HalfMask, int HalfOffset) {
14377 if (InPlaceInputs.empty())
14379 if (InPlaceInputs.size() == 1) {
14380 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
14381 InPlaceInputs[0] - HalfOffset;
14382 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
14385 if (IncomingInputs.empty()) {
14386 // Just fix all of the in place inputs.
14387 for (int Input : InPlaceInputs) {
14388 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
14389 PSHUFDMask[Input / 2] = Input / 2;
14394 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
14395 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
14396 InPlaceInputs[0] - HalfOffset;
14397 // Put the second input next to the first so that they are packed into
14398 // a dword. We find the adjacent index by toggling the low bit.
14399 int AdjIndex = InPlaceInputs[0] ^ 1;
14400 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
14401 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
14402 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
14404 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
14405 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
14407 // Now gather the cross-half inputs and place them into a free dword of
14408 // their target half.
14409 // FIXME: This operation could almost certainly be simplified dramatically to
14410 // look more like the 3-1 fixing operation.
14411 auto moveInputsToRightHalf = [&PSHUFDMask](
14412 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
14413 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
14414 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
14416 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
14417 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
14419 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
14421 int LowWord = Word & ~1;
14422 int HighWord = Word | 1;
14423 return isWordClobbered(SourceHalfMask, LowWord) ||
14424 isWordClobbered(SourceHalfMask, HighWord);
14427 if (IncomingInputs.empty())
14430 if (ExistingInputs.empty()) {
14431 // Map any dwords with inputs from them into the right half.
14432 for (int Input : IncomingInputs) {
14433 // If the source half mask maps over the inputs, turn those into
14434 // swaps and use the swapped lane.
14435 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
14436 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
14437 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
14438 Input - SourceOffset;
14439 // We have to swap the uses in our half mask in one sweep.
14440 for (int &M : HalfMask)
14441 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
14443 else if (M == Input)
14444 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
14446 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
14447 Input - SourceOffset &&
14448 "Previous placement doesn't match!");
14450 // Note that this correctly re-maps both when we do a swap and when
14451 // we observe the other side of the swap above. We rely on that to
14452 // avoid swapping the members of the input list directly.
14453 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
14456 // Map the input's dword into the correct half.
14457 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
14458 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
14460 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
14462 "Previous placement doesn't match!");
14465 // And just directly shift any other-half mask elements to be same-half
14466 // as we will have mirrored the dword containing the element into the
14467 // same position within that half.
14468 for (int &M : HalfMask)
14469 if (M >= SourceOffset && M < SourceOffset + 4) {
14470 M = M - SourceOffset + DestOffset;
14471 assert(M >= 0 && "This should never wrap below zero!");
14476 // Ensure we have the input in a viable dword of its current half. This
14477 // is particularly tricky because the original position may be clobbered
14478 // by inputs being moved and *staying* in that half.
14479 if (IncomingInputs.size() == 1) {
14480 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
14481 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
14483 SourceHalfMask[InputFixed - SourceOffset] =
14484 IncomingInputs[0] - SourceOffset;
14485 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
14487 IncomingInputs[0] = InputFixed;
14489 } else if (IncomingInputs.size() == 2) {
14490 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
14491 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
14492 // We have two non-adjacent or clobbered inputs we need to extract from
14493 // the source half. To do this, we need to map them into some adjacent
14494 // dword slot in the source mask.
14495 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
14496 IncomingInputs[1] - SourceOffset};
14498 // If there is a free slot in the source half mask adjacent to one of
14499 // the inputs, place the other input in it. We use (Index XOR 1) to
14500 // compute an adjacent index.
14501 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
14502 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
14503 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
14504 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
14505 InputsFixed[1] = InputsFixed[0] ^ 1;
14506 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
14507 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
14508 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
14509 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
14510 InputsFixed[0] = InputsFixed[1] ^ 1;
14511 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
14512 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
14513 // The two inputs are in the same DWord but it is clobbered and the
14514 // adjacent DWord isn't used at all. Move both inputs to the free slot.
14516 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
14517 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
14518 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
14519 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
14521 // The only way we hit this point is if there is no clobbering
14522 // (because there are no off-half inputs to this half) and there is no
14523 // free slot adjacent to one of the inputs. In this case, we have to
14524 // swap an input with a non-input.
14525 for (int i = 0; i < 4; ++i)
14526 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
14527 "We can't handle any clobbers here!");
14528 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
14529 "Cannot have adjacent inputs here!");
14531 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
14532 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
14534 // We also have to update the final source mask in this case because
14535 // it may need to undo the above swap.
14536 for (int &M : FinalSourceHalfMask)
14537 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
14538 M = InputsFixed[1] + SourceOffset;
14539 else if (M == InputsFixed[1] + SourceOffset)
14540 M = (InputsFixed[0] ^ 1) + SourceOffset;
14542 InputsFixed[1] = InputsFixed[0] ^ 1;
14545 // Point everything at the fixed inputs.
14546 for (int &M : HalfMask)
14547 if (M == IncomingInputs[0])
14548 M = InputsFixed[0] + SourceOffset;
14549 else if (M == IncomingInputs[1])
14550 M = InputsFixed[1] + SourceOffset;
14552 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
14553 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
14556 llvm_unreachable("Unhandled input size!");
14559 // Now hoist the DWord down to the right half.
14560 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
14561 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
14562 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
14563 for (int &M : HalfMask)
14564 for (int Input : IncomingInputs)
14566 M = FreeDWord * 2 + Input % 2;
14568 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
14569 /*SourceOffset*/ 4, /*DestOffset*/ 0);
14570 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
14571 /*SourceOffset*/ 0, /*DestOffset*/ 4);
14573 // Now enact all the shuffles we've computed to move the inputs into their target halves.
14575 if (!isNoopShuffleMask(PSHUFLMask))
14576 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14577 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
14578 if (!isNoopShuffleMask(PSHUFHMask))
14579 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14580 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
14581 if (!isNoopShuffleMask(PSHUFDMask))
14582 V = DAG.getBitcast(
14584 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
14585 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14587 // At this point, each half should contain all its inputs, and we can then
14588 // just shuffle them into their final position.
14589 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
14590 "Failed to lift all the high half inputs to the low mask!");
14591 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
14592 "Failed to lift all the low half inputs to the high mask!");
14594 // Do a half shuffle for the low mask.
14595 if (!isNoopShuffleMask(LoMask))
14596 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14597 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
14599 // Do a half shuffle with the high mask after shifting its values down.
14600 for (int &M : HiMask)
14603 if (!isNoopShuffleMask(HiMask))
14604 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14605 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
14610 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
14611 /// blend if only one input is used.
14612 static SDValue lowerShuffleAsBlendOfPSHUFBs(
14613 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14614 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
14615 assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
14616 "Lane crossing shuffle masks not supported");
14618 int NumBytes = VT.getSizeInBits() / 8;
14619 int Size = Mask.size();
14620 int Scale = NumBytes / Size;
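// For example (illustrative), a 128-bit v8i16 shuffle has NumBytes = 16,
// Size = 8 and Scale = 2, so each mask element expands into two adjacent byte
// indices (or the 0x80 zeroing marker) in the PSHUFB control vectors below.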
14622 SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14623 SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14627 for (int i = 0; i < NumBytes; ++i) {
14628 int M = Mask[i / Scale];
14632 const int ZeroMask = 0x80;
14633 int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
14634 int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
14635 if (Zeroable[i / Scale])
14636 V1Idx = V2Idx = ZeroMask;
14638 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
14639 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
14640 V1InUse |= (ZeroMask != V1Idx);
14641 V2InUse |= (ZeroMask != V2Idx);
14644 MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
14646 V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
14647 DAG.getBuildVector(ShufVT, DL, V1Mask));
14649 V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
14650 DAG.getBuildVector(ShufVT, DL, V2Mask));
14652 // If we need shuffled inputs from both, blend the two.
14654 if (V1InUse && V2InUse)
14655 V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
14657 V = V1InUse ? V1 : V2;
14659 // Cast the result back to the correct type.
14660 return DAG.getBitcast(VT, V);
14663 /// Generic lowering of 8-lane i16 shuffles.
14665 /// This handles both single-input shuffles and combined shuffle/blends with
14666 /// two inputs. The single input shuffles are immediately delegated to
14667 /// a dedicated lowering routine.
14669 /// The blends are lowered in one of three fundamental ways. If there are few
14670 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
14671 /// of the input is significantly cheaper when lowered as an interleaving of
14672 /// the two inputs, try to interleave them. Otherwise, blend the low and high
14673 /// halves of the inputs separately (making them have relatively few inputs)
14674 /// and then concatenate them.
14675 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14676 const APInt &Zeroable, SDValue V1, SDValue V2,
14677 const X86Subtarget &Subtarget,
14678 SelectionDAG &DAG) {
14679 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14680 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14681 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
14683 // Whenever we can lower this as a zext, that instruction is strictly faster
14684 // than any alternative.
14685 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
14686 Zeroable, Subtarget, DAG))
14689 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
14691 if (NumV2Inputs == 0) {
14692 // Try to use shift instructions.
14693 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
14694 Zeroable, Subtarget, DAG))
14697 // Check for being able to broadcast a single element.
14698 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
14699 Mask, Subtarget, DAG))
14702 // Try to use bit rotation instructions.
14703 if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
14707 // Use dedicated unpack instructions for masks that match their pattern.
14708 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14711 // Use dedicated pack instructions for masks that match their pattern.
14712 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14716 // Try to use byte rotation instructions.
14717 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
14721 // Make a copy of the mask so it can be modified.
14722 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
14723 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
14727 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
14728 "All single-input shuffles should be canonicalized to be V1-input "
14731 // Try to use shift instructions.
14732 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
14733 Zeroable, Subtarget, DAG))
14736 // See if we can use SSE4A Extraction / Insertion.
14737 if (Subtarget.hasSSE4A())
14738 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
14742 // There are special ways we can lower some single-element blends.
14743 if (NumV2Inputs == 1)
14744 if (SDValue V = lowerShuffleAsElementInsertion(
14745 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14748 // We have different paths for blend lowering, but they all must use the
14749 // *exact* same predicate.
14750 bool IsBlendSupported = Subtarget.hasSSE41();
14751 if (IsBlendSupported)
14752 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
14753 Zeroable, Subtarget, DAG))
14756 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
14757 Zeroable, Subtarget, DAG))
14760 // Use dedicated unpack instructions for masks that match their pattern.
14761 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14764 // Use dedicated pack instructions for masks that match their pattern.
14765 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14769 // Try to use byte rotation instructions.
14770 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
14774 if (SDValue BitBlend =
14775 lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
14778 // Try to use byte shift instructions to mask.
14779 if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
14780 Zeroable, Subtarget, DAG))
14783 // Attempt to lower using compaction; SSE41 is necessary for PACKUSDW.
14784 // We could use SIGN_EXTEND_INREG+PACKSSDW for older targets but this seems to
14785 // be slower than a PSHUFLW+PSHUFHW+PSHUFD chain.
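// For example (illustrative), the word-truncation mask <0, 2, 4, 6, 8, 10, 12, 14>
// is handled here by clearing the high word of every dword in both inputs and
// then emitting a single PACKUSDW.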
14786 int NumEvenDrops = canLowerByDroppingEvenElements(Mask, false);
14787 if ((NumEvenDrops == 1 || NumEvenDrops == 2) && Subtarget.hasSSE41() &&
14788 !Subtarget.hasVLX()) {
14789 SmallVector<SDValue, 8> DWordClearOps(4, DAG.getConstant(0, DL, MVT::i32));
14790 for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
14791 DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
14792 SDValue DWordClearMask = DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
14793 V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
14795 V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
14797 // Now pack things back together.
14798 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, V1, V2);
14799 if (NumEvenDrops == 2) {
14800 Result = DAG.getBitcast(MVT::v4i32, Result);
14801 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, Result, Result);
14806 // Try to lower by permuting the inputs into an unpack instruction.
14807 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
14808 Mask, Subtarget, DAG))
14811 // If we can't directly blend but can use PSHUFB, that will be better as it
14812 // can both shuffle and set up the inefficient blend.
14813 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
14814 bool V1InUse, V2InUse;
14815 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
14816 Zeroable, DAG, V1InUse, V2InUse);
14819 // We can always bit-blend if we have to so the fallback strategy is to
14820 // decompose into single-input permutes and blends.
14821 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
14822 Mask, Subtarget, DAG);
14825 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
14826 ArrayRef<int> Mask, SDValue V1,
14827 SDValue V2, SelectionDAG &DAG) {
14828 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
14829 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
14831 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
14833 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
14835 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
14838 /// Generic lowering of v16i8 shuffles.
14840 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14841 /// detect any complexity reducing interleaving. If that doesn't help, it uses
14842 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14843 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them back together.
14845 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14846 const APInt &Zeroable, SDValue V1, SDValue V2,
14847 const X86Subtarget &Subtarget,
14848 SelectionDAG &DAG) {
14849 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14850 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14851 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14853 // Try to use shift instructions.
14854 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
14855 Zeroable, Subtarget, DAG))
14858 // Try to use byte rotation instructions.
14859 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14863 // Use dedicated pack instructions for masks that match their pattern.
14864 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14868 // Try to use a zext lowering.
14869 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14870 Zeroable, Subtarget, DAG))
14873 // See if we can use SSE4A Extraction / Insertion.
14874 if (Subtarget.hasSSE4A())
14875 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14879 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14881 // For single-input shuffles, there are some nicer lowering tricks we can use.
14882 if (NumV2Elements == 0) {
14883 // Check for being able to broadcast a single element.
14884 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14885 Mask, Subtarget, DAG))
14888 // Try to use bit rotation instructions.
14889 if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
14893 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14896 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14897 // Notably, this handles splat and partial-splat shuffles more efficiently.
14898 // However, it only makes sense if the pre-duplication shuffle simplifies
14899 // things significantly. Currently, this means we need to be able to
14900 // express the pre-duplication shuffle as an i16 shuffle.
14902 // FIXME: We should check for other patterns which can be widened into an
14903 // i16 shuffle as well.
14904 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14905 for (int i = 0; i < 16; i += 2)
14906 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14911 auto tryToWidenViaDuplication = [&]() -> SDValue {
14912 if (!canWidenViaDuplication(Mask))
14914 SmallVector<int, 4> LoInputs;
14915 copy_if(Mask, std::back_inserter(LoInputs),
14916 [](int M) { return M >= 0 && M < 8; });
14917 array_pod_sort(LoInputs.begin(), LoInputs.end());
14918 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14920 SmallVector<int, 4> HiInputs;
14921 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14922 array_pod_sort(HiInputs.begin(), HiInputs.end());
14923 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14926 bool TargetLo = LoInputs.size() >= HiInputs.size();
14927 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14928 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14930 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14931 SmallDenseMap<int, int, 8> LaneMap;
14932 for (int I : InPlaceInputs) {
14933 PreDupI16Shuffle[I/2] = I/2;
14936 int j = TargetLo ? 0 : 4, je = j + 4;
14937 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14938 // Check if j is already a shuffle of this input. This happens when
14939 // there are two adjacent bytes after we move the low one.
14940 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14941 // If we haven't yet mapped the input, search for a slot into which we can map it.
14943 while (j < je && PreDupI16Shuffle[j] >= 0)
14947 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
14950 // Map this input with the i16 shuffle.
14951 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14954 // Update the lane map based on the mapping we ended up with.
14955 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14957 V1 = DAG.getBitcast(
14959 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14960 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14962 // Unpack the bytes to form the i16s that will be shuffled into place.
14963 bool EvenInUse = false, OddInUse = false;
14964 for (int i = 0; i < 16; i += 2) {
14965 EvenInUse |= (Mask[i + 0] >= 0);
14966 OddInUse |= (Mask[i + 1] >= 0);
14967 if (EvenInUse && OddInUse)
14970 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14971 MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14972 OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14974 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14975 for (int i = 0; i < 16; ++i)
14976 if (Mask[i] >= 0) {
14977 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14978 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14979 if (PostDupI16Shuffle[i / 2] < 0)
14980 PostDupI16Shuffle[i / 2] = MappedMask;
14982 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14983 "Conflicting entries in the original shuffle!");
14985 return DAG.getBitcast(
14987 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14988 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14990 if (SDValue V = tryToWidenViaDuplication())
14994 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14995 Zeroable, Subtarget, DAG))
14998 // Use dedicated unpack instructions for masks that match their pattern.
14999 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
15002 // Try to use byte shift instructions to mask.
15003 if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
15004 Zeroable, Subtarget, DAG))
15007 // Check for compaction patterns.
15008 bool IsSingleInput = V2.isUndef();
15009 int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput);
15011 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
15012 // with PSHUFB. It is important to do this before we attempt to generate any
15013 // blends but after all of the single-input lowerings. If the single input
15014 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
15015 // want to preserve that and we can DAG combine any longer sequences into
15016 // a PSHUFB in the end. But once we start blending from multiple inputs,
15017 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
15018 // and there are *very* few patterns that would actually be faster than the
15019 // PSHUFB approach because of its ability to zero lanes.
15021 // If the mask is a binary compaction, we can more efficiently perform this
15022 // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
15024 // FIXME: The only exceptions to the above are blends which are exact
15025 // interleavings with direct instructions supporting them. We currently don't
15026 // handle those well here.
15027 if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
15028 bool V1InUse = false;
15029 bool V2InUse = false;
15031 SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
15032 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
15034 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
15035 // do so. This avoids using them to handle blends-with-zero which is
15036 // important as a single pshufb is significantly faster for that.
15037 if (V1InUse && V2InUse) {
15038 if (Subtarget.hasSSE41())
15039 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
15040 Zeroable, Subtarget, DAG))
15043 // We can use an unpack to do the blending rather than an or in some
15044 // cases. Even though the or may be (very minorly) more efficient, we
15045 // prefer this lowering because there are common cases where part of
15046 // the complexity of the shuffles goes away when we do the final blend as an unpack.
15048 // FIXME: It might be worth trying to detect if the unpack-feeding
15049 // shuffles will both be pshufb, in which case we shouldn't bother with the unpack.
15051 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
15052 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
15055 // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
15056 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
15057 return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
15059 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
15060 // PALIGNR will be cheaper than the second PSHUFB+OR.
15061 if (SDValue V = lowerShuffleAsByteRotateAndPermute(
15062 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
15069 // There are special ways we can lower some single-element blends.
15070 if (NumV2Elements == 1)
15071 if (SDValue V = lowerShuffleAsElementInsertion(
15072 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
15075 if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
15078 // Check whether a compaction lowering can be done. This handles shuffles
15079 // which take every Nth element for some even N. See the helper function for details.
15082 // We special case these as they can be particularly efficiently handled with
15083 // the PACKUSWB instruction on x86 and they show up in common patterns of
15084 // rearranging bytes to truncate wide elements.
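// For example (illustrative), the byte-truncation mask <0, 2, 4, ..., 30> is
// handled by ANDing every word of both inputs with 0x00FF and then emitting a
// single PACKUSWB.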
15085 if (NumEvenDrops) {
15086 // NumEvenDrops is the power of two stride of the elements. Another way of
15087 // thinking about it is that we need to drop the even elements this many
15088 // times to get the original input.
15090 // First we need to zero all the dropped bytes.
15091 assert(NumEvenDrops <= 3 &&
15092 "No support for dropping even elements more than 3 times.");
15093 SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
15094 for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
15095 WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
15096 SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
15097 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
15099 if (!IsSingleInput)
15100 V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
15103 // Now pack things back together.
15104 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
15105 IsSingleInput ? V1 : V2);
15106 for (int i = 1; i < NumEvenDrops; ++i) {
15107 Result = DAG.getBitcast(MVT::v8i16, Result);
15108 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
15113 // Handle multi-input cases by blending single-input shuffles.
15114 if (NumV2Elements > 0)
15115 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
15118 // The fallback path for single-input shuffles widens this into two v8i16
15119 // vectors with unpacks, shuffles those, and then pulls them back together with a pack.
15123 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
15124 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
15125 for (int i = 0; i < 16; ++i)
15127 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
15129 SDValue VLoHalf, VHiHalf;
15130 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
15131 // them out and avoid using UNPCK{L,H} to extract the elements of V as i16 vectors.
15133 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
15134 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
15135 // Use a mask to drop the high bytes.
15136 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
15137 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
15138 DAG.getConstant(0x00FF, DL, MVT::v8i16));
15140 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
15141 VHiHalf = DAG.getUNDEF(MVT::v8i16);
15143 // Squash the masks to point directly into VLoHalf.
15144 for (int &M : LoBlendMask)
15147 for (int &M : HiBlendMask)
15151 // Otherwise just unpack the low half of V into VLoHalf and the high half into
15152 // VHiHalf so that we can blend them as i16s.
15153 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
15155 VLoHalf = DAG.getBitcast(
15156 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
15157 VHiHalf = DAG.getBitcast(
15158 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
15161 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
15162 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
15164 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
15167 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
15169 /// This routine breaks down the specific type of 128-bit shuffle and
15170 /// dispatches to the lowering routines accordingly.
15171 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
15172 MVT VT, SDValue V1, SDValue V2,
15173 const APInt &Zeroable,
15174 const X86Subtarget &Subtarget,
15175 SelectionDAG &DAG) {
15176 switch (VT.SimpleTy) {
15178 return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15180 return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15182 return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15184 return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15186 return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15188 return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15191 llvm_unreachable("Unimplemented!");
15195 /// Generic routine to split vector shuffle into half-sized shuffles.
15197 /// This routine just extracts two subvectors, shuffles them independently, and
15198 /// then concatenates them back together. This should work effectively with all
15199 /// AVX vector shuffle types.
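/// For example (illustrative), a v8f32 shuffle is split into two v4f32
/// shuffles of the extracted 128-bit halves, whose results are then rejoined
/// with a CONCAT_VECTORS node.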
15200 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
15201 SDValue V2, ArrayRef<int> Mask,
15202 SelectionDAG &DAG) {
15203 assert(VT.getSizeInBits() >= 256 &&
15204 "Only for 256-bit or wider vector shuffles!");
15205 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
15206 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
15208 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
15209 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
15211 int NumElements = VT.getVectorNumElements();
15212 int SplitNumElements = NumElements / 2;
15213 MVT ScalarVT = VT.getVectorElementType();
15214 MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);
15216 // Use splitVector/extractSubVector so that split build-vectors just build two
15217 // narrower build vectors. This helps shuffling with splats and zeros.
15218 auto SplitVector = [&](SDValue V) {
15220 std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
15221 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
15222 DAG.getBitcast(SplitVT, HiV));
15225 SDValue LoV1, HiV1, LoV2, HiV2;
15226 std::tie(LoV1, HiV1) = SplitVector(V1);
15227 std::tie(LoV2, HiV2) = SplitVector(V2);
15229 // Now create two 4-way blends of these half-width vectors.
15230 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
15231 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
15232 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
15233 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
15234 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
15235 for (int i = 0; i < SplitNumElements; ++i) {
15236 int M = HalfMask[i];
15237 if (M >= NumElements) {
15238 if (M >= NumElements + SplitNumElements)
15242 V2BlendMask[i] = M - NumElements;
15243 BlendMask[i] = SplitNumElements + i;
15244 } else if (M >= 0) {
15245 if (M >= SplitNumElements)
15249 V1BlendMask[i] = M;
15254 // Because the lowering happens after all combining takes place, we need to
15255 // manually combine these blend masks as much as possible so that we create
15256 // a minimal number of high-level vector shuffle nodes.
15258 // First try just blending the halves of V1 or V2.
15259 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
15260 return DAG.getUNDEF(SplitVT);
15261 if (!UseLoV2 && !UseHiV2)
15262 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
15263 if (!UseLoV1 && !UseHiV1)
15264 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    SDValue V1Blend, V2Blend;
    if (UseLoV1 && UseHiV1) {
      V1Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    } else {
      // We only use half of V1 so map the usage down into the final blend mask.
      V1Blend = UseLoV1 ? LoV1 : HiV1;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
          BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
    }
    if (UseLoV2 && UseHiV2) {
      V2Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    } else {
      // We only use half of V2 so map the usage down into the final blend mask.
      V2Blend = UseLoV2 ? LoV2 : HiV2;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= SplitNumElements)
          BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
    }
    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
  };

  SDValue Lo = HalfBlend(LoMask);
  SDValue Hi = HalfBlend(HiMask);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
/// Either split a vector in halves or decompose the shuffles and the
/// blend.
///
/// This is provided as a good fallback for many lowerings of non-single-input
/// shuffles with more than one 128-bit lane. In those cases, we want to select
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
15302 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
15303 SDValue V2, ArrayRef<int> Mask,
15304 const X86Subtarget &Subtarget,
15305 SelectionDAG &DAG) {
15306 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
15307 "shuffles as it could then recurse on itself.");
15308 int Size = Mask.size();
15310 // If this can be modeled as a broadcast of two elements followed by a blend,
15311 // prefer that lowering. This is especially important because broadcasts can
15312 // often fold with memory operands.
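  // For example, a v4f64 mask <2, 6, 2, 6> reads only V1[2] and V2[2], so it
  // decomposes into two broadcasts that are then blended together.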
  auto DoBothBroadcast = [&] {
    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
    for (int M : Mask)
      if (M >= Size) {
        if (V2BroadcastIdx < 0)
          V2BroadcastIdx = M - Size;
        else if (M - Size != V2BroadcastIdx)
          return false;
      } else if (M >= 0) {
        if (V1BroadcastIdx < 0)
          V1BroadcastIdx = M;
        else if (M != V1BroadcastIdx)
          return false;
      }
    return true;
  };
  if (DoBothBroadcast())
    return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
                                                Subtarget, DAG);

15333 // If the inputs all stem from a single 128-bit lane of each input, then we
15334 // split them rather than blending because the split will decompose to
15335 // unusually few instructions.
15336 int LaneCount = VT.getSizeInBits() / 128;
15337 int LaneSize = Size / LaneCount;
15338 SmallBitVector LaneInputs[2];
15339 LaneInputs[0].resize(LaneCount, false);
15340 LaneInputs[1].resize(LaneCount, false);
15341 for (int i = 0; i < Size; ++i)
15343 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
15344 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
15345 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15347 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
15348 // that the decomposed single-input shuffles don't end up here.
  return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
                                              DAG);
}
15353 // Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15354 // TODO: Extend to support v8f32 (+ 512-bit shuffles).
15355 static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
15356 SDValue V1, SDValue V2,
15357 ArrayRef<int> Mask,
15358 SelectionDAG &DAG) {
15359 assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
15361 int LHSMask[4] = {-1, -1, -1, -1};
15362 int RHSMask[4] = {-1, -1, -1, -1};
15363 unsigned SHUFPMask = 0;
15365 // As SHUFPD uses a single LHS/RHS element per lane, we can always
15366 // perform the shuffle once the lanes have been shuffled in place.
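  // e.g. for a v4f64 mask <2, 5, 1, 6>: even result elements come from LHS and
  // odd ones from RHS, giving LHSMask <2, -1, -1, 1>, RHSMask <-1, 5, 6, -1>
  // and SHUFPMask 0b0110.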
  for (int i = 0; i != 4; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int LaneBase = i & ~1;
    auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
    LaneMask[LaneBase + (M & 1)] = M;
    SHUFPMask |= (M & 1) << i;
  }

15377 SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
15378 SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
15379 return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
15380 DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
15383 /// Lower a vector shuffle crossing multiple 128-bit lanes as
15384 /// a lane permutation followed by a per-lane permutation.
/// This is mainly for cases where we can have non-repeating permutes
/// in each lane.
///
15389 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
15390 /// we should investigate merging them.
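/// e.g. a v4i64 reversal <3, 2, 1, 0> becomes a lane permute <2, 3, 0, 1>
/// followed by the in-lane permute <1, 0, 3, 2>.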
15391 static SDValue lowerShuffleAsLanePermuteAndPermute(
15392 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15393 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
15394 int NumElts = VT.getVectorNumElements();
15395 int NumLanes = VT.getSizeInBits() / 128;
15396 int NumEltsPerLane = NumElts / NumLanes;
15398 SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
15399 SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
15401 for (int i = 0; i != NumElts; ++i) {
15406 // Ensure that each lane comes from a single source lane.
15407 int SrcLane = M / NumEltsPerLane;
15408 int DstLane = i / NumEltsPerLane;
15409 if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
15411 SrcLaneMask[DstLane] = SrcLane;
15413 PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
15416 // Make sure we set all elements of the lane mask, to avoid undef propagation.
15417 SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
15418 for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
15419 int SrcLane = SrcLaneMask[DstLane];
15421 for (int j = 0; j != NumEltsPerLane; ++j) {
15422 LaneMask[(DstLane * NumEltsPerLane) + j] =
15423 (SrcLane * NumEltsPerLane) + j;
15427 // If we're only shuffling a single lowest lane and the rest are identity
15428 // then don't bother.
15429 // TODO - isShuffleMaskInputInPlace could be extended to something like this.
15430 int NumIdentityLanes = 0;
15431 bool OnlyShuffleLowestLane = true;
15432 for (int i = 0; i != NumLanes; ++i) {
15433 if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
15434 i * NumEltsPerLane))
15435 NumIdentityLanes++;
15436 else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
15437 OnlyShuffleLowestLane = false;
15439 if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
15442 SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
15443 return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
15446 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
15447 /// source with a lane permutation.
15449 /// This lowering strategy results in four instructions in the worst case for a
15450 /// single-input cross lane shuffle which is lower than any other fully general
15451 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
15452 /// shuffle pattern should be handled prior to trying this lowering.
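/// e.g. for a v8f32 mask <4, 5, 2, 3, 0, 1, 6, 7>, the lanes of V1 are flipped
/// and the cross-lane elements are then taken in-lane from the flipped copy,
/// i.e. the final shuffle mask becomes <8, 9, 2, 3, 12, 13, 6, 7> on
/// (V1, Flipped).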
15453 static SDValue lowerShuffleAsLanePermuteAndShuffle(
15454 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15455 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
15456 // FIXME: This should probably be generalized for 512-bit vectors as well.
15457 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
15458 int Size = Mask.size();
15459 int LaneSize = Size / 2;
15461 // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15462 // Only do this if the elements aren't all from the lower lane,
15463 // otherwise we're (probably) better off doing a split.
15464 if (VT == MVT::v4f64 &&
15465 !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
15467 lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG))
15470 // If there are only inputs from one 128-bit lane, splitting will in fact be
15471 // less expensive. The flags track whether the given lane contains an element
15472 // that crosses to another lane.
15473 if (!Subtarget.hasAVX2()) {
15474 bool LaneCrossing[2] = {false, false};
15475 for (int i = 0; i < Size; ++i)
15476 if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
15477 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
15478 if (!LaneCrossing[0] || !LaneCrossing[1])
15479 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15481 bool LaneUsed[2] = {false, false};
15482 for (int i = 0; i < Size; ++i)
15484 LaneUsed[(Mask[i] % Size) / LaneSize] = true;
15485 if (!LaneUsed[0] || !LaneUsed[1])
15486 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15489 // TODO - we could support shuffling V2 in the Flipped input.
15490 assert(V2.isUndef() &&
15491 "This last part of this routine only works on single input shuffles");
15493 SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
15494 for (int i = 0; i < Size; ++i) {
15495 int &M = InLaneMask[i];
15498 if (((M % Size) / LaneSize) != (i / LaneSize))
15499 M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
15501 assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
15502 "In-lane shuffle mask expected");
15504 // Flip the lanes, and shuffle the results which should now be in-lane.
15505 MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
15506 SDValue Flipped = DAG.getBitcast(PVT, V1);
15508 DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
15509 Flipped = DAG.getBitcast(VT, Flipped);
15510 return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
15513 /// Handle lowering 2-lane 128-bit shuffles.
15514 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
15515 SDValue V2, ArrayRef<int> Mask,
15516 const APInt &Zeroable,
15517 const X86Subtarget &Subtarget,
15518 SelectionDAG &DAG) {
  // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
  if (Subtarget.hasAVX2() && V2.isUndef())
    return SDValue();

15523 bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
15525 SmallVector<int, 4> WidenedMask;
15526 if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
15529 bool IsLowZero = (Zeroable & 0x3) == 0x3;
15530 bool IsHighZero = (Zeroable & 0xc) == 0xc;
15532 // Try to use an insert into a zero vector.
15533 if (WidenedMask[0] == 0 && IsHighZero) {
15534 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15535 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
15536 DAG.getIntPtrConstant(0, DL));
15537 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
15538 getZeroVector(VT, Subtarget, DAG, DL), LoV,
15539 DAG.getIntPtrConstant(0, DL));
  // TODO: If minimizing size and one of the inputs is a zero vector and the
  // zero vector has only one use, we could use a VPERM2X128 to save the
  // instruction bytes needed to explicitly generate the zero vector.
15546 // Blends are faster and handle all the non-lane-crossing cases.
15547 if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
15551 // If either input operand is a zero vector, use VPERM2X128 because its mask
15552 // allows us to replace the zero input with an implicit zero.
15553 if (!IsLowZero && !IsHighZero) {
    // Check for patterns which can be matched with a single insert of a 128-bit
    // subvector.
15556 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
15557 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
15559 // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
15560 // this will likely become vinsertf128 which can't fold a 256-bit memop.
15561 if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
15562 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15563 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
15564 OnlyUsesV1 ? V1 : V2,
15565 DAG.getIntPtrConstant(0, DL));
15566 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
15567 DAG.getIntPtrConstant(2, DL));
15571 // Try to use SHUF128 if possible.
15572 if (Subtarget.hasVLX()) {
15573 if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
15574 unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
15575 ((WidenedMask[1] % 2) << 1);
15576 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
15577 DAG.getTargetConstant(PermMask, DL, MVT::i8));
15582 // Otherwise form a 128-bit permutation. After accounting for undefs,
15583 // convert the 64-bit shuffle mask selection values into 128-bit
15584 // selection bits by dividing the indexes by 2 and shifting into positions
15585 // defined by a vperm2*128 instruction's immediate control byte.
15587 // The immediate permute control byte looks like this:
15588 // [1:0] - select 128 bits from sources for low half of destination
15590 // [3] - zero low half of destination
15591 // [5:4] - select 128 bits from sources for high half of destination
15593 // [7] - zero high half of destination
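  // e.g. a widened mask <1, 2> (V1's high half then V2's low half) encodes as
  // PermMask 0x21.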
15595 assert((WidenedMask[0] >= 0 || IsLowZero) &&
15596 (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
15598 unsigned PermMask = 0;
15599 PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
15600 PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
15602 // Check the immediate mask and replace unused sources with undef.
15603 if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
15604 V1 = DAG.getUNDEF(VT);
15605 if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
15606 V2 = DAG.getUNDEF(VT);
15608 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
15609 DAG.getTargetConstant(PermMask, DL, MVT::i8));
15612 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
15613 /// shuffling each lane.
15615 /// This attempts to create a repeated lane shuffle where each lane uses one
15616 /// or two of the lanes of the inputs. The lanes of the input vectors are
15617 /// shuffled in one or two independent shuffles to get the lanes into the
15618 /// position needed by the final shuffle.
15619 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
15620 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15621 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15622 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
15624 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
15627 int NumElts = Mask.size();
15628 int NumLanes = VT.getSizeInBits() / 128;
15629 int NumLaneElts = 128 / VT.getScalarSizeInBits();
15630 SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
15631 SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
15633 // First pass will try to fill in the RepeatMask from lanes that need two
15635 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15636 int Srcs[2] = {-1, -1};
15637 SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
15638 for (int i = 0; i != NumLaneElts; ++i) {
15639 int M = Mask[(Lane * NumLaneElts) + i];
      // Determine which of the possible input lanes (NumLanes from each source)
      // this element comes from. Assign that as one of the sources for this
      // lane. We can assign up to 2 sources for this lane. If we run out of
      // sources we can't do anything.
15646 int LaneSrc = M / NumLaneElts;
      int Src;
      if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
        Src = 0;
      else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
        Src = 1;
      else
        return SDValue();

      Srcs[Src] = LaneSrc;
      InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
    }

    // If this lane has two sources, see if it fits with the repeat mask so far.
    if (Srcs[1] < 0)
      continue;

15663 LaneSrcs[Lane][0] = Srcs[0];
15664 LaneSrcs[Lane][1] = Srcs[1];
15666 auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
15667 assert(M1.size() == M2.size() && "Unexpected mask size");
15668 for (int i = 0, e = M1.size(); i != e; ++i)
15669 if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
15674 auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
15675 assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
15676 for (int i = 0, e = MergedMask.size(); i != e; ++i) {
15680 assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
15681 "Unexpected mask element");
15686 if (MatchMasks(InLaneMask, RepeatMask)) {
15687 // Merge this lane mask into the final repeat mask.
15688 MergeMasks(InLaneMask, RepeatMask);
15692 // Didn't find a match. Swap the operands and try again.
15693 std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
15694 ShuffleVectorSDNode::commuteMask(InLaneMask);
15696 if (MatchMasks(InLaneMask, RepeatMask)) {
15697 // Merge this lane mask into the final repeat mask.
15698 MergeMasks(InLaneMask, RepeatMask);
15702 // Couldn't find a match with the operands in either order.
15706 // Now handle any lanes with only one source.
15707 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15708 // If this lane has already been processed, skip it.
15709 if (LaneSrcs[Lane][0] >= 0)
15712 for (int i = 0; i != NumLaneElts; ++i) {
15713 int M = Mask[(Lane * NumLaneElts) + i];
15717 // If RepeatMask isn't defined yet we can define it ourself.
15718 if (RepeatMask[i] < 0)
15719 RepeatMask[i] = M % NumLaneElts;
15721 if (RepeatMask[i] < NumElts) {
15722 if (RepeatMask[i] != M % NumLaneElts)
15724 LaneSrcs[Lane][0] = M / NumLaneElts;
15726 if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15728 LaneSrcs[Lane][1] = M / NumLaneElts;
15732 if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15736 SmallVector<int, 16> NewMask(NumElts, -1);
15737 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15738 int Src = LaneSrcs[Lane][0];
15739 for (int i = 0; i != NumLaneElts; ++i) {
15742 M = Src * NumLaneElts + i;
15743 NewMask[Lane * NumLaneElts + i] = M;
15746 SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15747 // Ensure we didn't get back the shuffle we started with.
15748 // FIXME: This is a hack to make up for some splat handling code in
15749 // getVectorShuffle.
15750 if (isa<ShuffleVectorSDNode>(NewV1) &&
15751 cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15754 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15755 int Src = LaneSrcs[Lane][1];
15756 for (int i = 0; i != NumLaneElts; ++i) {
15759 M = Src * NumLaneElts + i;
15760 NewMask[Lane * NumLaneElts + i] = M;
15763 SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15764 // Ensure we didn't get back the shuffle we started with.
15765 // FIXME: This is a hack to make up for some splat handling code in
15766 // getVectorShuffle.
15767 if (isa<ShuffleVectorSDNode>(NewV2) &&
15768 cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15771 for (int i = 0; i != NumElts; ++i) {
15772 NewMask[i] = RepeatMask[i % NumLaneElts];
15773 if (NewMask[i] < 0)
15776 NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15778 return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15781 /// If the input shuffle mask results in a vector that is undefined in all upper
15782 /// or lower half elements and that mask accesses only 2 halves of the
15783 /// shuffle's operands, return true. A mask of half the width with mask indexes
15784 /// adjusted to access the extracted halves of the original shuffle operands is
15785 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
15786 /// lower half of each input operand is accessed.
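/// e.g. a v8i32 mask <u, u, u, u, 2, 3, 10, 11> has an undef lower half and
/// yields HalfMask <2, 3, 6, 7> with HalfIdx1 = 0 (lower half of V1) and
/// HalfIdx2 = 2 (lower half of V2).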
static bool
getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15789 int &HalfIdx1, int &HalfIdx2) {
15790 assert((Mask.size() == HalfMask.size() * 2) &&
15791 "Expected input mask to be twice as long as output");
15793 // Exactly one half of the result must be undef to allow narrowing.
15794 bool UndefLower = isUndefLowerHalf(Mask);
15795 bool UndefUpper = isUndefUpperHalf(Mask);
15796 if (UndefLower == UndefUpper)
15799 unsigned HalfNumElts = HalfMask.size();
  unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
  HalfIdx1 = -1;
  HalfIdx2 = -1;
15803 for (unsigned i = 0; i != HalfNumElts; ++i) {
    int M = Mask[i + MaskIndexOffset];
    if (M < 0) {
      HalfMask[i] = M;
      continue;
    }

15810 // Determine which of the 4 half vectors this element is from.
15811 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15812 int HalfIdx = M / HalfNumElts;
15814 // Determine the element index into its half vector source.
15815 int HalfElt = M % HalfNumElts;
15817 // We can shuffle with up to 2 half vectors, set the new 'half'
15818 // shuffle mask accordingly.
    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
      HalfMask[i] = HalfElt;
      HalfIdx1 = HalfIdx;
      continue;
    }
    if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
      HalfMask[i] = HalfElt + HalfNumElts;
      HalfIdx2 = HalfIdx;
      continue;
    }

    // Too many half vectors referenced.
    return false;
  }

  return true;
}
15837 /// Given the output values from getHalfShuffleMask(), create a half width
15838 /// shuffle of extracted vectors followed by an insert back to full width.
15839 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15840 ArrayRef<int> HalfMask, int HalfIdx1,
15841 int HalfIdx2, bool UndefLower,
15842 SelectionDAG &DAG, bool UseConcat = false) {
15843 assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15844 assert(V1.getValueType().isSimple() && "Expecting only simple types");
15846 MVT VT = V1.getSimpleValueType();
15847 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15848 unsigned HalfNumElts = HalfVT.getVectorNumElements();
  auto getHalfVector = [&](int HalfIdx) {
    if (HalfIdx < 0)
      return DAG.getUNDEF(HalfVT);
    SDValue V = (HalfIdx < 2 ? V1 : V2);
    HalfIdx = (HalfIdx % 2) * HalfNumElts;
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
                       DAG.getIntPtrConstant(HalfIdx, DL));
  };

15859 // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15860 SDValue Half1 = getHalfVector(HalfIdx1);
15861 SDValue Half2 = getHalfVector(HalfIdx2);
15862 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);

  if (UseConcat) {
    SDValue Op0 = V;
    SDValue Op1 = DAG.getUNDEF(HalfVT);
    if (UndefLower)
      std::swap(Op0, Op1);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
  }

15871 unsigned Offset = UndefLower ? HalfNumElts : 0;
15872 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15873 DAG.getIntPtrConstant(Offset, DL));
15876 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15877 /// This allows for fast cases such as subvector extraction/insertion
15878 /// or shuffling smaller vector types which can lower more efficiently.
15879 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15880 SDValue V2, ArrayRef<int> Mask,
15881 const X86Subtarget &Subtarget,
15882 SelectionDAG &DAG) {
15883 assert((VT.is256BitVector() || VT.is512BitVector()) &&
15884 "Expected 256-bit or 512-bit vector");
15886 bool UndefLower = isUndefLowerHalf(Mask);
15887 if (!UndefLower && !isUndefUpperHalf(Mask))
15890 assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15891 "Completely undef shuffle mask should have been simplified already");
15893 // Upper half is undef and lower half is whole upper subvector.
15894 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15895 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15896 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15898 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15899 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15900 DAG.getIntPtrConstant(HalfNumElts, DL));
15901 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15902 DAG.getIntPtrConstant(0, DL));
15905 // Lower half is undef and upper half is whole lower subvector.
15906 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15908 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15909 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15910 DAG.getIntPtrConstant(0, DL));
15911 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15912 DAG.getIntPtrConstant(HalfNumElts, DL));
15915 int HalfIdx1, HalfIdx2;
15916 SmallVector<int, 8> HalfMask(HalfNumElts);
15917 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15920 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15922 // Only shuffle the halves of the inputs when useful.
15923 unsigned NumLowerHalves =
15924 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15925 unsigned NumUpperHalves =
15926 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15927 assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
15929 // Determine the larger pattern of undef/halves, then decide if it's worth
15930 // splitting the shuffle based on subtarget capabilities and types.
  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
  if (!UndefLower) {
    // XXXXuuuu: no insert is needed.
    // Always extract lowers when setting lower - these are all free subreg ops.
    if (NumUpperHalves == 0)
      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                   UndefLower, DAG);

    if (NumUpperHalves == 1) {
      // AVX2 has efficient 32/64-bit element cross-lane shuffles.
      if (Subtarget.hasAVX2()) {
        // extract128 + vunpckhps/vshufps, is better than vblend + vpermps.
        if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
            !is128BitUnpackShuffleMask(HalfMask) &&
            (!isSingleSHUFPSMask(HalfMask) ||
             Subtarget.hasFastVariableShuffle()))
          return SDValue();
        // If this is a unary shuffle (assume that the 2nd operand is
        // canonicalized to undef), then we can use vpermpd. Otherwise, we
        // are better off extracting the upper half of 1 operand and using a
        // narrow shuffle.
        if (EltWidth == 64 && V2.isUndef())
          return SDValue();
      }
      // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
      if (Subtarget.hasAVX512() && VT.is512BitVector())
        return SDValue();
      // Extract + narrow shuffle is better than the wide alternative.
      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                   UndefLower, DAG);
    }

    // Don't extract both uppers, instead shuffle and then extract.
    assert(NumUpperHalves == 2 && "Half vector count went wrong");
    return SDValue();
  }

  // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
  if (NumUpperHalves == 0) {
    // AVX2 has efficient 64-bit element cross-lane shuffles.
    // TODO: Refine to account for unary shuffle, splat, and other masks?
    if (Subtarget.hasAVX2() && EltWidth == 64)
      return SDValue();
    // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
    if (Subtarget.hasAVX512() && VT.is512BitVector())
      return SDValue();
    // Narrow shuffle + insert is better than the wide alternative.
    return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                 UndefLower, DAG);
  }

  // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
  return SDValue();
}
/// Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
15989 /// This returns true if the elements from a particular input are already in the
15990 /// slot required by the given mask and require no permutation.
15991 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
15992 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
15993 int Size = Mask.size();
15994 for (int i = 0; i < Size; ++i)
15995 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
16001 /// Handle case where shuffle sources are coming from the same 128-bit lane and
16002 /// every lane can be represented as the same repeating mask - allowing us to
16003 /// shuffle the sources with the repeating shuffle and then permute the result
16004 /// to the destination lanes.
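/// e.g. a v8f32 shuffle <5, 4, 7, 6, 1, 0, 3, 2> can be lowered as the
/// repeating in-lane shuffle <1, 0, 3, 2, 5, 4, 7, 6> followed by a permute
/// that swaps the two 128-bit lanes.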
16005 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
16006 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
16007 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
16008 int NumElts = VT.getVectorNumElements();
16009 int NumLanes = VT.getSizeInBits() / 128;
16010 int NumLaneElts = NumElts / NumLanes;
16012 // On AVX2 we may be able to just shuffle the lowest elements and then
16013 // broadcast the result.
16014 if (Subtarget.hasAVX2()) {
16015 for (unsigned BroadcastSize : {16, 32, 64}) {
16016 if (BroadcastSize <= VT.getScalarSizeInBits())
16018 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
16020 // Attempt to match a repeating pattern every NumBroadcastElts,
16021 // accounting for UNDEFs but only references the lowest 128-bit
16022 // lane of the inputs.
16023 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
16024 for (int i = 0; i != NumElts; i += NumBroadcastElts)
16025 for (int j = 0; j != NumBroadcastElts; ++j) {
16026 int M = Mask[i + j];
16029 int &R = RepeatMask[j];
16030 if (0 != ((M % NumElts) / NumLaneElts))
16032 if (0 <= R && R != M)
16039 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
16040 if (!FindRepeatingBroadcastMask(RepeatMask))
16043 // Shuffle the (lowest) repeated elements in place for broadcast.
16044 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
16046 // Shuffle the actual broadcast.
16047 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
16048 for (int i = 0; i != NumElts; i += NumBroadcastElts)
16049 for (int j = 0; j != NumBroadcastElts; ++j)
16050 BroadcastMask[i + j] = j;
16051 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
16056 // Bail if the shuffle mask doesn't cross 128-bit lanes.
16057 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
16060 // Bail if we already have a repeated lane shuffle mask.
16061 SmallVector<int, 8> RepeatedShuffleMask;
16062 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
16065 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
16066 // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
16067 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
16068 int NumSubLanes = NumLanes * SubLaneScale;
16069 int NumSubLaneElts = NumLaneElts / SubLaneScale;
16071 // Check that all the sources are coming from the same lane and see if we can
16072 // form a repeating shuffle mask (local to each sub-lane). At the same time,
16073 // determine the source sub-lane for each destination sub-lane.
16074 int TopSrcSubLane = -1;
16075 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
16076 SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
16077 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
16078 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
16080 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
16081 // Extract the sub-lane mask, check that it all comes from the same lane
16082 // and normalize the mask entries to come from the first lane.
16084 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
16085 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
16086 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
16089 int Lane = (M % NumElts) / NumLaneElts;
16090 if ((0 <= SrcLane) && (SrcLane != Lane))
16093 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
16094 SubLaneMask[Elt] = LocalM;
16097 // Whole sub-lane is UNDEF.
16101 // Attempt to match against the candidate repeated sub-lane masks.
16102 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
16103 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
16104 for (int i = 0; i != NumSubLaneElts; ++i) {
16105 if (M1[i] < 0 || M2[i] < 0)
16107 if (M1[i] != M2[i])
16113 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
16114 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
16117 // Merge the sub-lane mask into the matching repeated sub-lane mask.
16118 for (int i = 0; i != NumSubLaneElts; ++i) {
16119 int M = SubLaneMask[i];
16122 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
16123 "Unexpected mask element");
16124 RepeatedSubLaneMask[i] = M;
16127 // Track the top most source sub-lane - by setting the remaining to UNDEF
16128 // we can greatly simplify shuffle matching.
16129 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
16130 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
16131 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
16135 // Bail if we failed to find a matching repeated sub-lane mask.
16136 if (Dst2SrcSubLanes[DstSubLane] < 0)
16139 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
16140 "Unexpected source lane");
16142 // Create a repeating shuffle mask for the entire vector.
16143 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
16144 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
16145 int Lane = SubLane / SubLaneScale;
16146 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
16147 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
16148 int M = RepeatedSubLaneMask[Elt];
16151 int Idx = (SubLane * NumSubLaneElts) + Elt;
16152 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
16155 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
16157 // Shuffle each source sub-lane to its destination.
16158 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
16159 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
16160 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
16161 if (SrcSubLane < 0)
16163 for (int j = 0; j != NumSubLaneElts; ++j)
16164 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
16167 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
16171 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
16172 bool &ForceV1Zero, bool &ForceV2Zero,
16173 unsigned &ShuffleImm, ArrayRef<int> Mask,
16174 const APInt &Zeroable) {
16175 int NumElts = VT.getVectorNumElements();
16176 assert(VT.getScalarSizeInBits() == 64 &&
16177 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
16178 "Unexpected data type for VSHUFPD");
16179 assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
16180 "Illegal shuffle mask");
16182 bool ZeroLane[2] = { true, true };
16183 for (int i = 0; i < NumElts; ++i)
16184 ZeroLane[i & 1] &= Zeroable[i];
16186 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
  // Mask for V4F64: 0/1, 4/5, 2/3, 6/7..
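  // e.g. for v4f64, the mask <1, 5, 2, 7> matches the SHUFPD pattern and
  // encodes as ShuffleImm 0b1011.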
16189 bool ShufpdMask = true;
16190 bool CommutableMask = true;
16191 for (int i = 0; i < NumElts; ++i) {
16192 if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
16196 int Val = (i & 6) + NumElts * (i & 1);
16197 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
16198 if (Mask[i] < Val || Mask[i] > Val + 1)
16199 ShufpdMask = false;
16200 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
16201 CommutableMask = false;
16202 ShuffleImm |= (Mask[i] % 2) << i;
  if (!ShufpdMask && !CommutableMask)
    return false;

  if (!ShufpdMask && CommutableMask)
    std::swap(V1, V2);

  ForceV1Zero = ZeroLane[0];
  ForceV2Zero = ZeroLane[1];
  return true;
}

16216 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
16217 SDValue V2, ArrayRef<int> Mask,
16218 const APInt &Zeroable,
16219 const X86Subtarget &Subtarget,
16220 SelectionDAG &DAG) {
16221 assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
16222 "Unexpected data type for VSHUFPD");
16224 unsigned Immediate = 0;
16225 bool ForceV1Zero = false, ForceV2Zero = false;
  if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
                              Mask, Zeroable))
    return SDValue();

  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
  if (ForceV1Zero)
    V1 = getZeroVector(VT, Subtarget, DAG, DL);
  if (ForceV2Zero)
    V2 = getZeroVector(VT, Subtarget, DAG, DL);

16236 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
16237 DAG.getTargetConstant(Immediate, DL, MVT::i8));
// Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
// by zeroable elements in the remaining 24 elements. Turn this into two
// vmovqb instructions shuffled together.
16243 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
16244 SDValue V1, SDValue V2,
16245 ArrayRef<int> Mask,
16246 const APInt &Zeroable,
16247 SelectionDAG &DAG) {
16248 assert(VT == MVT::v32i8 && "Unexpected type!");
16250 // The first 8 indices should be every 8th element.
16251 if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
16254 // Remaining elements need to be zeroable.
16255 if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
16258 V1 = DAG.getBitcast(MVT::v4i64, V1);
16259 V2 = DAG.getBitcast(MVT::v4i64, V2);
16261 V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
16262 V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
16264 // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
16265 // the upper bits of the result using an unpckldq.
16266 SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
16267 { 0, 1, 2, 3, 16, 17, 18, 19,
16268 4, 5, 6, 7, 20, 21, 22, 23 });
16269 // Insert the unpckldq into a zero vector to widen to v32i8.
16270 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
16271 DAG.getConstant(0, DL, MVT::v32i8), Unpack,
16272 DAG.getIntPtrConstant(0, DL));
16276 /// Handle lowering of 4-lane 64-bit floating point shuffles.
16278 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
16279 /// isn't available.
16280 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16281 const APInt &Zeroable, SDValue V1, SDValue V2,
16282 const X86Subtarget &Subtarget,
16283 SelectionDAG &DAG) {
16284 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
16285 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
16286 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
16288 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
16292 if (V2.isUndef()) {
16293 // Check for being able to broadcast a single element.
16294 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
16295 Mask, Subtarget, DAG))
16298 // Use low duplicate instructions for masks that match their pattern.
16299 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
16300 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
16302 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
16303 // Non-half-crossing single input shuffles can be lowered with an
16304 // interleaved permutation.
16305 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16306 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
16307 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
16308 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16311 // With AVX2 we have direct support for this permutation.
16312 if (Subtarget.hasAVX2())
16313 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
16314 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
16316 // Try to create an in-lane repeating shuffle mask and then shuffle the
16317 // results into the target lanes.
16318 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16319 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
16322 // Try to permute the lanes and then use a per-lane permute.
16323 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
16324 Mask, DAG, Subtarget))
16327 // Otherwise, fall back.
16328 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
16332 // Use dedicated unpack instructions for masks that match their pattern.
16333 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
16336 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
16337 Zeroable, Subtarget, DAG))
16340 // Check if the blend happens to exactly fit that of SHUFPD.
16341 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
16342 Zeroable, Subtarget, DAG))
16345 // If we have lane crossing shuffles AND they don't all come from the lower
16346 // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
16347 // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
16348 // canonicalize to a blend of splat which isn't necessary for this combine.
16349 if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
16350 !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
16351 (V1.getOpcode() != ISD::BUILD_VECTOR) &&
16352 (V2.getOpcode() != ISD::BUILD_VECTOR))
16353 if (SDValue Op = lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2,
16357 // If we have one input in place, then we can permute the other input and
16358 // blend the result.
16359 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
16360 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
16363 // Try to create an in-lane repeating shuffle mask and then shuffle the
16364 // results into the target lanes.
16365 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16366 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
16369 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16370 // shuffle. However, if we have AVX2 and either inputs are already in place,
16371 // we will be able to shuffle even across lanes the other input in a single
16372 // instruction so skip this pattern.
16373 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
16374 isShuffleMaskInputInPlace(1, Mask))))
16375 if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
16376 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
16379 // If we have VLX support, we can use VEXPAND.
16380 if (Subtarget.hasVLX())
16381 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
  // If we have AVX2 then we always want to lower with a blend because at v4 we
  // can fully permute the elements.
16387 if (Subtarget.hasAVX2())
16388 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
16391 // Otherwise fall back on generic lowering.
16392 return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
16396 /// Handle lowering of 4-lane 64-bit integer shuffles.
16398 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
16400 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16401 const APInt &Zeroable, SDValue V1, SDValue V2,
16402 const X86Subtarget &Subtarget,
16403 SelectionDAG &DAG) {
16404 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
16405 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
16406 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
16407 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
16409 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
16413 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
16414 Zeroable, Subtarget, DAG))
16417 // Check for being able to broadcast a single element.
16418 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
16422 if (V2.isUndef()) {
16423 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
16424 // can use lower latency instructions that will operate on both lanes.
16425 SmallVector<int, 2> RepeatedMask;
16426 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
16427 SmallVector<int, 4> PSHUFDMask;
16428 narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
16429 return DAG.getBitcast(
16431 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
16432 DAG.getBitcast(MVT::v8i32, V1),
16433 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16436 // AVX2 provides a direct instruction for permuting a single input across
16438 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
16439 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
16442 // Try to use shift instructions.
16443 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
16444 Zeroable, Subtarget, DAG))
16447 // If we have VLX support, we can use VALIGN or VEXPAND.
16448 if (Subtarget.hasVLX()) {
16449 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
16453 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
16458 // Try to use PALIGNR.
16459 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
16463 // Use dedicated unpack instructions for masks that match their pattern.
16464 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
16467 // If we have one input in place, then we can permute the other input and
16468 // blend the result.
16469 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
16470 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
16473 // Try to create an in-lane repeating shuffle mask and then shuffle the
16474 // results into the target lanes.
16475 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16476 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
16479 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16480 // shuffle. However, if we have AVX2 and either inputs are already in place,
16481 // we will be able to shuffle even across lanes the other input in a single
16482 // instruction so skip this pattern.
16483 if (!isShuffleMaskInputInPlace(0, Mask) &&
16484 !isShuffleMaskInputInPlace(1, Mask))
16485 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16486 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
16489 // Otherwise fall back on generic blend lowering.
16490 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
16494 /// Handle lowering of 8-lane 32-bit floating point shuffles.
16496 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
16497 /// isn't available.
16498 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16499 const APInt &Zeroable, SDValue V1, SDValue V2,
16500 const X86Subtarget &Subtarget,
16501 SelectionDAG &DAG) {
16502 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
16503 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
16504 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16506 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
16507 Zeroable, Subtarget, DAG))
16510 // Check for being able to broadcast a single element.
16511 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
16515 // If the shuffle mask is repeated in each 128-bit lane, we have many more
16516 // options to efficiently lower the shuffle.
16517 SmallVector<int, 4> RepeatedMask;
16518 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
16519 assert(RepeatedMask.size() == 4 &&
16520 "Repeated masks must be half the mask width!");
16522 // Use even/odd duplicate instructions for masks that match their pattern.
16523 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16524 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
16525 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16526 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
16529 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
16530 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16532 // Use dedicated unpack instructions for masks that match their pattern.
16533 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
16536 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
16537 // have already handled any direct blends.
16538 return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
16541 // Try to create an in-lane repeating shuffle mask and then shuffle the
16542 // results into the target lanes.
16543 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16544 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16547 // If we have a single input shuffle with different shuffle patterns in the
16548 // two 128-bit lanes use the variable mask to VPERMILPS.
16549 if (V2.isUndef()) {
16550 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
16551 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16552 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
16554 if (Subtarget.hasAVX2()) {
16555 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16556 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
16558 // Otherwise, fall back.
16559 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
16563 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16565 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16566 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16569 // If we have VLX support, we can use VEXPAND.
16570 if (Subtarget.hasVLX())
16571 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
  // For non-AVX512 targets, if the mask is equivalent to a 16-bit-element
  // in-lane unpack, try to split since after the split we get more efficient
  // code using vpunpcklwd and vpunpckhwd than with vblend.
16578 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
16579 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16583 // If we have AVX2 then we always want to lower with a blend because at v8 we
16584 // can fully permute the elements.
16585 if (Subtarget.hasAVX2())
16586 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
16589 // Otherwise fall back on generic lowering.
16590 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16594 /// Handle lowering of 8-lane 32-bit integer shuffles.
16596 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
16598 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16599 const APInt &Zeroable, SDValue V1, SDValue V2,
16600 const X86Subtarget &Subtarget,
16601 SelectionDAG &DAG) {
16602 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16603 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16604 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16605 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
16607 // Whenever we can lower this as a zext, that instruction is strictly faster
16608 // than any alternative. It also allows us to fold memory operands into the
16609 // shuffle in many cases.
16610 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
16611 Zeroable, Subtarget, DAG))
  // For non-AVX512 targets, if the mask is equivalent to a 16-bit-element
  // in-lane unpack, try to split since after the split we get more efficient
  // code using vpunpcklwd and vpunpckhwd than with vblend.
16617 if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
16618 !Subtarget.hasAVX512())
16619 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
16623 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
16624 Zeroable, Subtarget, DAG))
16627 // Check for being able to broadcast a single element.
16628 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
16632 // If the shuffle mask is repeated in each 128-bit lane we can use more
16633 // efficient instructions that mirror the shuffles across the two 128-bit
16635 SmallVector<int, 4> RepeatedMask;
16636 bool Is128BitLaneRepeatedShuffle =
16637 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
16638 if (Is128BitLaneRepeatedShuffle) {
16639 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16641 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
16642 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16644 // Use dedicated unpack instructions for masks that match their pattern.
16645 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
16649 // Try to use shift instructions.
16650 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
16651 Zeroable, Subtarget, DAG))
16654 // If we have VLX support, we can use VALIGN or EXPAND.
16655 if (Subtarget.hasVLX()) {
16656 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
16660 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
16665 // Try to use byte rotation instructions.
16666 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
16670 // Try to create an in-lane repeating shuffle mask and then shuffle the
16671 // results into the target lanes.
16672 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16673 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16676 if (V2.isUndef()) {
16677 // Try to produce a fixed cross-128-bit lane permute followed by unpack
16678 // because that should be faster than the variable permute alternatives.
16679 if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
16682 // If the shuffle patterns aren't repeated but it's a single input, directly
16683 // generate a cross-lane VPERMD instruction.
16684 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16685 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
16688 // Assume that a single SHUFPS is faster than an alternative sequence of
16689 // multiple instructions (even if the CPU has a domain penalty).
16690 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16691 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16692 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
16693 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
16694 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
16695 CastV1, CastV2, DAG);
16696 return DAG.getBitcast(MVT::v8i32, ShufPS);
16699 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16701 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16702 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16705 // Otherwise fall back on generic blend lowering.
16706 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
16710 /// Handle lowering of 16-lane 16-bit integer shuffles.
16712 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
16714 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16715 const APInt &Zeroable, SDValue V1, SDValue V2,
16716 const X86Subtarget &Subtarget,
16717 SelectionDAG &DAG) {
16718 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16719 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16720 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16721 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
16723 // Whenever we can lower this as a zext, that instruction is strictly faster
16724 // than any alternative. It also allows us to fold memory operands into the
16725 // shuffle in many cases.
16726 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16727 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16730 // Check for being able to broadcast a single element.
16731 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
16735 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
16736 Zeroable, Subtarget, DAG))
16739 // Use dedicated unpack instructions for masks that match their pattern.
16740 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
16743 // Use dedicated pack instructions for masks that match their pattern.
16744 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
16748 // Try to use shift instructions.
16749 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
16750 Zeroable, Subtarget, DAG))
16753 // Try to use byte rotation instructions.
16754 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16758 // Try to create an in-lane repeating shuffle mask and then shuffle the
16759 // results into the target lanes.
16760 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16761 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16764 if (V2.isUndef()) {
16765 // Try to use bit rotation instructions.
16766 if (SDValue Rotate =
16767 lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
16770 // Try to produce a fixed cross-128-bit lane permute followed by unpack
16771 // because that should be faster than the variable permute alternatives.
16772 if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
16777 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16778 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16779 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16782 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16786 SmallVector<int, 8> RepeatedMask;
16787 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16788 // As this is a single-input shuffle, the repeated mask should be
16789 // a strictly valid v8i16 mask that we can pass through to the v8i16
16790 // lowering to handle even the v16 case.
16791 return lowerV8I16GeneralSingleInputShuffle(
16792 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16796 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16797 Zeroable, Subtarget, DAG))
16800 // AVX512BWVL can lower to VPERMW.
16801 if (Subtarget.hasBWI() && Subtarget.hasVLX())
16802 return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
16804 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
16806 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16807 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16810 // Try to permute the lanes and then use a per-lane permute.
16811 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16812 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16815 // Otherwise fall back on generic lowering.
16816 return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16820 /// Handle lowering of 32-lane 8-bit integer shuffles.
16822 /// This routine is only called when we have AVX2 and thus a reasonable
16823 /// instruction set for v32i8 shuffling.
16824 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16825 const APInt &Zeroable, SDValue V1, SDValue V2,
16826 const X86Subtarget &Subtarget,
16827 SelectionDAG &DAG) {
16828 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16829 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16830 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16831 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16833 // Whenever we can lower this as a zext, that instruction is strictly faster
16834 // than any alternative. It also allows us to fold memory operands into the
16835 // shuffle in many cases.
16836 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16837 Zeroable, Subtarget, DAG))
16840 // Check for being able to broadcast a single element.
16841 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16845 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16846 Zeroable, Subtarget, DAG))
16849 // Use dedicated unpack instructions for masks that match their pattern.
16850 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16853 // Use dedicated pack instructions for masks that match their pattern.
16854 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16858 // Try to use shift instructions.
16859 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
16860 Zeroable, Subtarget, DAG))
16863 // Try to use byte rotation instructions.
16864 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16868 // Try to use bit rotation instructions.
16870 if (SDValue Rotate =
16871 lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
16874 // Try to create an in-lane repeating shuffle mask and then shuffle the
16875 // results into the target lanes.
16876 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16877 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16880 // There are no generalized cross-lane shuffle operations available on i8 element types.
16882 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16883 // Try to produce a fixed cross-128-bit lane permute followed by unpack
16884 // because that should be faster than the variable permute alternatives.
16885 if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
16888 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16889 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16892 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16896 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16897 Zeroable, Subtarget, DAG))
16900 // AVX512VBMIVL can lower to VPERMB.
16901 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
16902 return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
16904 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
16906 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16907 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16910 // Try to permute the lanes and then use a per-lane permute.
16911 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16912 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16915 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
16916 // by zeroable elements in the remaining 24 elements. Turn this into two
16917 // vmovqb instructions shuffled together.
16918 if (Subtarget.hasVLX())
16919 if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16920 Mask, Zeroable, DAG))
16923 // Otherwise fall back on generic lowering.
16924 return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16928 /// High-level routine to lower various 256-bit x86 vector shuffles.
16930 /// This routine either breaks down the specific type of a 256-bit x86 vector
16931 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
16932 /// together based on the available instructions.
16933 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16934 SDValue V1, SDValue V2, const APInt &Zeroable,
16935 const X86Subtarget &Subtarget,
16936 SelectionDAG &DAG) {
16937 // If we have a single input to the zero element, insert that into V1 if we
16938 // can do so cheaply.
16939 int NumElts = VT.getVectorNumElements();
16940 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16942 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16943 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16944 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16947 // Handle special cases where the lower or upper half is UNDEF.
16949 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16952 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16953 // can check for those subtargets here and avoid much of the subtarget
16954 // querying in the per-vector-type lowering routines. With AVX1 we have
16955 // essentially *zero* ability to manipulate a 256-bit vector with integer
16956 // types. Since we'll use floating point types there eventually, just
16957 // immediately cast everything to a float and operate entirely in that domain.
16958 if (VT.isInteger() && !Subtarget.hasAVX2()) {
16959 int ElementBits = VT.getScalarSizeInBits();
16960 if (ElementBits < 32) {
16961 // No floating point type available, if we can't use the bit operations
16962 // for masking/blending then decompose into 128-bit vectors.
16963 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16966 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16968 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16971 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16972 VT.getVectorNumElements());
16973 V1 = DAG.getBitcast(FpVT, V1);
16974 V2 = DAG.getBitcast(FpVT, V2);
16975 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16978 switch (VT.SimpleTy) {
16980 return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16982 return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16984 return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16986 return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16988 return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16990 return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16993 llvm_unreachable("Not a valid 256-bit x86 vector type!");
16997 /// Try to lower a vector shuffle as a 128-bit shuffles.
16998 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
16999 const APInt &Zeroable, SDValue V1, SDValue V2,
17000 const X86Subtarget &Subtarget,
17001 SelectionDAG &DAG) {
17002 assert(VT.getScalarSizeInBits() == 64 &&
17003 "Unexpected element type size for 128bit shuffle.");
17005 // Handling a 256-bit vector requires VLX, and lowerV2X128VectorShuffle()
17006 // is most probably the better solution for that case.
17007 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
17009 // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
17010 SmallVector<int, 4> Widened128Mask;
17011 if (!canWidenShuffleElements(Mask, Widened128Mask))
17013 assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");
17015 // Try to use an insert into a zero vector.
17016 if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
17017 (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
17018 unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
17019 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
17020 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
17021 DAG.getIntPtrConstant(0, DL));
17022 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17023 getZeroVector(VT, Subtarget, DAG, DL), LoV,
17024 DAG.getIntPtrConstant(0, DL));
17027 // Check for patterns which can be matched with a single insert of a 256-bit
17029 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 2, 3, 0, 1, 2, 3});
17031 isShuffleEquivalent(V1, V2, Mask, {0, 1, 2, 3, 8, 9, 10, 11})) {
17032 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
17034 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
17035 DAG.getIntPtrConstant(0, DL));
17036 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
17037 DAG.getIntPtrConstant(4, DL));
17040 // See if this is an insertion of the lower 128-bits of V2 into V1.
17041 bool IsInsert = true;
17043 for (int i = 0; i < 4; ++i) {
17044 assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
17045 if (Widened128Mask[i] < 0)
17048 // Make sure all V1 subvectors are in place.
17049 if (Widened128Mask[i] < 4) {
17050 if (Widened128Mask[i] != i) {
17055 // Make sure we only have a single V2 index and it's the lowest 128 bits.
17056 if (V2Index >= 0 || Widened128Mask[i] != 4) {
17063 if (IsInsert && V2Index >= 0) {
17064 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
17065 SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
17066 DAG.getIntPtrConstant(0, DL));
17067 return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
17070 // See if we can widen to a 256-bit lane shuffle; we're going to lose 128-lane
17071 // UNDEF info by lowering to X86ISD::SHUF128 anyway, so by widening where
17072 // possible we at least ensure the lanes stay sequential to help later combines.
17074 SmallVector<int, 2> Widened256Mask;
17075 if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
17076 Widened128Mask.clear();
17077 narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
17080 // Try to lower to vshuf64x2/vshuf32x4.
17081 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
17082 unsigned PermMask = 0;
17083 // Ensure elements come from the same Op.
17084 for (int i = 0; i < 4; ++i) {
17085 assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
17086 if (Widened128Mask[i] < 0)
17089 SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
17090 unsigned OpIndex = i / 2;
17091 if (Ops[OpIndex].isUndef())
17093 else if (Ops[OpIndex] != Op)
17096 // Convert the 128-bit shuffle mask selection values into 128-bit selection
17097 // bits defined by a vshuf64x2 instruction's immediate control byte.
17098 PermMask |= (Widened128Mask[i] % 4) << (i * 2);
17101 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
17102 DAG.getTargetConstant(PermMask, DL, MVT::i8));
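// Worked example for the SHUF128 immediate built above (values chosen purely
// for illustration): with Widened128Mask = {0, 1, 4, 5} the loop selects
// Ops[0] = V1 and Ops[1] = V2 and accumulates
// PermMask = (0 << 0) | (1 << 2) | (0 << 4) | (1 << 6) = 0x44, i.e. the two
// low 128-bit lanes of V1 followed by the two low 128-bit lanes of V2.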
17105 /// Handle lowering of 8-lane 64-bit floating point shuffles.
17106 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17107 const APInt &Zeroable, SDValue V1, SDValue V2,
17108 const X86Subtarget &Subtarget,
17109 SelectionDAG &DAG) {
17110 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
17111 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
17112 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
17114 if (V2.isUndef()) {
17115 // Use low duplicate instructions for masks that match their pattern.
17116 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
17117 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
17119 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
17120 // Non-half-crossing single input shuffles can be lowered with an
17121 // interleaved permutation.
17122 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
17123 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
17124 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
17125 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
17126 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
17127 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
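// Worked example for the VPERMILPD immediate above (illustrative masks only):
// the in-lane swap mask {1, 0, 3, 2, 5, 4, 7, 6} sets every even bit, giving
// VPERMILPMask = 0x55, while the identity mask {0, 1, 2, 3, 4, 5, 6, 7} sets
// every odd bit, giving 0xAA. Each bit simply picks the low or high f64
// within its own 128-bit lane.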
17130 SmallVector<int, 4> RepeatedMask;
17131 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
17132 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
17133 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
17136 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
17137 V2, Subtarget, DAG))
17140 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
17143 // Check if the blend happens to exactly fit that of SHUFPD.
17144 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
17145 Zeroable, Subtarget, DAG))
17148 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
17152 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
17153 Zeroable, Subtarget, DAG))
17156 return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
17159 /// Handle lowering of 16-lane 32-bit floating point shuffles.
17160 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17161 const APInt &Zeroable, SDValue V1, SDValue V2,
17162 const X86Subtarget &Subtarget,
17163 SelectionDAG &DAG) {
17164 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
17165 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
17166 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
17168 // If the shuffle mask is repeated in each 128-bit lane, we have many more
17169 // options to efficiently lower the shuffle.
17170 SmallVector<int, 4> RepeatedMask;
17171 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
17172 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
17174 // Use even/odd duplicate instructions for masks that match their pattern.
17175 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
17176 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
17177 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
17178 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
17181 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
17182 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
17184 // Use dedicated unpack instructions for masks that match their pattern.
17185 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
17188 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
17189 Zeroable, Subtarget, DAG))
17192 // Otherwise, fall back to a SHUFPS sequence.
17193 return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
17196 // Try to create an in-lane repeating shuffle mask and then shuffle the
17197 // results into the target lanes.
17198 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17199 DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
17202 // If we have a single input shuffle with different shuffle patterns in the
17203 // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
17204 if (V2.isUndef() &&
17205 !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
17206 SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
17207 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
17210 // If we have AVX512F support, we can use VEXPAND.
17211 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
17212 V1, V2, DAG, Subtarget))
17215 return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
17218 /// Handle lowering of 8-lane 64-bit integer shuffles.
17219 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17220 const APInt &Zeroable, SDValue V1, SDValue V2,
17221 const X86Subtarget &Subtarget,
17222 SelectionDAG &DAG) {
17223 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
17224 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
17225 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
17227 if (V2.isUndef()) {
17228 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
17229 // can use lower latency instructions that will operate on all four 128-bit lanes.
17231 SmallVector<int, 2> Repeated128Mask;
17232 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
17233 SmallVector<int, 4> PSHUFDMask;
17234 narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
17235 return DAG.getBitcast(
17237 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
17238 DAG.getBitcast(MVT::v16i32, V1),
17239 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
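// Illustrative example of the repeated-lane trick above: a v8i64 mask of
// {1, 0, 3, 2, 5, 4, 7, 6} repeats as {1, 0} in every 128-bit lane; splitting
// each 64-bit element into two 32-bit elements gives the v16i32 PSHUFD mask
// {2, 3, 0, 1}, whose encoded immediate is 2 | (3 << 2) | (0 << 4) | (1 << 6)
// = 0x4E.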
17242 SmallVector<int, 4> Repeated256Mask;
17243 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
17244 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
17245 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
17248 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
17249 V2, Subtarget, DAG))
17252 // Try to use shift instructions.
17253 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
17254 Zeroable, Subtarget, DAG))
17257 // Try to use VALIGN.
17258 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
17262 // Try to use PALIGNR.
17263 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
17267 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
17269 // If we have AVX512F support, we can use VEXPAND.
17270 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
17274 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
17275 Zeroable, Subtarget, DAG))
17278 return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
17281 /// Handle lowering of 16-lane 32-bit integer shuffles.
17282 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17283 const APInt &Zeroable, SDValue V1, SDValue V2,
17284 const X86Subtarget &Subtarget,
17285 SelectionDAG &DAG) {
17286 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
17287 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
17288 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
17290 // Whenever we can lower this as a zext, that instruction is strictly faster
17291 // than any alternative. It also allows us to fold memory operands into the
17292 // shuffle in many cases.
17293 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
17294 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
17297 // If the shuffle mask is repeated in each 128-bit lane we can use more
17298 // efficient instructions that mirror the shuffles across the four 128-bit lanes.
17300 SmallVector<int, 4> RepeatedMask;
17301 bool Is128BitLaneRepeatedShuffle =
17302 is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
17303 if (Is128BitLaneRepeatedShuffle) {
17304 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
17306 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
17307 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
17309 // Use dedicated unpack instructions for masks that match their pattern.
17310 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
17314 // Try to use shift instructions.
17315 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
17316 Zeroable, Subtarget, DAG))
17319 // Try to use VALIGN.
17320 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
17324 // Try to use byte rotation instructions.
17325 if (Subtarget.hasBWI())
17326 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
17330 // Assume that a single SHUFPS is faster than using a permv shuffle.
17331 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
17332 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
17333 SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
17334 SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
17335 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
17336 CastV1, CastV2, DAG);
17337 return DAG.getBitcast(MVT::v16i32, ShufPS);
17340 // Try to create an in-lane repeating shuffle mask and then shuffle the
17341 // results into the target lanes.
17342 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17343 DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
17346 // If we have AVX512F support, we can use VEXPAND.
17347 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
17351 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
17352 Zeroable, Subtarget, DAG))
17355 return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
17358 /// Handle lowering of 32-lane 16-bit integer shuffles.
17359 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17360 const APInt &Zeroable, SDValue V1, SDValue V2,
17361 const X86Subtarget &Subtarget,
17362 SelectionDAG &DAG) {
17363 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
17364 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
17365 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
17366 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
17368 // Whenever we can lower this as a zext, that instruction is strictly faster
17369 // than any alternative. It also allows us to fold memory operands into the
17370 // shuffle in many cases.
17371 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
17372 DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
17375 // Use dedicated unpack instructions for masks that match their pattern.
17376 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
17379 // Use dedicated pack instructions for masks that match their pattern.
17381 lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
17384 // Try to use shift instructions.
17385 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
17386 Zeroable, Subtarget, DAG))
17389 // Try to use byte rotation instructions.
17390 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
17394 if (V2.isUndef()) {
17395 // Try to use bit rotation instructions.
17396 if (SDValue Rotate =
17397 lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
17400 SmallVector<int, 8> RepeatedMask;
17401 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
17402 // As this is a single-input shuffle, the repeated mask should be
17403 // a strictly valid v8i16 mask that we can pass through to the v8i16
17404 // lowering to handle even the v32 case.
17405 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
17406 RepeatedMask, Subtarget, DAG);
17410 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
17411 Zeroable, Subtarget, DAG))
17414 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
17415 Zeroable, Subtarget, DAG))
17418 return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
17421 /// Handle lowering of 64-lane 8-bit integer shuffles.
17422 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
17423 const APInt &Zeroable, SDValue V1, SDValue V2,
17424 const X86Subtarget &Subtarget,
17425 SelectionDAG &DAG) {
17426 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
17427 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
17428 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
17429 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
17431 // Whenever we can lower this as a zext, that instruction is strictly faster
17432 // than any alternative. It also allows us to fold memory operands into the
17433 // shuffle in many cases.
17434 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
17435 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
17438 // Use dedicated unpack instructions for masks that match their pattern.
17439 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
17442 // Use dedicated pack instructions for masks that match their pattern.
17443 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
17447 // Try to use shift instructions.
17448 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
17449 Zeroable, Subtarget, DAG))
17452 // Try to use byte rotation instructions.
17453 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
17457 // Try to use bit rotation instructions.
17459 if (SDValue Rotate =
17460 lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
17463 // Lower as AND if possible.
17464 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
17465 Zeroable, Subtarget, DAG))
17468 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
17469 Zeroable, Subtarget, DAG))
17472 // VBMI can use VPERMV/VPERMV3 byte shuffles.
17473 if (Subtarget.hasVBMI())
17474 return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
17476 // Try to create an in-lane repeating shuffle mask and then shuffle the
17477 // results into the target lanes.
17478 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17479 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
17482 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
17483 Zeroable, Subtarget, DAG))
17486 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
17489 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
17490 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
17493 // FIXME: Implement direct support for this type!
17494 return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
17497 /// High-level routine to lower various 512-bit x86 vector shuffles.
17499 /// This routine either breaks down the specific type of a 512-bit x86 vector
17500 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
17501 /// together based on the available instructions.
17502 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17503 MVT VT, SDValue V1, SDValue V2,
17504 const APInt &Zeroable,
17505 const X86Subtarget &Subtarget,
17506 SelectionDAG &DAG) {
17507 assert(Subtarget.hasAVX512() &&
17508 "Cannot lower 512-bit vectors w/ basic ISA!");
17510 // If we have a single input to the zero element, insert that into V1 if we
17511 // can do so cheaply.
17512 int NumElts = Mask.size();
17513 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
17515 if (NumV2Elements == 1 && Mask[0] >= NumElts)
17516 if (SDValue Insertion = lowerShuffleAsElementInsertion(
17517 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
17520 // Handle special cases where the lower or upper half is UNDEF.
17522 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
17525 // Check for being able to broadcast a single element.
17526 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
17530 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
17531 // Try using bit ops for masking and blending before falling back to
17533 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
17536 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
17539 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
17542 // Dispatch to each element type for lowering. If we don't have support for
17543 // specific element type shuffles at 512 bits, immediately split them and
17544 // lower them. Each lowering routine of a given type is allowed to assume that
17545 // the requisite ISA extensions for that element type are available.
17546 switch (VT.SimpleTy) {
17548 return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17550 return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17552 return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17554 return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17556 return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17558 return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17561 llvm_unreachable("Not a valid 512-bit x86 vector type!");
17565 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
17566 MVT VT, SDValue V1, SDValue V2,
17567 const X86Subtarget &Subtarget,
17568 SelectionDAG &DAG) {
17569 // Shuffle should be unary.
17574 int NumElts = Mask.size();
17575 for (int i = 0; i != NumElts; ++i) {
17577 assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
17578 "Unexpected mask index.");
17582 // The first non-undef element determines our shift amount.
17583 if (ShiftAmt < 0) {
17585 // Need to be shifting right.
17589 // All non-undef elements must shift by the same amount.
17590 if (ShiftAmt != M - i)
17593 assert(ShiftAmt >= 0 && "All undef?");
17595 // Great, we found a shift right.
17597 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
17598 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17599 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
17600 DAG.getUNDEF(WideVT), V1,
17601 DAG.getIntPtrConstant(0, DL));
17602 Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
17603 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17604 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17605 DAG.getIntPtrConstant(0, DL));
17608 // Determine if this shuffle can be implemented with a KSHIFT instruction.
17609 // Returns the shift amount if possible or -1 if not. This is a simplified
17610 // version of matchShuffleAsShift.
17611 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
17612 int MaskOffset, const APInt &Zeroable) {
17613 int Size = Mask.size();
17615 auto CheckZeros = [&](int Shift, bool Left) {
17616 for (int j = 0; j < Shift; ++j)
17617 if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
17623 auto MatchShift = [&](int Shift, bool Left) {
17624 unsigned Pos = Left ? Shift : 0;
17625 unsigned Low = Left ? 0 : Shift;
17626 unsigned Len = Size - Shift;
17627 return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
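// Illustrative example of the matching loop below (mask values assumed for
// exposition): with Size == 8, MaskOffset == 0 and a mask of
// {2, 3, 4, 5, 6, 7, Z, Z} where the top two elements are zeroable,
// CheckZeros/MatchShift succeed for Shift == 2 with Left == false, so the
// shuffle is reported as a KSHIFTR by 2.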
17630 for (int Shift = 1; Shift != Size; ++Shift)
17631 for (bool Left : {true, false})
17632 if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
17633 Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
17641 // Lower vXi1 vector shuffles.
17642 // There is no dedicated instruction on AVX-512 that shuffles the masks.
17643 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
17644 // vector, shuffle, and then truncate it back.
17645 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17646 MVT VT, SDValue V1, SDValue V2,
17647 const APInt &Zeroable,
17648 const X86Subtarget &Subtarget,
17649 SelectionDAG &DAG) {
17650 assert(Subtarget.hasAVX512() &&
17651 "Cannot lower 512-bit vectors w/o basic ISA!");
17653 int NumElts = Mask.size();
17655 // Try to recognize shuffles that are just padding a subvector with zeros.
17656 int SubvecElts = 0;
17658 for (int i = 0; i != NumElts; ++i) {
17659 if (Mask[i] >= 0) {
17660 // Grab the source from the first valid mask. All subsequent elements need
17661 // to use this same source.
17663 Src = Mask[i] / NumElts;
17664 if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
17670 assert(SubvecElts != NumElts && "Identity shuffle?");
17672 // Clip to a power of 2.
17673 SubvecElts = PowerOf2Floor(SubvecElts);
17675 // Make sure the number of zeroable bits in the top at least covers the bits
17676 // not covered by the subvector.
17677 if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
17678 assert(Src >= 0 && "Expected a source!");
17679 MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
17680 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
17681 Src == 0 ? V1 : V2,
17682 DAG.getIntPtrConstant(0, DL));
17683 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17684 DAG.getConstant(0, DL, VT),
17685 Extract, DAG.getIntPtrConstant(0, DL));
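// Illustrative example of the padding case above: a v8i1 shuffle whose first
// four elements are 0, 1, 2, 3 from V1 and whose upper four elements are
// known zeroable extracts a v4i1 subvector from V1 and inserts it at element
// 0 of an all-zero v8i1 vector.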
17688 // Try a simple shift right with undef elements. Later we'll try with zeros.
17689 if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
17693 // Try to match KSHIFTs.
17694 unsigned Offset = 0;
17695 for (SDValue V : { V1, V2 }) {
17697 int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
17698 if (ShiftAmt >= 0) {
17700 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
17701 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17702 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
17703 DAG.getUNDEF(WideVT), V,
17704 DAG.getIntPtrConstant(0, DL));
17705 // Widened right shifts need two shifts to ensure we shift in zeroes.
17706 if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
17707 int WideElts = WideVT.getVectorNumElements();
17708 // Shift left to put the original vector in the MSBs of the new size.
17709 Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
17710 DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
17711 // Increase the shift amount to account for the left shift.
17712 ShiftAmt += WideElts - NumElts;
17715 Res = DAG.getNode(Opcode, DL, WideVT, Res,
17716 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17717 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17718 DAG.getIntPtrConstant(0, DL));
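// Illustrative example of the widened KSHIFTR above (types assumed for
// exposition): a v4i1 right shift by 1 on a DQI target widens to v8i1, first
// shifting left by 8 - 4 = 4 to park the four live bits in the top half, then
// shifting right by 1 + 4 = 5 so that zeroes (not undef bits) are shifted in,
// and finally extracting the low v4i1.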
17720 Offset += NumElts; // Increment for next iteration.
17726 switch (VT.SimpleTy) {
17728 llvm_unreachable("Expected a vector of i1 elements");
17730 ExtVT = MVT::v2i64;
17733 ExtVT = MVT::v4i32;
17736 // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
17738 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
17741 // Take 512-bit type, unless we are avoiding 512-bit types and have the
17742 // 256-bit operation available.
17743 ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
17746 // Take 512-bit type, unless we are avoiding 512-bit types and have the
17747 // 256-bit operation available.
17748 assert(Subtarget.hasBWI() && "Expected AVX512BW support");
17749 ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
17752 // Fall back to scalarization. FIXME: We can do better if the shuffle
17753 // can be partitioned cleanly.
17754 if (!Subtarget.useBWIRegs())
17756 ExtVT = MVT::v64i8;
17760 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
17761 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
17763 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
17764 // i1 was sign extended, so we can use X86ISD::CVT2MASK.
17765 int NumElems = VT.getVectorNumElements();
17766 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
17767 (Subtarget.hasDQI() && (NumElems < 32)))
17768 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
17769 Shuffle, ISD::SETGT);
17771 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
17774 /// Helper function that returns true if the shuffle mask should be
17775 /// commuted to improve canonicalization.
17776 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
17777 int NumElements = Mask.size();
17779 int NumV1Elements = 0, NumV2Elements = 0;
17783 else if (M < NumElements)
17788 // Commute the shuffle as needed such that more elements come from V1 than
17789 // V2. This allows us to match the shuffle pattern strictly on how many
17790 // elements come from V1 without handling the symmetric cases.
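// Illustrative example (mask chosen for exposition): a 4-element mask
// {4, 5, 6, 0} draws three elements from V2 and only one from V1, so it is
// commuted; ShuffleVectorSDNode::commuteMask would rewrite it as {0, 1, 2, 4}
// with the two operands swapped.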
17791 if (NumV2Elements > NumV1Elements)
17794 assert(NumV1Elements > 0 && "No V1 indices");
17796 if (NumV2Elements == 0)
17799 // When the number of V1 and V2 elements are the same, try to minimize the
17800 // number of uses of V2 in the low half of the vector. When that is tied,
17801 // ensure that the sum of indices for V1 is equal to or lower than the sum of
17802 // indices for V2. When those are equal, try to ensure that the number of odd
17803 // indices for V1 is lower than the number of odd indices for V2.
17804 if (NumV1Elements == NumV2Elements) {
17805 int LowV1Elements = 0, LowV2Elements = 0;
17806 for (int M : Mask.slice(0, NumElements / 2))
17807 if (M >= NumElements)
17811 if (LowV2Elements > LowV1Elements)
17813 if (LowV2Elements == LowV1Elements) {
17814 int SumV1Indices = 0, SumV2Indices = 0;
17815 for (int i = 0, Size = Mask.size(); i < Size; ++i)
17816 if (Mask[i] >= NumElements)
17818 else if (Mask[i] >= 0)
17820 if (SumV2Indices < SumV1Indices)
17822 if (SumV2Indices == SumV1Indices) {
17823 int NumV1OddIndices = 0, NumV2OddIndices = 0;
17824 for (int i = 0, Size = Mask.size(); i < Size; ++i)
17825 if (Mask[i] >= NumElements)
17826 NumV2OddIndices += i % 2;
17827 else if (Mask[i] >= 0)
17828 NumV1OddIndices += i % 2;
17829 if (NumV2OddIndices < NumV1OddIndices)
17838 /// Top-level lowering for x86 vector shuffles.
17840 /// This handles decomposition, canonicalization, and lowering of all x86
17841 /// vector shuffles. Most of the specific lowering strategies are encapsulated
17842 /// above in helper routines. The canonicalization attempts to widen shuffles
17843 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
17844 /// s.t. only one of the two inputs needs to be tested, etc.
17845 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
17846 SelectionDAG &DAG) {
17847 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17848 ArrayRef<int> OrigMask = SVOp->getMask();
17849 SDValue V1 = Op.getOperand(0);
17850 SDValue V2 = Op.getOperand(1);
17851 MVT VT = Op.getSimpleValueType();
17852 int NumElements = VT.getVectorNumElements();
17854 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17856 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17857 "Can't lower MMX shuffles");
17859 bool V1IsUndef = V1.isUndef();
17860 bool V2IsUndef = V2.isUndef();
17861 if (V1IsUndef && V2IsUndef)
17862 return DAG.getUNDEF(VT);
17864 // When we create a shuffle node we put the UNDEF node as the second operand,
17865 // but in some cases the first operand may be transformed to UNDEF.
17866 // In this case we should just commute the node.
17868 return DAG.getCommutedVectorShuffle(*SVOp);
17870 // Check for non-undef masks pointing at an undef vector and make the masks
17871 // undef as well. This makes it easier to match the shuffle based solely on the mask.
17874 any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17875 SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
17876 for (int &M : NewMask)
17877 if (M >= NumElements)
17879 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17882 // Check for illegal shuffle mask element index values.
17883 int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17884 (void)MaskUpperLimit;
17885 assert(llvm::all_of(OrigMask,
17886 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17887 "Out of bounds shuffle index");
17889 // We actually see shuffles that are entirely re-arrangements of a set of
17890 // zero inputs. This mostly happens while decomposing complex shuffles into
17891 // simple ones. Directly lower these as a buildvector of zeros.
17892 APInt KnownUndef, KnownZero;
17893 computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
17895 APInt Zeroable = KnownUndef | KnownZero;
17896 if (Zeroable.isAllOnesValue())
17897 return getZeroVector(VT, Subtarget, DAG, DL);
17899 bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17901 // Try to collapse shuffles into using a vector type with fewer elements but
17902 // wider element types. We cap this to not form integers or floating point
17903 // elements wider than 64 bits, but it might be interesting to form i128
17904 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
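// Illustrative example of the widening below (types chosen for exposition):
// a v4i32 shuffle with mask {0, 1, 6, 7} pairs up cleanly, so it is lowered
// as a v2i64 shuffle with mask {0, 3} on bitcast operands and the result is
// bitcast back to v4i32.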
17905 SmallVector<int, 16> WidenedMask;
17906 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17907 canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
17908 // Shuffle mask widening should not interfere with a broadcast opportunity
17909 // by obfuscating the operands with bitcasts.
17910 // TODO: Avoid lowering directly from this top-level function: make this
17911 // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17912 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
17916 MVT NewEltVT = VT.isFloatingPoint()
17917 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17918 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17919 int NewNumElts = NumElements / 2;
17920 MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17921 // Make sure that the new vector type is legal. For example, v2f64 isn't legal on SSE1.
17923 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17925 // Modify the new Mask to take all zeros from the all-zero vector.
17926 // Choose indices that are blend-friendly.
17927 bool UsedZeroVector = false;
17928 assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
17929 "V2's non-undef elements are used?!");
17930 for (int i = 0; i != NewNumElts; ++i)
17931 if (WidenedMask[i] == SM_SentinelZero) {
17932 WidenedMask[i] = i + NewNumElts;
17933 UsedZeroVector = true;
17935 // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17936 // some elements to be undef.
17937 if (UsedZeroVector)
17938 V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17940 V1 = DAG.getBitcast(NewVT, V1);
17941 V2 = DAG.getBitcast(NewVT, V2);
17942 return DAG.getBitcast(
17943 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17947 // Commute the shuffle if it will improve canonicalization.
17948 SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
17949 if (canonicalizeShuffleMaskWithCommute(Mask)) {
17950 ShuffleVectorSDNode::commuteMask(Mask);
17954 if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
17957 // For each vector width, delegate to a specialized lowering routine.
17958 if (VT.is128BitVector())
17959 return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17961 if (VT.is256BitVector())
17962 return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17964 if (VT.is512BitVector())
17965 return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17968 return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17970 llvm_unreachable("Unimplemented!");
17973 /// Try to lower a VSELECT instruction to a vector shuffle.
17974 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17975 const X86Subtarget &Subtarget,
17976 SelectionDAG &DAG) {
17977 SDValue Cond = Op.getOperand(0);
17978 SDValue LHS = Op.getOperand(1);
17979 SDValue RHS = Op.getOperand(2);
17980 MVT VT = Op.getSimpleValueType();
17982 // Only non-legal VSELECTs reach this lowering; convert those into generic
17983 // shuffles and re-use the shuffle lowering path for blends.
17984 SmallVector<int, 32> Mask;
17985 if (createShuffleMaskFromVSELECT(Mask, Cond))
17986 return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17991 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17992 SDValue Cond = Op.getOperand(0);
17993 SDValue LHS = Op.getOperand(1);
17994 SDValue RHS = Op.getOperand(2);
17996 // A vselect where all conditions and data are constants can be optimized into
17997 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17998 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17999 ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
18000 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
18003 // Try to lower this to a blend-style vector shuffle. This can handle all
18004 // constant condition cases.
18005 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
18008 // If this VSELECT has a vector of i1 as a mask, it will be directly matched
18009 // with patterns on the mask registers on AVX-512.
18010 MVT CondVT = Cond.getSimpleValueType();
18011 unsigned CondEltSize = Cond.getScalarValueSizeInBits();
18012 if (CondEltSize == 1)
18015 // Variable blends are only legal from SSE4.1 onward.
18016 if (!Subtarget.hasSSE41())
18020 MVT VT = Op.getSimpleValueType();
18021 unsigned EltSize = VT.getScalarSizeInBits();
18022 unsigned NumElts = VT.getVectorNumElements();
18024 // Expand v32i16/v64i8 without BWI.
18025 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
18028 // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
18029 // into an i1 condition so that we can use the mask-based 512-bit blend instructions.
18031 if (VT.getSizeInBits() == 512) {
18032 // Build a mask by testing the condition against zero.
18033 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
18034 SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
18035 DAG.getConstant(0, dl, CondVT),
18037 // Now return a new VSELECT using the mask.
18038 return DAG.getSelect(dl, VT, Mask, LHS, RHS);
18041 // SEXT/TRUNC cases where the mask doesn't match the destination size.
18042 if (CondEltSize != EltSize) {
18043 // If we don't have a sign splat, rely on the expansion.
18044 if (CondEltSize != DAG.ComputeNumSignBits(Cond))
18047 MVT NewCondSVT = MVT::getIntegerVT(EltSize);
18048 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
18049 Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
18050 return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
18053 // Only some types will be legal on some subtargets. If we can emit a legal
18054 // VSELECT-matching blend, return Op, but if we need to expand, return
18056 switch (VT.SimpleTy) {
18058 // Most of the vector types have blends past SSE4.1.
18062 // The byte blends for AVX vectors were introduced only in AVX2.
18063 if (Subtarget.hasAVX2())
18069 case MVT::v16i16: {
18070 // Bitcast everything to the vXi8 type and use a vXi8 vselect.
18071 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
18072 Cond = DAG.getBitcast(CastVT, Cond);
18073 LHS = DAG.getBitcast(CastVT, LHS);
18074 RHS = DAG.getBitcast(CastVT, RHS);
18075 SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
18076 return DAG.getBitcast(VT, Select);
18081 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
18082 MVT VT = Op.getSimpleValueType();
18083 SDValue Vec = Op.getOperand(0);
18084 SDValue Idx = Op.getOperand(1);
18085 assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
18088 if (!Vec.getSimpleValueType().is128BitVector())
18091 if (VT.getSizeInBits() == 8) {
18092 // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
18093 // we're going to zero extend the register or fold the store.
18094 if (llvm::isNullConstant(Idx) && !MayFoldIntoZeroExtend(Op) &&
18095 !MayFoldIntoStore(Op))
18096 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
18097 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
18098 DAG.getBitcast(MVT::v4i32, Vec), Idx));
18100 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec, Idx);
18101 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
18104 if (VT == MVT::f32) {
18105 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
18106 // the result back to FR32 register. It's only worth matching if the
18107 // result has a single use which is a store or a bitcast to i32. And in
18108 // the case of a store, it's not worth it if the index is a constant 0,
18109 // because a MOVSSmr can be used instead, which is smaller and faster.
18110 if (!Op.hasOneUse())
18112 SDNode *User = *Op.getNode()->use_begin();
18113 if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
18114 (User->getOpcode() != ISD::BITCAST ||
18115 User->getValueType(0) != MVT::i32))
18117 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
18118 DAG.getBitcast(MVT::v4i32, Vec), Idx);
18119 return DAG.getBitcast(MVT::f32, Extract);
18122 if (VT == MVT::i32 || VT == MVT::i64)
18128 /// Extract one bit from mask vector, like v16i1 or v8i1.
18129 /// AVX-512 feature.
18130 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
18131 const X86Subtarget &Subtarget) {
18132 SDValue Vec = Op.getOperand(0);
18134 MVT VecVT = Vec.getSimpleValueType();
18135 SDValue Idx = Op.getOperand(1);
18136 auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
18137 MVT EltVT = Op.getSimpleValueType();
18139 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
18140 "Unexpected vector type in ExtractBitFromMaskVector");
18142 // A variable index can't be handled in mask registers;
18143 // extend the vector to VR512/128.
18145 unsigned NumElts = VecVT.getVectorNumElements();
18146 // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
18147 // than extending to 128/256 bits.
18148 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
18149 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
18150 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
18151 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
18152 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
18155 unsigned IdxVal = IdxC->getZExtValue();
18156 if (IdxVal == 0) // the operation is legal
18159 // Extend to natively supported kshift.
18160 unsigned NumElems = VecVT.getVectorNumElements();
18161 MVT WideVecVT = VecVT;
18162 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
18163 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
18164 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
18165 DAG.getUNDEF(WideVecVT), Vec,
18166 DAG.getIntPtrConstant(0, dl));
18169 // Use kshiftr instruction to move to the lower element.
18170 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
18171 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
18173 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
18174 DAG.getIntPtrConstant(0, dl));
18178 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
18179 SelectionDAG &DAG) const {
18181 SDValue Vec = Op.getOperand(0);
18182 MVT VecVT = Vec.getSimpleValueType();
18183 SDValue Idx = Op.getOperand(1);
18184 auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
18186 if (VecVT.getVectorElementType() == MVT::i1)
18187 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
18190 // It's more profitable to go through memory (1 cycle throughput)
18191 // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
18192 // The IACA tool was used to get the performance estimate
18193 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
18195 // example : extractelement <16 x i8> %a, i32 %i
18197 // Block Throughput: 3.00 Cycles
18198 // Throughput Bottleneck: Port5
18200 // | Num Of | Ports pressure in cycles | |
18201 // | Uops | 0 - DV | 5 | 6 | 7 | |
18202 // ---------------------------------------------
18203 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
18204 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
18205 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
18206 // Total Num Of Uops: 4
18209 // Block Throughput: 1.00 Cycles
18210 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
18212 // | | Ports pressure in cycles | |
18213 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
18214 // ---------------------------------------------------------
18215 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
18216 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
18217 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
18218 // Total Num Of Uops: 4
18223 unsigned IdxVal = IdxC->getZExtValue();
18225 // If this is a 256-bit vector result, first extract the 128-bit vector and
18226 // then extract the element from the 128-bit vector.
18227 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
18228 // Get the 128-bit vector.
18229 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
18230 MVT EltVT = VecVT.getVectorElementType();
18232 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
18233 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
18235 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
18236 // this can be done with a mask.
18237 IdxVal &= ElemsPerChunk - 1;
18238 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
18239 DAG.getIntPtrConstant(IdxVal, dl));
18242 assert(VecVT.is128BitVector() && "Unexpected vector length");
18244 MVT VT = Op.getSimpleValueType();
18246 if (VT.getSizeInBits() == 16) {
18247 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
18248 // we're going to zero extend the register or fold the store (SSE41 only).
18249 if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
18250 !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
18251 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
18252 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
18253 DAG.getBitcast(MVT::v4i32, Vec), Idx));
18255 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec, Idx);
18256 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
18259 if (Subtarget.hasSSE41())
18260 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
18263 // TODO: We only extract a single element from v16i8; we can probably afford
18264 // to be more aggressive here before using the default approach of spilling to stack.
18266 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
18267 // Extract either the lowest i32 or any i16, and extract the sub-byte.
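// Illustrative example (index chosen for exposition): extracting byte 5 gives
// DWordIdx = 1, so the code below takes the i16 path with WordIdx = 2 and
// ShiftVal = 8, i.e. extract v8i16 element 2, shift right by 8 and truncate
// to i8; extracting byte 2 would instead use the DWordIdx == 0 path with a
// 16-bit shift of the low i32.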
18268 int DWordIdx = IdxVal / 4;
18269 if (DWordIdx == 0) {
18270 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
18271 DAG.getBitcast(MVT::v4i32, Vec),
18272 DAG.getIntPtrConstant(DWordIdx, dl));
18273 int ShiftVal = (IdxVal % 4) * 8;
18275 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
18276 DAG.getConstant(ShiftVal, dl, MVT::i8));
18277 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
18280 int WordIdx = IdxVal / 2;
18281 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
18282 DAG.getBitcast(MVT::v8i16, Vec),
18283 DAG.getIntPtrConstant(WordIdx, dl));
18284 int ShiftVal = (IdxVal % 2) * 8;
18286 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
18287 DAG.getConstant(ShiftVal, dl, MVT::i8));
18288 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
18291 if (VT.getSizeInBits() == 32) {
18295 // SHUFPS the element to the lowest double word, then movss.
18296 int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
18297 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
18298 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
18299 DAG.getIntPtrConstant(0, dl));
18302 if (VT.getSizeInBits() == 64) {
18303 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
18304 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
18305 // to match extract_elt for f64.
18309 // UNPCKHPD the element to the lowest double word, then movsd.
18310 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
18311 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
18312 int Mask[2] = { 1, -1 };
18313 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
18314 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
18315 DAG.getIntPtrConstant(0, dl));
18321 /// Insert one bit to mask vector, like v16i1 or v8i1.
18322 /// AVX-512 feature.
18323 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
18324 const X86Subtarget &Subtarget) {
18326 SDValue Vec = Op.getOperand(0);
18327 SDValue Elt = Op.getOperand(1);
18328 SDValue Idx = Op.getOperand(2);
18329 MVT VecVT = Vec.getSimpleValueType();
18331 if (!isa<ConstantSDNode>(Idx)) {
18332 // Non-constant index. Extend the source and destination,
18333 // insert the element and then truncate the result.
18334 unsigned NumElts = VecVT.getVectorNumElements();
18335 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
18336 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
18337 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
18338 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
18339 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
18340 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
18343 // Copy into a k-register, extract to v1i1 and insert_subvector.
18344 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
18345 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
18348 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
18349 SelectionDAG &DAG) const {
18350 MVT VT = Op.getSimpleValueType();
18351 MVT EltVT = VT.getVectorElementType();
18352 unsigned NumElts = VT.getVectorNumElements();
18353 SDLoc dl(Op);
18354 if (EltVT == MVT::i1)
18355 return InsertBitToMaskVector(Op, DAG, Subtarget);
18358 SDValue N0 = Op.getOperand(0);
18359 SDValue N1 = Op.getOperand(1);
18360 SDValue N2 = Op.getOperand(2);
18362 auto *N2C = dyn_cast<ConstantSDNode>(N2);
18363 if (!N2C || N2C->getAPIntValue().uge(NumElts))
18364 return SDValue();
18365 uint64_t IdxVal = N2C->getZExtValue();
18367 bool IsZeroElt = X86::isZeroNode(N1);
18368 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
18370 // If we are inserting an element, see if we can do this more efficiently with
18371 // a blend shuffle with a rematerializable vector than a costly integer
18372 // insertion.
18373 if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
18374 16 <= EltVT.getSizeInBits()) {
18375 SmallVector<int, 8> BlendMask;
18376 for (unsigned i = 0; i != NumElts; ++i)
18377 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
18378 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
18379 : getOnesVector(VT, DAG, dl);
18380 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
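// E.g. inserting zero into element 2 of a v4i32 gives BlendMask = {0, 1, 6, 3}:
// lanes 0, 1 and 3 come from N0 and lane 2 comes from the rematerializable
// zero (or all-ones) vector, so the whole insert is a single blend shuffle.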
18383 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
18384 // into that, and then insert the subvector back into the result.
18385 if (VT.is256BitVector() || VT.is512BitVector()) {
18386 // With a 256-bit vector, we can insert into the zero element efficiently
18387 // using a blend if we have AVX or AVX2 and the right data type.
18388 if (VT.is256BitVector() && IdxVal == 0) {
18389 // TODO: It is worthwhile to cast integer to floating point and back
18390 // and incur a domain crossing penalty if that's what we'll end up
18391 // doing anyway after extracting to a 128-bit vector.
18392 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
18393 (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
18394 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
18395 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
18396 DAG.getTargetConstant(1, dl, MVT::i8));
18400 // Get the desired 128-bit vector chunk.
18401 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
18403 // Insert the element into the desired chunk.
18404 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
18405 assert(isPowerOf2_32(NumEltsIn128));
18406 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
18407 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
18409 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
18410 DAG.getIntPtrConstant(IdxIn128, dl));
18412 // Insert the changed part back into the bigger vector
18413 return insert128BitVector(N0, V, IdxVal, DAG, dl);
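// E.g. for a v8i32 insert at index 5: the 128-bit chunk holding element 5 is
// extracted (the upper half), the scalar is inserted at IdxIn128 = 5 & 3 = 1
// within that chunk, and the chunk is spliced back into the original vector.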
18415 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
18417 // This will be just movd/movq/movss/movsd.
18418 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
18419 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
18420 EltVT == MVT::i64) {
18421 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
18422 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
18425 // We can't directly insert an i8 or i16 into a vector, so zero extend
18426 // it to i32 first.
18427 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
18428 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
18429 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
18430 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
18431 N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
18432 return DAG.getBitcast(VT, N1);
18436 // Transform it so it match pinsr{b,w} which expects a GR32 as its second
18437 // argument. SSE41 required for pinsrb.
18438 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
18439 unsigned Opc;
18440 if (VT == MVT::v8i16) {
18441 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
18442 Opc = X86ISD::PINSRW;
18443 } else {
18444 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
18445 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
18446 Opc = X86ISD::PINSRB;
18449 if (N1.getValueType() != MVT::i32)
18450 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
18451 if (N2.getValueType() != MVT::i32)
18452 N2 = DAG.getIntPtrConstant(IdxVal, dl);
18453 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
18456 if (Subtarget.hasSSE41()) {
18457 if (EltVT == MVT::f32) {
18458 // Bits [7:6] of the constant are the source select. This will always be
18459 // zero here. The DAG Combiner may combine an extract_elt index into
18460 // these bits. For example (insert (extract, 3), 2) could be matched by
18461 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
18462 // Bits [5:4] of the constant are the destination select. This is the
18463 // value of the incoming immediate.
18464 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
18465 // combine either bitwise AND or insert of float 0.0 to set these bits.
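// E.g. inserting into element 2 uses source select 0, destination select 2 and
// an empty zero mask, i.e. an immediate of 2 << 4 = 0x20, which matches the
// IdxVal << 4 encoding used below.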
18467 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
18468 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
18469 // If this is an insertion of 32-bits into the low 32-bits of
18470 // a vector, we prefer to generate a blend with immediate rather
18471 // than an insertps. Blends are simpler operations in hardware and so
18472 // will always have equal or better performance than insertps.
18473 // But if optimizing for size and there's a load folding opportunity,
18474 // generate insertps because blendps does not have a 32-bit memory
18476 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
18477 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
18478 DAG.getTargetConstant(1, dl, MVT::i8));
18480 // Create this as a scalar to vector..
18481 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
18482 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
18483 DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
18486 // PINSR* works with constant index.
18487 if (EltVT == MVT::i32 || EltVT == MVT::i64)
18488 return Op;
18489 }
18491 return SDValue();
18492 }
18494 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
18495 SelectionDAG &DAG) {
18496 SDLoc dl(Op);
18497 MVT OpVT = Op.getSimpleValueType();
18499 // It's always cheaper to replace a xor+movd with xorps and simplifies further
18501 if (X86::isZeroNode(Op.getOperand(0)))
18502 return getZeroVector(OpVT, Subtarget, DAG, dl);
18504 // If this is a 256-bit vector result, first insert into a 128-bit
18505 // vector and then insert into the 256-bit vector.
18506 if (!OpVT.is128BitVector()) {
18507 // Insert into a 128-bit vector.
18508 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
18509 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
18510 OpVT.getVectorNumElements() / SizeFactor);
18512 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
18514 // Insert the 128-bit vector.
18515 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
18517 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
18518 "Expected an SSE type!");
18520 // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
18521 if (OpVT == MVT::v4i32)
18522 return Op;
18524 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
18525 return DAG.getBitcast(
18526 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
18529 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
18530 // simple superregister reference or explicit instructions to insert
18531 // the upper bits of a vector.
18532 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
18533 SelectionDAG &DAG) {
18534 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
18536 return insert1BitVector(Op, DAG, Subtarget);
18539 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
18540 SelectionDAG &DAG) {
18541 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
18542 "Only vXi1 extract_subvectors need custom lowering");
18544 SDLoc dl(Op);
18545 SDValue Vec = Op.getOperand(0);
18546 uint64_t IdxVal = Op.getConstantOperandVal(1);
18548 if (IdxVal == 0) // the operation is legal
18549 return Op;
18551 MVT VecVT = Vec.getSimpleValueType();
18552 unsigned NumElems = VecVT.getVectorNumElements();
18554 // Extend to natively supported kshift.
18555 MVT WideVecVT = VecVT;
18556 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
18557 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
18558 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
18559 DAG.getUNDEF(WideVecVT), Vec,
18560 DAG.getIntPtrConstant(0, dl));
18563 // Shift to the LSB.
18564 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
18565 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
18567 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
18568 DAG.getIntPtrConstant(0, dl));
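// E.g. extracting a v2i1 subvector at index 4 from a v8i1 mask without DQI:
// the mask is widened to v16i1, shifted right by 4 with KSHIFTR so the
// requested bits land at the LSB, and the low v2i1 is then extracted at
// index 0, which is the legal form handled above.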
18571 // Returns the appropriate wrapper opcode for a global reference.
18572 unsigned X86TargetLowering::getGlobalWrapperKind(
18573 const GlobalValue *GV, const unsigned char OpFlags) const {
18574 // References to absolute symbols are never PC-relative.
18575 if (GV && GV->isAbsoluteSymbolRef())
18576 return X86ISD::Wrapper;
18578 CodeModel::Model M = getTargetMachine().getCodeModel();
18579 if (Subtarget.isPICStyleRIPRel() &&
18580 (M == CodeModel::Small || M == CodeModel::Kernel))
18581 return X86ISD::WrapperRIP;
18583 // GOTPCREL references must always use RIP.
18584 if (OpFlags == X86II::MO_GOTPCREL)
18585 return X86ISD::WrapperRIP;
18587 return X86ISD::Wrapper;
18590 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
18591 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
18592 // one of the above mentioned nodes. It has to be wrapped because otherwise
18593 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
18594 // be used to form addressing mode. These wrapped nodes will be selected
18595 // into MOV32ri.
18596 SDValue
18597 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
18598 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
18600 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18601 // global base reg.
18602 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18604 auto PtrVT = getPointerTy(DAG.getDataLayout());
18605 SDValue Result = DAG.getTargetConstantPool(
18606 CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
18607 SDLoc DL(CP);
18608 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
18609 // With PIC, the address is actually $g + Offset.
18610 if (OpFlag) {
18611 Result =
18612 DAG.getNode(ISD::ADD, DL, PtrVT,
18613 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18614 }
18616 return Result;
18617 }
18619 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
18620 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
18622 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18623 // global base reg.
18624 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18626 auto PtrVT = getPointerTy(DAG.getDataLayout());
18627 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
18628 SDLoc DL(JT);
18629 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
18631 // With PIC, the address is actually $g + Offset.
18632 if (OpFlag)
18633 Result =
18634 DAG.getNode(ISD::ADD, DL, PtrVT,
18635 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18637 return Result;
18638 }
18640 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
18641 SelectionDAG &DAG) const {
18642 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18646 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
18647 // Create the TargetBlockAddressAddress node.
18648 unsigned char OpFlags =
18649 Subtarget.classifyBlockAddressReference();
18650 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
18651 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
18652 SDLoc dl(Op);
18653 auto PtrVT = getPointerTy(DAG.getDataLayout());
18654 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
18655 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
18657 // With PIC, the address is actually $g + Offset.
18658 if (isGlobalRelativeToPICBase(OpFlags)) {
18659 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18660 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18661 }
18663 return Result;
18664 }
18666 /// Creates target global address or external symbol nodes for calls or
18667 /// other uses.
18668 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
18669 bool ForCall) const {
18670 // Unpack the global address or external symbol.
18671 const SDLoc &dl = SDLoc(Op);
18672 const GlobalValue *GV = nullptr;
18673 int64_t Offset = 0;
18674 const char *ExternalSym = nullptr;
18675 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
18676 GV = G->getGlobal();
18677 Offset = G->getOffset();
18678 } else {
18679 const auto *ES = cast<ExternalSymbolSDNode>(Op);
18680 ExternalSym = ES->getSymbol();
18681 }
18683 // Calculate some flags for address lowering.
18684 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
18685 unsigned char OpFlags;
18686 if (ForCall)
18687 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
18688 else
18689 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
18690 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
18691 bool NeedsLoad = isGlobalStubReference(OpFlags);
18693 CodeModel::Model M = DAG.getTarget().getCodeModel();
18694 auto PtrVT = getPointerTy(DAG.getDataLayout());
18696 SDValue Result;
18697 if (GV) {
18698 // Create a target global address if this is a global. If possible, fold the
18699 // offset into the global address reference. Otherwise, ADD it on later.
18700 int64_t GlobalOffset = 0;
18701 if (OpFlags == X86II::MO_NO_FLAG &&
18702 X86::isOffsetSuitableForCodeModel(Offset, M)) {
18703 std::swap(GlobalOffset, Offset);
18704 }
18705 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
18706 } else {
18707 // If this is not a global address, this must be an external symbol.
18708 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
18709 }
18711 // If this is a direct call, avoid the wrapper if we don't need to do any
18712 // loads or adds. This allows SDAG ISel to match direct calls.
18713 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
18714 return Result;
18716 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
18718 // With PIC, the address is actually $g + Offset.
18719 if (HasPICReg) {
18720 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18721 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18722 }
18724 // For globals that require a load from a stub to get the address, emit the
18725 // load.
18726 if (NeedsLoad)
18727 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
18728 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18730 // If there was a non-zero offset that we didn't fold, create an explicit
18731 // addition for it.
18732 if (Offset != 0)
18733 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
18734 DAG.getConstant(Offset, dl, PtrVT));
18740 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
18741 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18745 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
18746 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
18747 unsigned char OperandFlags, bool LocalDynamic = false) {
18748 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18749 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18750 SDLoc dl(GA);
18751 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18752 GA->getValueType(0),
18753 GA->getOffset(), OperandFlags);
18756 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
18757 : X86ISD::TLSADDR;
18759 if (InFlag) {
18760 SDValue Ops[] = { Chain, TGA, *InFlag };
18761 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18762 } else {
18763 SDValue Ops[] = { Chain, TGA };
18764 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18765 }
18767 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
18768 MFI.setAdjustsStack(true);
18769 MFI.setHasCalls(true);
18771 SDValue Flag = Chain.getValue(1);
18772 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
18775 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
18776 static SDValue
18777 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18778 const EVT PtrVT) {
18779 SDValue InFlag;
18780 SDLoc dl(GA); // ? function entry point might be better
18781 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18782 DAG.getNode(X86ISD::GlobalBaseReg,
18783 SDLoc(), PtrVT), InFlag);
18784 InFlag = Chain.getValue(1);
18786 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
18789 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
18791 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18793 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18794 X86::RAX, X86II::MO_TLSGD);
18797 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
18798 SelectionDAG &DAG, const EVT PtrVT,
18799 bool Is64Bit) {
18800 SDLoc dl(GA);
18803 // Get the start address of the TLS block for this module.
18804 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
18805 .getInfo<X86MachineFunctionInfo>();
18806 MFI->incNumLocalDynamicTLSAccesses();
18808 SDValue Base;
18809 if (Is64Bit) {
18810 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
18811 X86II::MO_TLSLD, /*LocalDynamic=*/true);
18812 } else {
18813 SDValue InFlag;
18814 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18815 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
18816 InFlag = Chain.getValue(1);
18817 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
18818 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
18819 }
18821 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
18822 // of Base.
18824 // Build x@dtpoff.
18825 unsigned char OperandFlags = X86II::MO_DTPOFF;
18826 unsigned WrapperKind = X86ISD::Wrapper;
18827 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18828 GA->getValueType(0),
18829 GA->getOffset(), OperandFlags);
18830 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18832 // Add x@dtpoff with the base.
18833 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
18836 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
18837 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18838 const EVT PtrVT, TLSModel::Model model,
18839 bool is64Bit, bool isPIC) {
18840 SDLoc dl(GA);
18842 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
18843 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
18844 is64Bit ? 257 : 256));
18846 SDValue ThreadPointer =
18847 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18848 MachinePointerInfo(Ptr));
18850 unsigned char OperandFlags = 0;
18851 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
18852 // initial-exec.
18853 unsigned WrapperKind = X86ISD::Wrapper;
18854 if (model == TLSModel::LocalExec) {
18855 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18856 } else if (model == TLSModel::InitialExec) {
18857 if (is64Bit) {
18858 OperandFlags = X86II::MO_GOTTPOFF;
18859 WrapperKind = X86ISD::WrapperRIP;
18860 } else {
18861 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18862 }
18863 } else {
18864 llvm_unreachable("Unexpected model");
18865 }
18867 // emit "addl x@ntpoff,%eax" (local exec)
18868 // or "addl x@indntpoff,%eax" (initial exec)
18869 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
18870 SDValue TGA =
18871 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18872 GA->getOffset(), OperandFlags);
18873 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18875 if (model == TLSModel::InitialExec) {
18876 if (isPIC && !is64Bit) {
18877 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18878 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18879 Offset);
18880 }
18882 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18883 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18884 }
18886 // The address of the thread local variable is the add of the thread
18887 // pointer with the offset of the variable.
18888 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18892 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18894 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18896 if (DAG.getTarget().useEmulatedTLS())
18897 return LowerToTLSEmulatedModel(GA, DAG);
18899 const GlobalValue *GV = GA->getGlobal();
18900 auto PtrVT = getPointerTy(DAG.getDataLayout());
18901 bool PositionIndependent = isPositionIndependent();
18903 if (Subtarget.isTargetELF()) {
18904 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18905 switch (model) {
18906 case TLSModel::GeneralDynamic:
18907 if (Subtarget.is64Bit())
18908 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18909 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18910 case TLSModel::LocalDynamic:
18911 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
18912 Subtarget.is64Bit());
18913 case TLSModel::InitialExec:
18914 case TLSModel::LocalExec:
18915 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18916 PositionIndependent);
18918 llvm_unreachable("Unknown TLS model.");
18921 if (Subtarget.isTargetDarwin()) {
18922 // Darwin only has one model of TLS. Lower to that.
18923 unsigned char OpFlag = 0;
18924 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
18925 X86ISD::WrapperRIP : X86ISD::Wrapper;
18927 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18928 // global base reg.
18929 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18930 if (PIC32)
18931 OpFlag = X86II::MO_TLVP_PIC_BASE;
18932 else
18933 OpFlag = X86II::MO_TLVP;
18934 SDLoc DL(Op);
18935 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18936 GA->getValueType(0),
18937 GA->getOffset(), OpFlag);
18938 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18940 // With PIC32, the address is actually $g + Offset.
18941 if (PIC32)
18942 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18943 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18944 Offset);
18946 // Lowering the machine isd will make sure everything is in the right
18948 SDValue Chain = DAG.getEntryNode();
18949 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18950 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18951 SDValue Args[] = { Chain, Offset };
18952 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18953 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
18954 DAG.getIntPtrConstant(0, DL, true),
18955 Chain.getValue(1), DL);
18957 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
18958 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18959 MFI.setAdjustsStack(true);
18961 // And our return value (tls address) is in the standard call return value
18962 // location.
18963 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18964 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18967 if (Subtarget.isOSWindows()) {
18968 // Just use the implicit TLS architecture
18969 // Need to generate something similar to:
18970 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
18972 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
18973 // mov rcx, qword [rdx+rcx*8]
18974 // mov eax, .tls$:tlsvar
18975 // [rax+rcx] contains the address
18976 // Windows 64bit: gs:0x58
18977 // Windows 32bit: fs:__tls_array
18979 SDLoc dl(GA);
18980 SDValue Chain = DAG.getEntryNode();
18982 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18983 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
18984 // use its literal value of 0x2C.
18985 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
18986 ? Type::getInt8PtrTy(*DAG.getContext(),
18988 : Type::getInt32PtrTy(*DAG.getContext(),
18991 SDValue TlsArray = Subtarget.is64Bit()
18992 ? DAG.getIntPtrConstant(0x58, dl)
18993 : (Subtarget.isTargetWindowsGNU()
18994 ? DAG.getIntPtrConstant(0x2C, dl)
18995 : DAG.getExternalSymbol("_tls_array", PtrVT));
18997 SDValue ThreadPointer =
18998 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
19000 SDValue res;
19001 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
19002 res = ThreadPointer;
19003 } else {
19004 // Load the _tls_index variable
19005 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
19006 if (Subtarget.is64Bit())
19007 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
19008 MachinePointerInfo(), MVT::i32);
19009 else
19010 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
19012 auto &DL = DAG.getDataLayout();
19013 SDValue Scale =
19014 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
19015 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
19017 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
19018 }
19020 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
19022 // Get the offset of start of .tls section
19023 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
19024 GA->getValueType(0),
19025 GA->getOffset(), X86II::MO_SECREL);
19026 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
19028 // The address of the thread local variable is the add of the thread
19029 // pointer with the offset of the variable.
19030 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
19033 llvm_unreachable("TLS not implemented for this target.");
19036 /// Lower SRA_PARTS and friends, which return two i32 values
19037 /// and take a 2 x i32 value to shift plus a shift amount.
19038 /// TODO: Can this be moved to general expansion code?
19039 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
19040 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
19041 MVT VT = Op.getSimpleValueType();
19042 unsigned VTBits = VT.getSizeInBits();
19043 SDLoc dl(Op);
19044 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
19045 SDValue ShOpLo = Op.getOperand(0);
19046 SDValue ShOpHi = Op.getOperand(1);
19047 SDValue ShAmt = Op.getOperand(2);
19048 // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and
19049 // ISD::SRA/L nodes haven't. Insert an AND to be safe, it's optimized away
19050 // during isel.
19051 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
19052 DAG.getConstant(VTBits - 1, dl, MVT::i8));
19053 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
19054 DAG.getConstant(VTBits - 1, dl, MVT::i8))
19055 : DAG.getConstant(0, dl, VT);
19057 SDValue Tmp2, Tmp3;
19058 if (Op.getOpcode() == ISD::SHL_PARTS) {
19059 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
19060 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
19062 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
19063 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
19066 // If the shift amount is larger or equal than the width of a part we can't
19067 // rely on the results of shld/shrd. Insert a test and select the appropriate
19068 // values for large shift amounts.
19069 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
19070 DAG.getConstant(VTBits, dl, MVT::i8));
19071 SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
19072 DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
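// Worked example for a 64-bit SHL_PARTS by 40 on a 32-bit target (VTBits = 32):
// Tmp3 = ShOpLo << (40 & 31) = ShOpLo << 8, and (40 & 32) != 0, so Cond selects
// Hi = Tmp3 and Lo = Tmp1 = 0, i.e. hi:lo = (lo << 8):0, as expected for a
// shift amount of at least one part width.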
19074 SDValue Lo, Hi;
19075 if (Op.getOpcode() == ISD::SHL_PARTS) {
19076 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
19077 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
19079 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
19080 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
19083 return DAG.getMergeValues({ Lo, Hi }, dl);
19086 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
19087 SelectionDAG &DAG) {
19088 MVT VT = Op.getSimpleValueType();
19089 assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
19090 "Unexpected funnel shift opcode!");
19092 SDLoc DL(Op);
19093 SDValue Op0 = Op.getOperand(0);
19094 SDValue Op1 = Op.getOperand(1);
19095 SDValue Amt = Op.getOperand(2);
19097 bool IsFSHR = Op.getOpcode() == ISD::FSHR;
19099 if (VT.isVector()) {
19100 assert(Subtarget.hasVBMI2() && "Expected VBMI2");
19102 if (IsFSHR)
19103 std::swap(Op0, Op1);
19105 APInt APIntShiftAmt;
19106 if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
19107 uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
19108 return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, Op0,
19109 Op1, DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
19112 return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
19113 Op0, Op1, Amt);
19114 }
19115 assert(
19116 (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
19117 "Unexpected funnel shift type!");
19119 // Expand slow SHLD/SHRD cases if we are not optimizing for size.
19120 bool OptForSize = DAG.shouldOptForSize();
19121 bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();
19123 // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
19124 // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
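// E.g. fshl i8 0xAB, 0xCD, 3: the concatenation (0xAB << 8) | 0xCD = 0xABCD is
// shifted left by (3 & 7) to 0x55E68, then shifted right by 8 and truncated,
// giving 0x5E == ((0xAB << 3) | (0xCD >> 5)) & 0xFF.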
19125 if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
19126 !isa<ConstantSDNode>(Amt)) {
19127 unsigned EltSizeInBits = VT.getScalarSizeInBits();
19128 SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
19129 SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
19130 Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
19131 Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
19132 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
19133 SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
19134 Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
19135 if (IsFSHR) {
19136 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
19137 } else {
19138 Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
19139 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
19140 }
19141 return DAG.getZExtOrTrunc(Res, DL, VT);
19144 if (VT == MVT::i8 || ExpandFunnel)
19145 return SDValue();
19147 // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
19148 if (VT == MVT::i16) {
19149 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
19150 DAG.getConstant(15, DL, Amt.getValueType()));
19151 unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
19152 return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
19158 // Try to use a packed vector operation to handle i64 on 32-bit targets when
19159 // AVX512DQ is enabled.
19160 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
19161 const X86Subtarget &Subtarget) {
19162 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
19163 Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
19164 Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
19165 Op.getOpcode() == ISD::UINT_TO_FP) &&
19166 "Unexpected opcode!");
19167 bool IsStrict = Op->isStrictFPOpcode();
19168 unsigned OpNo = IsStrict ? 1 : 0;
19169 SDValue Src = Op.getOperand(OpNo);
19170 MVT SrcVT = Src.getSimpleValueType();
19171 MVT VT = Op.getSimpleValueType();
19173 if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
19174 (VT != MVT::f32 && VT != MVT::f64))
19175 return SDValue();
19177 // Pack the i64 into a vector, do the operation and extract.
19179 // Using 256-bit to ensure result is 128-bits for f32 case.
19180 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
19181 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
19182 MVT VecVT = MVT::getVectorVT(VT, NumElts);
19184 SDLoc dl(Op);
19185 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
19186 if (IsStrict) {
19187 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
19188 {Op.getOperand(0), InVec});
19189 SDValue Chain = CvtVec.getValue(1);
19190 SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
19191 DAG.getIntPtrConstant(0, dl));
19192 return DAG.getMergeValues({Value, Chain}, dl);
19193 }
19195 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
19197 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
19198 DAG.getIntPtrConstant(0, dl));
19201 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
19202 const X86Subtarget &Subtarget) {
19203 switch (Opcode) {
19204 case ISD::SINT_TO_FP:
19205 // TODO: Handle wider types with AVX/AVX512.
19206 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
19207 return false;
19208 // CVTDQ2PS or (V)CVTDQ2PD
19209 return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
19211 case ISD::UINT_TO_FP:
19212 // TODO: Handle wider types and i64 elements.
19213 if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
19214 return false;
19215 // VCVTUDQ2PS or VCVTUDQ2PD
19216 return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
19223 /// Given a scalar cast operation that is extracted from a vector, try to
19224 /// vectorize the cast op followed by extraction. This will avoid an expensive
19225 /// round-trip between XMM and GPR.
19226 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
19227 const X86Subtarget &Subtarget) {
19228 // TODO: This could be enhanced to handle smaller integer types by peeking
19229 // through an extend.
19230 SDValue Extract = Cast.getOperand(0);
19231 MVT DestVT = Cast.getSimpleValueType();
19232 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19233 !isa<ConstantSDNode>(Extract.getOperand(1)))
19234 return SDValue();
19236 // See if we have a 128-bit vector cast op for this type of cast.
19237 SDValue VecOp = Extract.getOperand(0);
19238 MVT FromVT = VecOp.getSimpleValueType();
19239 unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
19240 MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
19241 MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
19242 if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
19243 return SDValue();
19245 // If we are extracting from a non-zero element, first shuffle the source
19246 // vector to allow extracting from element zero.
19247 SDLoc DL(Cast);
19248 if (!isNullConstant(Extract.getOperand(1))) {
19249 SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
19250 Mask[0] = Extract.getConstantOperandVal(1);
19251 VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
19253 // If the source vector is wider than 128-bits, extract the low part. Do not
19254 // create an unnecessarily wide vector cast op.
19255 if (FromVT != Vec128VT)
19256 VecOp = extract128BitVector(VecOp, 0, DAG, DL);
19258 // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
19259 // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
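// E.g. with SSE2, (sitofp (extractelement <4 x i32> %v, 2)) becomes: shuffle
// element 2 into lane 0, CVTDQ2PS on the whole vector, then extract lane 0,
// instead of moving the element to a GPR and converting with a scalar
// CVTSI2SS.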
19260 SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
19261 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
19262 DAG.getIntPtrConstant(0, DL));
19265 /// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
19266 /// try to vectorize the cast ops. This will avoid an expensive round-trip
19267 /// between XMM and GPR.
19268 static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
19269 const X86Subtarget &Subtarget) {
19270 // TODO: Allow FP_TO_UINT.
19271 SDValue CastToInt = CastToFP.getOperand(0);
19272 MVT VT = CastToFP.getSimpleValueType();
19273 if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
19274 return SDValue();
19276 MVT IntVT = CastToInt.getSimpleValueType();
19277 SDValue X = CastToInt.getOperand(0);
19278 MVT SrcVT = X.getSimpleValueType();
19279 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
19280 return SDValue();
19282 // See if we have 128-bit vector cast instructions for this type of cast.
19283 // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
19284 if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
19288 unsigned SrcSize = SrcVT.getSizeInBits();
19289 unsigned IntSize = IntVT.getSizeInBits();
19290 unsigned VTSize = VT.getSizeInBits();
19291 MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
19292 MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
19293 MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);
19295 // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
19296 unsigned ToIntOpcode =
19297 SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
19298 unsigned ToFPOpcode =
19299 IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
19301 // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
19303 // We are not defining the high elements (for example, zero them) because
19304 // that could nullify any performance advantage that we hoped to gain from
19305 // this vector op hack. We do not expect any adverse effects (like denorm
19306 // penalties) with cast ops.
19307 SDLoc DL(CastToFP);
19308 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
19309 SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
19310 SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
19311 SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
19312 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
19315 static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
19316 const X86Subtarget &Subtarget) {
19317 SDLoc DL(Op);
19318 bool IsStrict = Op->isStrictFPOpcode();
19319 MVT VT = Op->getSimpleValueType(0);
19320 SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
19322 if (Subtarget.hasDQI()) {
19323 assert(!Subtarget.hasVLX() && "Unexpected features");
19325 assert((Src.getSimpleValueType() == MVT::v2i64 ||
19326 Src.getSimpleValueType() == MVT::v4i64) &&
19327 "Unsupported custom type");
19329 // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
19330 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
19332 MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
19334 // Need to concat with zero vector for strict fp to avoid spurious
19336 SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
19337 : DAG.getUNDEF(MVT::v8i64);
19338 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
19339 DAG.getIntPtrConstant(0, DL));
19340 SDValue Res, Chain;
19341 if (IsStrict) {
19342 Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
19343 {Op->getOperand(0), Src});
19344 Chain = Res.getValue(1);
19345 } else {
19346 Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
19347 }
19349 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19350 DAG.getIntPtrConstant(0, DL));
19353 return DAG.getMergeValues({Res, Chain}, DL);
19357 bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
19358 Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
19359 if (VT != MVT::v4f32 || IsSigned)
19362 SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
19363 SDValue One = DAG.getConstant(1, DL, MVT::v4i64);
19364 SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
19365 DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
19366 DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
19367 SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
19368 SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
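// For lanes with the sign bit set, Sign halves the input with the low bit
// ORed back in (a round-to-odd shift) so the value fits in a signed i64; it
// is converted as signed and then doubled by the FADD below. Lanes without
// the sign bit set are converted directly via SignSrc = Src.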
19369 SmallVector<SDValue, 4> SignCvts(4);
19370 SmallVector<SDValue, 4> Chains(4);
19371 for (int i = 0; i != 4; ++i) {
19372 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
19373 DAG.getIntPtrConstant(i, DL));
19376 DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
19377 {Op.getOperand(0), Elt});
19378 Chains[i] = SignCvts[i].getValue(1);
19380 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
19383 SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
19385 SDValue Slow, Chain;
19387 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
19388 Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
19389 {Chain, SignCvt, SignCvt});
19390 Chain = Slow.getValue(1);
19392 Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
19395 IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
19396 SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
19399 return DAG.getMergeValues({Cvt, Chain}, DL);
19404 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
19405 SelectionDAG &DAG) const {
19406 bool IsStrict = Op->isStrictFPOpcode();
19407 unsigned OpNo = IsStrict ? 1 : 0;
19408 SDValue Src = Op.getOperand(OpNo);
19409 SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
19410 MVT SrcVT = Src.getSimpleValueType();
19411 MVT VT = Op.getSimpleValueType();
19412 SDLoc dl(Op);
19414 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19415 return Extract;
19417 if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
19418 return R;
19420 if (SrcVT.isVector()) {
19421 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
19422 // Note: Since v2f64 is a legal type. We don't need to zero extend the
19423 // source for strict FP.
19424 if (IsStrict)
19425 return DAG.getNode(
19426 X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
19427 {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
19428 DAG.getUNDEF(SrcVT))});
19429 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
19430 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
19431 DAG.getUNDEF(SrcVT)));
19433 if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
19434 return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19439 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
19440 "Unknown SINT_TO_FP to lower!");
19442 bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
19444 // These are really Legal; return the operand so the caller accepts it as
19445 // Legal.
19446 if (SrcVT == MVT::i32 && UseSSEReg)
19447 return Op;
19448 if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
19449 return Op;
19451 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19452 return V;
19454 // SSE doesn't have an i16 conversion so we need to promote.
19455 if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
19456 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
19457 if (IsStrict)
19458 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
19459 {Chain, Ext});
19461 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
19462 }
19464 if (VT == MVT::f128)
19465 return LowerF128Call(Op, DAG, RTLIB::getSINTTOFP(SrcVT, VT));
19467 SDValue ValueToStore = Src;
19468 if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
19469 // Bitcasting to f64 here allows us to do a single 64-bit store from
19470 // an SSE register, avoiding the store forwarding penalty that would come
19471 // with two 32-bit stores.
19472 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19474 unsigned Size = SrcVT.getStoreSize();
19475 Align Alignment(Size);
19476 MachineFunction &MF = DAG.getMachineFunction();
19477 auto PtrVT = getPointerTy(MF.getDataLayout());
19478 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
19479 MachinePointerInfo MPI =
19480 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19481 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19482 Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
19483 std::pair<SDValue, SDValue> Tmp =
19484 BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);
19487 return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19492 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
19493 EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
19494 MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
19495 // Build the FILD
19496 SDVTList Tys;
19497 bool useSSE = isScalarFPTypeInSSEReg(DstVT);
19498 if (useSSE)
19499 Tys = DAG.getVTList(MVT::f80, MVT::Other);
19500 else
19501 Tys = DAG.getVTList(DstVT, MVT::Other);
19503 SDValue FILDOps[] = {Chain, Pointer};
19504 SDValue Result =
19505 DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
19506 Alignment, MachineMemOperand::MOLoad);
19507 Chain = Result.getValue(1);
19510 MachineFunction &MF = DAG.getMachineFunction();
19511 unsigned SSFISize = DstVT.getStoreSize();
19513 MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
19514 auto PtrVT = getPointerTy(MF.getDataLayout());
19515 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19516 Tys = DAG.getVTList(MVT::Other);
19517 SDValue FSTOps[] = {Chain, Result, StackSlot};
19518 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
19519 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
19520 MachineMemOperand::MOStore, SSFISize, Align(SSFISize));
19523 DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
19524 Result = DAG.getLoad(
19525 DstVT, DL, Chain, StackSlot,
19526 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
19527 Chain = Result.getValue(1);
19530 return { Result, Chain };
19533 /// Horizontal vector math instructions may be slower than normal math with
19534 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
19535 /// implementation, and likely shuffle complexity of the alternate sequence.
19536 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
19537 const X86Subtarget &Subtarget) {
19538 bool IsOptimizingSize = DAG.shouldOptForSize();
19539 bool HasFastHOps = Subtarget.hasFastHorizontalOps();
19540 return !IsSingleSource || IsOptimizingSize || HasFastHOps;
19543 /// 64-bit unsigned integer to double expansion.
19544 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
19545 const X86Subtarget &Subtarget) {
19546 // This algorithm is not obvious. Here it is what we're trying to output:
19549 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
19550 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
19552 haddpd %xmm0, %xmm0
19554 pshufd $0x4e, %xmm0, %xmm1
19559 bool IsStrict = Op->isStrictFPOpcode();
19560 unsigned OpNo = IsStrict ? 1 : 0;
19561 SDLoc dl(Op);
19562 LLVMContext *Context = DAG.getContext();
19564 // Build some magic constants.
19565 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
19566 Constant *C0 = ConstantDataVector::get(*Context, CV0);
19567 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19568 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));
19570 SmallVector<Constant*,2> CV1;
19572 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
19573 APInt(64, 0x4330000000000000ULL))));
19575 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
19576 APInt(64, 0x4530000000000000ULL))));
19577 Constant *C1 = ConstantVector::get(CV1);
19578 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));
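// The two constants implement the classic u64 -> f64 split: interleaving the
// input with c0 produces the doubles 2^52 + Lo32 (bit pattern 0x43300000'Lo32)
// and 2^84 + Hi32 * 2^32 (bit pattern 0x45300000'Hi32); subtracting
// c1 = { 2^52, 2^84 } leaves { Lo32, Hi32 * 2^32 }, and adding the two lanes
// reconstructs the original value as a double.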
19580 // Load the 64-bit value into an XMM register.
19581 SDValue XR1 =
19582 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(OpNo));
19583 SDValue CLod0 =
19584 DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
19585 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
19586 /* Alignment = */ 16);
19587 SDValue Unpck1 =
19588 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
19590 SDValue CLod1 =
19591 DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
19592 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
19593 /* Alignment = */ 16);
19594 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
19597 // TODO: Are there any fast-math-flags to propagate here?
19599 Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
19600 {Op.getOperand(0), XR2F, CLod1});
19601 Chain = Sub.getValue(1);
19603 Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
19606 if (!IsStrict && Subtarget.hasSSE3() &&
19607 shouldUseHorizontalOp(true, DAG, Subtarget)) {
19608 // FIXME: Do we need a STRICT version of FHADD?
19609 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
19611 SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
19613 Result = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v2f64, MVT::Other},
19614 {Chain, Shuffle, Sub});
19615 Chain = Result.getValue(1);
19617 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
19619 Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
19620 DAG.getIntPtrConstant(0, dl));
19622 return DAG.getMergeValues({Result, Chain}, dl);
19627 /// 32-bit unsigned integer to float expansion.
19628 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
19629 const X86Subtarget &Subtarget) {
19630 unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19631 SDLoc dl(Op);
19632 // FP constant to bias correct the final result.
19633 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
19634 MVT::f64);
19636 // Load the 32-bit value into an XMM register.
19637 SDValue Load =
19638 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
19640 // Zero out the upper parts of the register.
19641 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
19643 // Or the load with the bias.
19644 SDValue Or = DAG.getNode(
19645 ISD::OR, dl, MVT::v2i64,
19646 DAG.getBitcast(MVT::v2i64, Load),
19647 DAG.getBitcast(MVT::v2i64,
19648 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
19650 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
19651 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
19653 if (Op.getNode()->isStrictFPOpcode()) {
19654 // Subtract the bias.
19655 // TODO: Are there any fast-math-flags to propagate here?
19656 SDValue Chain = Op.getOperand(0);
19657 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
19658 {Chain, Or, Bias});
19660 if (Op.getValueType() == Sub.getValueType())
19663 // Handle final rounding.
19664 std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
19665 Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
19667 return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
19670 // Subtract the bias.
19671 // TODO: Are there any fast-math-flags to propagate here?
19672 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
19674 // Handle final rounding.
19675 return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
19678 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
19679 const X86Subtarget &Subtarget,
19681 if (Op.getSimpleValueType() != MVT::v2f64)
19682 return SDValue();
19684 bool IsStrict = Op->isStrictFPOpcode();
19686 SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
19687 assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
19689 if (Subtarget.hasAVX512()) {
19690 if (!Subtarget.hasVLX()) {
19691 // Let generic type legalization widen this.
19694 // Otherwise pad the integer input with 0s and widen the operation.
19695 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19696 DAG.getConstant(0, DL, MVT::v2i32));
19697 SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
19698 {Op.getOperand(0), N0});
19699 SDValue Chain = Res.getValue(1);
19700 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
19701 DAG.getIntPtrConstant(0, DL));
19702 return DAG.getMergeValues({Res, Chain}, DL);
19705 // Legalize to v4i32 type.
19706 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19707 DAG.getUNDEF(MVT::v2i32));
19709 return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
19710 {Op.getOperand(0), N0});
19711 return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
19714 // Zero extend to 2i64, OR with the floating point representation of 2^52.
19715 // This gives us the floating point equivalent of 2^52 + the i32 integer
19716 // since double has 52-bits of mantissa. Then subtract 2^52 in floating
19717 // point leaving just our i32 integers in double format.
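// E.g. for the 32-bit lane value 7: OR-ing it into the low mantissa bits of
// 0x4330000000000000 yields the double 2^52 + 7; subtracting VBias (2^52)
// leaves exactly 7.0.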
19718 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
19719 SDValue VBias =
19720 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), DL, MVT::v2f64);
19721 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
19722 DAG.getBitcast(MVT::v2i64, VBias));
19723 Or = DAG.getBitcast(MVT::v2f64, Or);
19726 return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
19727 {Op.getOperand(0), Or, VBias});
19728 return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
19731 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
19732 const X86Subtarget &Subtarget) {
19734 bool IsStrict = Op->isStrictFPOpcode();
19735 SDValue V = Op->getOperand(IsStrict ? 1 : 0);
19736 MVT VecIntVT = V.getSimpleValueType();
19737 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
19738 "Unsupported custom type");
19740 if (Subtarget.hasAVX512()) {
19741 // With AVX512, but not VLX we need to widen to get a 512-bit result type.
19742 assert(!Subtarget.hasVLX() && "Unexpected features");
19743 MVT VT = Op->getSimpleValueType(0);
19745 // v8i32->v8f64 is legal with AVX512 so just return it.
19746 if (VT == MVT::v8f64)
19749 assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
19751 MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
19752 MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
19753 // Need to concat with zero vector for strict fp to avoid spurious
19754 // exceptions.
19755 SDValue Tmp =
19756 IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
19757 V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
19758 DAG.getIntPtrConstant(0, DL));
19759 SDValue Res, Chain;
19761 Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
19762 {Op->getOperand(0), V});
19763 Chain = Res.getValue(1);
19765 Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
19768 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19769 DAG.getIntPtrConstant(0, DL));
19772 return DAG.getMergeValues({Res, Chain}, DL);
19776 if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
19777 Op->getSimpleValueType(0) == MVT::v4f64) {
19778 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
19779 Constant *Bias = ConstantFP::get(
19781 APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
19782 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19783 SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
19784 SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
19785 SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
19786 SDValue VBias = DAG.getMemIntrinsicNode(
19787 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
19788 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
19789 MachineMemOperand::MOLoad);
19791 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
19792 DAG.getBitcast(MVT::v4i64, VBias));
19793 Or = DAG.getBitcast(MVT::v4f64, Or);
19796 return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
19797 {Op.getOperand(0), Or, VBias});
19798 return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
19801 // The algorithm is the following:
19802 // #ifdef __SSE4_1__
19803 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19804 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19805 // (uint4) 0x53000000, 0xaa);
19807 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19808 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
19810 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19811 // return (float4) lo + fhi;
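// Why this works: lo reinterprets as the float 2^23 + (v & 0xffff) and hi as
// 2^39 + (v >> 16) * 2^16, so fhi = hi - (2^39 + 2^23) = (v >> 16) * 2^16 - 2^23
// and lo + fhi == v. The constant 0x53000080 used below is the float
// 2^39 + 2^23.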
19813 bool Is128 = VecIntVT == MVT::v4i32;
19814 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
19815 // If we convert to something else than the supported type, e.g., to v4f64,
19817 if (VecFloatVT != Op->getSimpleValueType(0))
19820 // In the #idef/#else code, we have in common:
19821 // - The vector of constants:
19827 // Create the splat vector for 0x4b000000.
19828 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
19829 // Create the splat vector for 0x53000000.
19830 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
19832 // Create the right shift.
19833 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
19834 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
19836 SDValue Low, High;
19837 if (Subtarget.hasSSE41()) {
19838 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
19839 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19840 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
19841 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
19842 // Low will be bitcasted right away, so do not bother bitcasting back to its
19844 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
19845 VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19846 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19847 // (uint4) 0x53000000, 0xaa);
19848 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
19849 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
19850 // High will be bitcasted right away, so do not bother bitcasting back to
19851 // its original type.
19852 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
19853 VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19854 } else {
19855 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
19856 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19857 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
19858 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
19860 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
19861 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
19864 // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
19865 SDValue VecCstFSub = DAG.getConstantFP(
19866 APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
19868 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19869 // NOTE: By using fsub of a positive constant instead of fadd of a negative
19870 // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
19871 // enabled. See PR24512.
19872 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
19873 // TODO: Are there any fast-math-flags to propagate here?
19875 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
19876 // return (float4) lo + fhi;
19877 if (IsStrict) {
19878 SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
19879 {Op.getOperand(0), HighBitcast, VecCstFSub});
19880 return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
19881 {FHigh.getValue(1), LowBitcast, FHigh});
19882 }
19884 SDValue FHigh =
19885 DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
19886 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
19887 }
19889 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
19890 const X86Subtarget &Subtarget) {
19891 unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19892 SDValue N0 = Op.getOperand(OpNo);
19893 MVT SrcVT = N0.getSimpleValueType();
19894 SDLoc dl(Op);
19896 switch (SrcVT.SimpleTy) {
19897 default:
19898 llvm_unreachable("Custom UINT_TO_FP is not supported!");
19899 case MVT::v2i32:
19900 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
19901 case MVT::v4i32:
19902 case MVT::v8i32:
19903 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
19904 case MVT::v2i64:
19905 case MVT::v4i64:
19906 return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19907 }
19908 }
19910 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
19911 SelectionDAG &DAG) const {
19912 bool IsStrict = Op->isStrictFPOpcode();
19913 unsigned OpNo = IsStrict ? 1 : 0;
19914 SDValue Src = Op.getOperand(OpNo);
19915 SDLoc dl(Op);
19916 auto PtrVT = getPointerTy(DAG.getDataLayout());
19917 MVT SrcVT = Src.getSimpleValueType();
19918 MVT DstVT = Op->getSimpleValueType(0);
19919 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19921 if (DstVT == MVT::f128)
19922 return LowerF128Call(Op, DAG, RTLIB::getUINTTOFP(SrcVT, DstVT));
19924 if (DstVT.isVector())
19925 return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
19927 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19930 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
19931 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
19932 // Conversions from unsigned i32 to f32/f64 are legal,
19933 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
19934 return Op;
19935 }
19937 // Promote i32 to i64 and use a signed conversion on 64-bit targets.
19938 if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
19939 Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
19940 if (IsStrict)
19941 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
19942 {Chain, Src});
19943 return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
19944 }
19946 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19949 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
19950 return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
19951 if (SrcVT == MVT::i32 && X86ScalarSSEf64 && DstVT != MVT::f80)
19952 return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
19953 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
19956 // Make a 64-bit buffer, and use it to build an FILD.
19957 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
19958 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
19959 MachinePointerInfo MPI =
19960 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19961 if (SrcVT == MVT::i32) {
19962 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
19963 SDValue Store1 =
19964 DAG.getStore(Chain, dl, Src, StackSlot, MPI, 8 /*Align*/);
19965 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
19966 OffsetSlot, MPI.getWithOffset(4), 4);
19967 std::pair<SDValue, SDValue> Tmp =
19968 BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, Align(8), DAG);
19969 if (IsStrict)
19970 return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19972 return Tmp.first;
19973 }
19975 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
19976 SDValue ValueToStore = Src;
19977 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
19978 // Bitcasting to f64 here allows us to do a single 64-bit store from
19979 // an SSE register, avoiding the store forwarding penalty that would come
19980 // with two 32-bit stores.
19981 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19982 }
19983 SDValue Store =
19984 DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Align(8));
19985 // For i64 source, we need to add the appropriate power of 2 if the input
19986 // was negative. This is the same as the optimization in
19987 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
19988 // we must be careful to do the computation in x87 extended precision, not
19989 // in SSE. (The generic code can't know it's OK to do this, or how to.)
19990 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19991 SDValue Ops[] = { Store, StackSlot };
19992 SDValue Fild =
19993 DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
19994 Align(8), MachineMemOperand::MOLoad);
19995 Chain = Fild.getValue(1);
19998 // Check whether the sign bit is set.
19999 SDValue SignSet = DAG.getSetCC(
20000 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
20001 Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
20003 // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
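// 0x5F800000 is the single-precision encoding of 2^64. The FILD above loaded
// the i64 bit pattern as a signed value, so an unsigned source with the sign
// bit set came in as (value - 2^64); adding this fudge factor back (in x87
// extended precision) restores the intended unsigned value, while adding 0.0
// leaves non-negative inputs untouched.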
20004 APInt FF(64, 0x5F80000000000000ULL);
20005 SDValue FudgePtr = DAG.getConstantPool(
20006 ConstantInt::get(*DAG.getContext(), FF), PtrVT);
20007 Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();
20009 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
20010 SDValue Zero = DAG.getIntPtrConstant(0, dl);
20011 SDValue Four = DAG.getIntPtrConstant(4, dl);
20012 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
20013 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
20015 // Load the value out, extending it from f32 to f80.
20016 SDValue Fudge = DAG.getExtLoad(
20017 ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
20018 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
20019 CPAlignment);
20020 Chain = Fudge.getValue(1);
20021 // Extend everything to 80 bits to force it to be done on x87.
20022 // TODO: Are there any fast-math-flags to propagate here?
20023 if (IsStrict) {
20024 SDValue Add = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::f80, MVT::Other},
20025 {Chain, Fild, Fudge});
20026 // STRICT_FP_ROUND can't handle equal types.
20027 if (DstVT == MVT::f80)
20028 return Add;
20029 return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
20030 {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
20031 }
20032 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
20033 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
20034 DAG.getIntPtrConstant(0, dl));
20037 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
20038 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
20039 // just return an SDValue().
20040 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
20041 // to i16, i32 or i64, and we lower it to a legal sequence and return the
20042 // result.
20043 SDValue
20044 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
20045 bool IsSigned, SDValue &Chain) const {
20046 bool IsStrict = Op->isStrictFPOpcode();
20047 SDLoc DL(Op);
20049 EVT DstTy = Op.getValueType();
20050 SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
20051 EVT TheVT = Value.getValueType();
20052 auto PtrVT = getPointerTy(DAG.getDataLayout());
20054 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
20055 // f16 must be promoted before using the lowering in this routine.
20056 // fp128 does not use this lowering.
20057 return SDValue();
20058 }
20060 // If using FIST to compute an unsigned i64, we'll need some fixup
20061 // to handle values above the maximum signed i64. A FIST is always
20062 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
20063 bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
20065 // FIXME: This does not generate an invalid exception if the input does not
20066 // fit in i32. PR44019
20067 if (!IsSigned && DstTy != MVT::i64) {
20068 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
20069 // The low 32 bits of the fist result will have the correct uint32 result.
20070 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
20071 DstTy = MVT::i64;
20072 }
20074 assert(DstTy.getSimpleVT() <= MVT::i64 &&
20075 DstTy.getSimpleVT() >= MVT::i16 &&
20076 "Unknown FP_TO_INT to lower!");
20078 // We lower FP->int64 into FISTP64 followed by a load from a temporary
20079 // stack slot.
20080 MachineFunction &MF = DAG.getMachineFunction();
20081 unsigned MemSize = DstTy.getStoreSize();
20082 int SSFI =
20083 MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
20084 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
20086 Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
20088 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
20090 if (UnsignedFixup) {
20092 // Conversion to unsigned i64 is implemented with a select,
20093 // depending on whether the source value fits in the range
20094 // of a signed i64. Let Thresh be the FP equivalent of
20095 // 0x8000000000000000ULL.
20097 // Adjust = (Value < Thresh) ? 0 : 0x80000000;
20098 // FltOfs = (Value < Thresh) ? 0 : 0x80000000;
20099 // FistSrc = (Value - FltOfs);
20100 // Fist-to-mem64 FistSrc
20101 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
20102 // to XOR'ing the high 32 bits with Adjust.
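//
// For example, with Value == 2^63 exactly: Value < Thresh is false, so
// Adjust == 0x8000000000000000 and FltOfs == 2^63. FistSrc is then 0.0,
// the fist stores 0, and XOR'ing the result with Adjust gives back 2^63.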
20104 // Being a power of 2, Thresh is exactly representable in all FP formats.
20105 // For X87 we'd like to use the smallest FP type for this constant, but
20106 // for DAG type consistency we have to match the FP operand type.
20108 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
20109 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
20110 bool LosesInfo = false;
20111 if (TheVT == MVT::f64)
20112 // The rounding mode is irrelevant as the conversion should be exact.
20113 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
20114 &LosesInfo);
20115 else if (TheVT == MVT::f80)
20116 Status = Thresh.convert(APFloat::x87DoubleExtended(),
20117 APFloat::rmNearestTiesToEven, &LosesInfo);
20119 assert(Status == APFloat::opOK && !LosesInfo &&
20120 "FP conversion should have been exact");
20122 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
20124 EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
20125 *DAG.getContext(), TheVT);
20126 SDValue Cmp;
20127 if (IsStrict) {
20128 Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT,
20129 Chain, /*IsSignaling*/ true);
20130 Chain = Cmp.getValue(1);
20131 } else {
20132 Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT);
20133 }
20135 Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
20136 DAG.getConstant(0, DL, MVT::i64),
20137 DAG.getConstant(APInt::getSignMask(64),
20138 DL, MVT::i64));
20139 SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp,
20140 DAG.getConstantFP(0.0, DL, TheVT),
20141 ThreshVal);
20143 if (IsStrict) {
20144 Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
20145 { Chain, Value, FltOfs });
20146 Chain = Value.getValue(1);
20147 } else
20148 Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
20149 }
20151 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
20153 // FIXME This causes a redundant load/store if the SSE-class value is already
20154 // in memory, such as if it is on the callstack.
20155 if (isScalarFPTypeInSSEReg(TheVT)) {
20156 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
20157 Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
20158 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
20159 SDValue Ops[] = { Chain, StackSlot };
20161 unsigned FLDSize = TheVT.getStoreSize();
20162 assert(FLDSize <= MemSize && "Stack slot not big enough");
20163 MachineMemOperand *MMO = MF.getMachineMemOperand(
20164 MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
20165 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
20166 Chain = Value.getValue(1);
20169 // Build the FP_TO_INT*_IN_MEM
20170 MachineMemOperand *MMO = MF.getMachineMemOperand(
20171 MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
20172 SDValue Ops[] = { Chain, Value, StackSlot };
20173 SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
20174 DAG.getVTList(MVT::Other),
20175 Ops, DstTy, MMO);
20177 SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
20178 Chain = Res.getValue(1);
20180 // If we need an unsigned fixup, XOR the result with adjust.
20181 if (UnsignedFixup)
20182 Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
20184 return Res;
20185 }
20187 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
20188 const X86Subtarget &Subtarget) {
20189 MVT VT = Op.getSimpleValueType();
20190 SDValue In = Op.getOperand(0);
20191 MVT InVT = In.getSimpleValueType();
20193 unsigned Opc = Op.getOpcode();
20195 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
20196 assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
20197 "Unexpected extension opcode");
20198 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
20199 "Expected same number of elements");
20200 assert((VT.getVectorElementType() == MVT::i16 ||
20201 VT.getVectorElementType() == MVT::i32 ||
20202 VT.getVectorElementType() == MVT::i64) &&
20203 "Unexpected element type");
20204 assert((InVT.getVectorElementType() == MVT::i8 ||
20205 InVT.getVectorElementType() == MVT::i16 ||
20206 InVT.getVectorElementType() == MVT::i32) &&
20207 "Unexpected element type");
20209 unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
20211 if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
20212 assert(InVT == MVT::v32i8 && "Unexpected VT!");
20213 return splitVectorIntUnary(Op, DAG);
20216 if (Subtarget.hasInt256())
20219 // Optimize vectors in AVX mode:
20222 // Use vpmovzwd for 4 lower elements v8i16 -> v4i32.
20223 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
20224 // Concat upper and lower parts.
20227 // Use vpmovzdq for 4 lower elements v4i32 -> v2i64.
20228 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
20229 // Concat upper and lower parts.
20231 MVT HalfVT = VT.getHalfNumVectorElementsVT();
20232 SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
20234 // Short-circuit if we can determine that each 128-bit half is the same value.
20235 // Otherwise, this is difficult to match and optimize.
20236 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
20237 if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
20238 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
20240 SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
20241 SDValue Undef = DAG.getUNDEF(InVT);
20242 bool NeedZero = Opc == ISD::ZERO_EXTEND;
20243 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
20244 OpHi = DAG.getBitcast(HalfVT, OpHi);
20246 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
20249 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
20250 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
20251 const SDLoc &dl, SelectionDAG &DAG) {
20252 assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
20253 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20254 DAG.getIntPtrConstant(0, dl));
20255 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20256 DAG.getIntPtrConstant(8, dl));
20257 Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
20258 Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
20259 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
20260 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20263 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
20264 const X86Subtarget &Subtarget,
20265 SelectionDAG &DAG) {
20266 MVT VT = Op->getSimpleValueType(0);
20267 SDValue In = Op->getOperand(0);
20268 MVT InVT = In.getSimpleValueType();
20269 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
20271 unsigned NumElts = VT.getVectorNumElements();
20273 // For all vectors, but vXi8 we can just emit a sign_extend and a shift. This
20274 // avoids a constant pool load.
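// A sign_extend of an i1 element yields 0 or all-ones, and a logical shift
// right by (element width - 1) then leaves exactly 0 or 1, which is the
// desired zero-extended value.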
20275 if (VT.getVectorElementType() != MVT::i8) {
20276 SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
20277 return DAG.getNode(ISD::SRL, DL, VT, Extend,
20278 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
20281 // Extend VT if BWI is not supported.
20282 MVT ExtVT = VT;
20283 if (!Subtarget.hasBWI()) {
20284 // If v16i32 is to be avoided, we'll need to split and concatenate.
20285 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
20286 return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
20288 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
20291 // Widen to 512-bits if VLX is not supported.
20292 MVT WideVT = ExtVT;
20293 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
20294 NumElts *= 512 / ExtVT.getSizeInBits();
20295 InVT = MVT::getVectorVT(MVT::i1, NumElts);
20296 In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
20297 In, DAG.getIntPtrConstant(0, DL));
20298 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
20302 SDValue One = DAG.getConstant(1, DL, WideVT);
20303 SDValue Zero = DAG.getConstant(0, DL, WideVT);
20305 SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
20307 // Truncate if we had to extend above.
20309 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
20310 SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
20313 // Extract back to 128/256-bit if we widened.
20315 SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
20316 DAG.getIntPtrConstant(0, DL));
20318 return SelectedVal;
20321 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
20322 SelectionDAG &DAG) {
20323 SDValue In = Op.getOperand(0);
20324 MVT SVT = In.getSimpleValueType();
20326 if (SVT.getVectorElementType() == MVT::i1)
20327 return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
20329 assert(Subtarget.hasAVX() && "Expected AVX support");
20330 return LowerAVXExtend(Op, DAG, Subtarget);
20333 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
20334 /// It makes use of the fact that vectors with enough leading sign/zero bits
20335 /// prevent the PACKSS/PACKUS from saturating the results.
20336 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
20337 /// within each 128-bit lane.
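/// For example, a 256-bit v8i32 -> v8i16 truncation splits the source into
/// two v4i32 halves, and a single PACKSSDW/PACKUSDW of those halves produces
/// the v8i16 result, relying on the precondition that enough sign/zero bits
/// are known so the pack cannot saturate.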
20338 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
20339 const SDLoc &DL, SelectionDAG &DAG,
20340 const X86Subtarget &Subtarget) {
20341 assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
20342 "Unexpected PACK opcode");
20343 assert(DstVT.isVector() && "VT not a vector?");
20345 // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
20346 if (!Subtarget.hasSSE2())
20349 EVT SrcVT = In.getValueType();
20351 // No truncation required, we might get here due to recursive calls.
20352 if (SrcVT == DstVT)
20355 // We only support vector truncation to 64bits or greater from a
20356 // 128bits or greater source.
20357 unsigned DstSizeInBits = DstVT.getSizeInBits();
20358 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
20359 if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
20362 unsigned NumElems = SrcVT.getVectorNumElements();
20363 if (!isPowerOf2_32(NumElems))
20366 LLVMContext &Ctx = *DAG.getContext();
20367 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
20368 assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
20370 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
20372 // Pack to the largest type possible:
20373 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
20374 EVT InVT = MVT::i16, OutVT = MVT::i8;
20375 if (SrcVT.getScalarSizeInBits() > 16 &&
20376 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
20377 InVT = MVT::i32;
20378 OutVT = MVT::i16;
20379 }
20381 // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
20382 if (SrcVT.is128BitVector()) {
20383 InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
20384 OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
20385 In = DAG.getBitcast(InVT, In);
20386 SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, DAG.getUNDEF(InVT));
20387 Res = extractSubVector(Res, 0, DAG, DL, 64);
20388 return DAG.getBitcast(DstVT, Res);
20391 // Split lower/upper subvectors.
20392 SDValue Lo, Hi;
20393 std::tie(Lo, Hi) = splitVector(In, DAG, DL);
20395 unsigned SubSizeInBits = SrcSizeInBits / 2;
20396 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
20397 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
20399 // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
20400 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
20401 Lo = DAG.getBitcast(InVT, Lo);
20402 Hi = DAG.getBitcast(InVT, Hi);
20403 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20404 return DAG.getBitcast(DstVT, Res);
20407 // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
20408 // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
20409 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
20410 Lo = DAG.getBitcast(InVT, Lo);
20411 Hi = DAG.getBitcast(InVT, Hi);
20412 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20414 // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
20415 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
20416 // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
20417 SmallVector<int, 64> Mask;
20418 int Scale = 64 / OutVT.getScalarSizeInBits();
20419 narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
20420 Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
20422 if (DstVT.is256BitVector())
20423 return DAG.getBitcast(DstVT, Res);
20425 // If 512bit -> 128bit truncate another stage.
20426 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
20427 Res = DAG.getBitcast(PackedVT, Res);
20428 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20431 // Recursively pack lower/upper subvectors, concat result and pack again.
20432 assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
20433 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
20434 Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
20435 Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
20437 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
20438 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
20439 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20442 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
20443 const X86Subtarget &Subtarget) {
20446 MVT VT = Op.getSimpleValueType();
20447 SDValue In = Op.getOperand(0);
20448 MVT InVT = In.getSimpleValueType();
20450 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
20452 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
20453 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
20454 if (InVT.getScalarSizeInBits() <= 16) {
20455 if (Subtarget.hasBWI()) {
20456 // legal, will go to VPMOVB2M, VPMOVW2M
20457 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20458 // We need to shift to get the lsb into sign position.
20459 // Shift packed bytes not supported natively, bitcast to word
20460 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
20461 In = DAG.getNode(ISD::SHL, DL, ExtVT,
20462 DAG.getBitcast(ExtVT, In),
20463 DAG.getConstant(ShiftInx, DL, ExtVT));
20464 In = DAG.getBitcast(InVT, In);
20466 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
20469 // Use TESTD/Q, extended vector to packed dword/qword.
20470 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
20471 "Unexpected vector type.");
20472 unsigned NumElts = InVT.getVectorNumElements();
20473 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
20474 // We need to change to a wider element type that we have support for.
20475 // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
20476 // For 16 element vectors we extend to v16i32 unless we are explicitly
20477 // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
20478 // we need to split into two 8 element vectors which we can extend to v8i32,
20479 // truncate and concat the results. There's an additional complication if
20480 // the original type is v16i8. In that case we can't split the v16i8
20481 // directly, so we need to shuffle high elements to low and use
20482 // sign_extend_vector_inreg.
20483 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
20484 SDValue Lo, Hi;
20485 if (InVT == MVT::v16i8) {
20486 Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
20487 Hi = DAG.getVectorShuffle(
20488 MVT::v16i8, DL, In, In,
20489 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
20490 Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
20491 } else {
20492 assert(InVT == MVT::v16i16 && "Unexpected VT!");
20493 Lo = extract128BitVector(In, 0, DAG, DL);
20494 Hi = extract128BitVector(In, 8, DAG, DL);
20495 }
20496 // We're split now, just emit two truncates and a concat. The two
20497 // truncates will trigger legalization to come back to this function.
20498 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
20499 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
20500 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20502 // We either have 8 elements or we're allowed to use 512-bit vectors.
20503 // If we have VLX, we want to use the narrowest vector that can get the
20504 // job done so we use vXi32.
20505 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
20506 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
20507 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
20509 ShiftInx = InVT.getScalarSizeInBits() - 1;
20512 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20513 // We need to shift to get the lsb into sign position.
20514 In = DAG.getNode(ISD::SHL, DL, InVT, In,
20515 DAG.getConstant(ShiftInx, DL, InVT));
20517 // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
20518 if (Subtarget.hasDQI())
20519 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
20520 return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
20523 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
20525 MVT VT = Op.getSimpleValueType();
20526 SDValue In = Op.getOperand(0);
20527 MVT InVT = In.getSimpleValueType();
20528 unsigned InNumEltBits = InVT.getScalarSizeInBits();
20530 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
20531 "Invalid TRUNCATE operation");
20533 // If we're called by the type legalizer, handle a few cases.
20534 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20535 if (!TLI.isTypeLegal(InVT)) {
20536 if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
20537 VT.is128BitVector()) {
20538 assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
20539 "Unexpected subtarget!");
20540 // The default behavior is to truncate one step, concatenate, and then
20541 // truncate the remainder. We'd rather produce two 64-bit results and
20542 // concatenate those.
20543 SDValue Lo, Hi;
20544 std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
20546 EVT LoVT, HiVT;
20547 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
20549 Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
20550 Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
20551 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20554 // Otherwise let default legalization handle it.
20558 if (VT.getVectorElementType() == MVT::i1)
20559 return LowerTruncateVecI1(Op, DAG, Subtarget);
20561 // vpmovqb/w/d, vpmovdb/w, vpmovwb
20562 if (Subtarget.hasAVX512()) {
20563 if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
20564 assert(VT == MVT::v32i8 && "Unexpected VT!");
20565 return splitVectorIntUnary(Op, DAG);
20568 // word to byte only under BWI. Otherwise we have to promoted to v16i32
20569 // and then truncate that. But we should only do that if we haven't been
20570 // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
20571 // handled by isel patterns.
20572 if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
20573 Subtarget.canExtendTo512DQ())
20577 unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
20578 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
20580 // Truncate with PACKUS if we are truncating a vector with leading zero bits
20581 // that extend all the way to the packed/truncated value.
20582 // Pre-SSE41 we can only use PACKUSWB.
20583 KnownBits Known = DAG.computeKnownBits(In);
20584 if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
20585 if (SDValue V =
20586 truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
20587 return V;
20589 // Truncate with PACKSS if we are truncating a vector with sign-bits that
20590 // extend all the way to the packed/truncated value.
20591 if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
20592 if (SDValue V =
20593 truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
20594 return V;
20596 // Handle truncation of V256 to V128 using shuffles.
20597 assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
20599 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
20600 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
20601 if (Subtarget.hasInt256()) {
20602 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
20603 In = DAG.getBitcast(MVT::v8i32, In);
20604 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
20605 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
20606 DAG.getIntPtrConstant(0, DL));
20609 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20610 DAG.getIntPtrConstant(0, DL));
20611 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20612 DAG.getIntPtrConstant(2, DL));
20613 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
20614 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
20615 static const int ShufMask[] = {0, 2, 4, 6};
20616 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
20619 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
20620 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
20621 if (Subtarget.hasInt256()) {
20622 In = DAG.getBitcast(MVT::v32i8, In);
20624 // The PSHUFB mask:
20625 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
20626 -1, -1, -1, -1, -1, -1, -1, -1,
20627 16, 17, 20, 21, 24, 25, 28, 29,
20628 -1, -1, -1, -1, -1, -1, -1, -1 };
20629 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
20630 In = DAG.getBitcast(MVT::v4i64, In);
20632 static const int ShufMask2[] = {0, 2, -1, -1};
20633 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
20634 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20635 DAG.getIntPtrConstant(0, DL));
20636 return DAG.getBitcast(VT, In);
20639 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20640 DAG.getIntPtrConstant(0, DL));
20642 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20643 DAG.getIntPtrConstant(4, DL));
20645 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
20646 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
20648 // The PSHUFB mask:
20649 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
20650 -1, -1, -1, -1, -1, -1, -1, -1};
20652 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
20653 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
20655 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
20656 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
20658 // The MOVLHPS Mask:
20659 static const int ShufMask2[] = {0, 1, 4, 5};
20660 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
20661 return DAG.getBitcast(MVT::v8i16, res);
20664 if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
20665 // Use an AND to zero the upper bits for PACKUS.
20666 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
20668 SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20669 DAG.getIntPtrConstant(0, DL));
20670 SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20671 DAG.getIntPtrConstant(8, DL));
20672 return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
20675 llvm_unreachable("All 256->128 cases should have been handled above!");
20678 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
20679 bool IsStrict = Op->isStrictFPOpcode();
20680 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
20681 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
20682 MVT VT = Op->getSimpleValueType(0);
20683 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20684 MVT SrcVT = Src.getSimpleValueType();
20687 if (VT.isVector()) {
20688 if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
20689 MVT ResVT = MVT::v4i32;
20690 MVT TruncVT = MVT::v4i1;
20691 unsigned Opc;
20692 if (IsStrict)
20693 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
20694 else
20695 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20697 if (!IsSigned && !Subtarget.hasVLX()) {
20698 assert(Subtarget.useAVX512Regs() && "Unexpected features!");
20699 // Widen to 512-bits.
20700 ResVT = MVT::v8i32;
20701 TruncVT = MVT::v8i1;
20702 Opc = Op.getOpcode();
20703 // Need to concat with zero vector for strict fp to avoid spurious
20704 // exceptions.
20705 // TODO: Should we just do this for non-strict as well?
20706 SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
20707 : DAG.getUNDEF(MVT::v8f64);
20708 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
20709 DAG.getIntPtrConstant(0, dl));
20711 SDValue Res, Chain;
20714 DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Op->getOperand(0), Src});
20715 Chain = Res.getValue(1);
20717 Res = DAG.getNode(Opc, dl, ResVT, Src);
20720 Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
20721 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
20722 DAG.getIntPtrConstant(0, dl));
20724 return DAG.getMergeValues({Res, Chain}, dl);
20728 // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
20729 if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
20730 assert(!IsSigned && "Expected unsigned conversion!");
20731 assert(Subtarget.useAVX512Regs() && "Requires avx512f");
20735 // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
20736 if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
20737 (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32)) {
20738 assert(!IsSigned && "Expected unsigned conversion!");
20739 assert(Subtarget.useAVX512Regs() && !Subtarget.hasVLX() &&
20740 "Unexpected features!");
20741 MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
20742 MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
20743 // Need to concat with zero vector for strict fp to avoid spurious
20744 // exceptions.
20745 // TODO: Should we just do this for non-strict as well?
20746 SDValue Tmp =
20747 IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20748 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20749 DAG.getIntPtrConstant(0, dl));
20751 SDValue Res, Chain;
20752 if (IsStrict) {
20753 Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
20754 {Op->getOperand(0), Src});
20755 Chain = Res.getValue(1);
20756 } else {
20757 Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
20758 }
20760 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20761 DAG.getIntPtrConstant(0, dl));
20763 if (IsStrict)
20764 return DAG.getMergeValues({Res, Chain}, dl);
20765 return Res;
20766 }
20768 // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
20769 if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
20770 (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32)) {
20771 assert(Subtarget.useAVX512Regs() && Subtarget.hasDQI() &&
20772 !Subtarget.hasVLX() && "Unexpected features!");
20773 MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20774 // Need to concat with zero vector for strict fp to avoid spurious
20775 // exceptions.
20776 // TODO: Should we just do this for non-strict as well?
20777 SDValue Tmp =
20778 IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20779 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20780 DAG.getIntPtrConstant(0, dl));
20782 SDValue Res, Chain;
20784 Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20785 {Op->getOperand(0), Src});
20786 Chain = Res.getValue(1);
20788 Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
20791 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20792 DAG.getIntPtrConstant(0, dl));
20795 return DAG.getMergeValues({Res, Chain}, dl);
20799 if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
20800 if (!Subtarget.hasVLX()) {
20801 // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the type
20802 // legalizer and then widened again by vector op legalization.
20803 if (!IsStrict)
20804 return SDValue();
20806 SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
20807 SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
20808 {Src, Zero, Zero, Zero});
20809 Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20810 {Op->getOperand(0), Tmp});
20811 SDValue Chain = Tmp.getValue(1);
20812 Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
20813 DAG.getIntPtrConstant(0, dl));
20815 return DAG.getMergeValues({Tmp, Chain}, dl);
20819 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
20820 SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
20821 DAG.getUNDEF(MVT::v2f32));
20823 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
20824 : X86ISD::STRICT_CVTTP2UI;
20825 return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
20827 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20828 return DAG.getNode(Opc, dl, VT, Tmp);
20834 assert(!VT.isVector());
20836 bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
20838 if (!IsSigned && UseSSEReg) {
20839 // Conversions from f32/f64 with AVX512 should be legal.
20840 if (Subtarget.hasAVX512())
20841 return Op;
20843 // Use default expansion for i64.
20844 if (VT == MVT::i64)
20845 return SDValue();
20847 assert(VT == MVT::i32 && "Unexpected VT!");
20849 // Promote i32 to i64 and use a signed operation on 64-bit targets.
20850 // FIXME: This does not generate an invalid exception if the input does not
20851 // fit in i32. PR44019
20852 if (Subtarget.is64Bit()) {
20853 SDValue Res, Chain;
20855 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i64, MVT::Other},
20856 { Op.getOperand(0), Src });
20857 Chain = Res.getValue(1);
20859 Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
20861 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20863 return DAG.getMergeValues({ Res, Chain }, dl);
20867 // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
20868 // use fisttp which will be handled later.
20869 if (!Subtarget.hasSSE3())
20873 // Promote i16 to i32 if we can use a SSE operation or the type is f128.
20874 // FIXME: This does not generate an invalid exception if the input does not
20875 // fit in i16. PR44019
20876 if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
20877 assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
20878 SDValue Res, Chain;
20879 if (IsStrict) {
20880 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i32, MVT::Other},
20881 { Op.getOperand(0), Src });
20882 Chain = Res.getValue(1);
20883 } else
20884 Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
20886 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20887 if (IsStrict)
20888 return DAG.getMergeValues({ Res, Chain }, dl);
20889 return Res;
20890 }
20892 // If this is a FP_TO_SINT using SSEReg we're done.
20893 if (UseSSEReg && IsSigned)
20896 // fp128 needs to use a libcall.
20897 if (SrcVT == MVT::f128) {
20898 RTLIB::Libcall LC;
20899 if (IsSigned)
20900 LC = RTLIB::getFPTOSINT(SrcVT, VT);
20901 else
20902 LC = RTLIB::getFPTOUINT(SrcVT, VT);
20904 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20905 MakeLibCallOptions CallOptions;
20906 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
20907 dl, Chain);
20909 if (IsStrict)
20910 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
20911 return Tmp.first;
20912 }
20915 // Fall back to X87.
20916 SDValue Chain;
20917 if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
20918 if (IsStrict)
20919 return DAG.getMergeValues({V, Chain}, dl);
20920 return V;
20921 }
20923 llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
20924 }
20926 SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
20927 SelectionDAG &DAG) const {
20928 SDValue Src = Op.getOperand(0);
20929 MVT SrcVT = Src.getSimpleValueType();
20931 // If the source is in an SSE register, the node is Legal.
20932 if (isScalarFPTypeInSSEReg(SrcVT))
20935 return LRINT_LLRINTHelper(Op.getNode(), DAG);
20938 SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
20939 SelectionDAG &DAG) const {
20940 EVT DstVT = N->getValueType(0);
20941 SDValue Src = N->getOperand(0);
20942 EVT SrcVT = Src.getValueType();
20944 if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
20945 // f16 must be promoted before using the lowering in this routine.
20946 // fp128 does not use this lowering.
20947 return SDValue();
20948 }
20950 SDLoc DL(N);
20951 SDValue Chain = DAG.getEntryNode();
20953 bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);
20955 // If we're converting from SSE, the stack slot needs to hold both types.
20956 // Otherwise it only needs to hold the DstVT.
20957 EVT OtherVT = UseSSE ? SrcVT : DstVT;
20958 SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
20959 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
20960 MachinePointerInfo MPI =
20961 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
20963 if (UseSSE) {
20964 assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
20965 Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
20966 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
20967 SDValue Ops[] = { Chain, StackPtr };
20969 Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
20970 /*Align*/ None, MachineMemOperand::MOLoad);
20971 Chain = Src.getValue(1);
20972 }
20974 SDValue StoreOps[] = { Chain, Src, StackPtr };
20975 Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
20976 StoreOps, DstVT, MPI, /*Align*/ None,
20977 MachineMemOperand::MOStore);
20979 return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
20982 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
20983 bool IsStrict = Op->isStrictFPOpcode();
20985 SDLoc DL(Op);
20986 MVT VT = Op.getSimpleValueType();
20987 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
20988 MVT SVT = In.getSimpleValueType();
20990 if (VT == MVT::f128) {
20991 RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, VT);
20992 return LowerF128Call(Op, DAG, LC);
20995 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
20997 SDValue Res =
20998 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
20999 if (IsStrict)
21000 return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
21001 {Op->getOperand(0), Res});
21002 return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
21005 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
21006 bool IsStrict = Op->isStrictFPOpcode();
21008 MVT VT = Op.getSimpleValueType();
21009 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
21010 MVT SVT = In.getSimpleValueType();
21012 // It's legal except when f128 is involved
21013 if (SVT != MVT::f128)
21016 RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, VT);
21018 // FP_ROUND node has a second operand indicating whether it is known to be
21019 // precise. That doesn't take part in the LibCall so we can't directly use
21020 // LowerF128Call.
21022 SDLoc dl(Op);
21023 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21024 MakeLibCallOptions CallOptions;
21025 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, In, CallOptions,
21026 dl, Chain);
21028 if (IsStrict)
21029 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
21030 return Tmp.first;
21031 }
21034 static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
21035 bool IsStrict = Op->isStrictFPOpcode();
21036 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21037 assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
21038 "Unexpected VT!");
21040 SDLoc dl(Op);
21041 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
21042 DAG.getConstant(0, dl, MVT::v8i16), Src,
21043 DAG.getIntPtrConstant(0, dl));
21045 SDValue Chain;
21046 if (IsStrict) {
21047 Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
21048 {Op.getOperand(0), Res});
21049 Chain = Res.getValue(1);
21051 Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
21054 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
21055 DAG.getIntPtrConstant(0, dl));
21058 return DAG.getMergeValues({Res, Chain}, dl);
21063 static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
21064 bool IsStrict = Op->isStrictFPOpcode();
21065 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21066 assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
21070 SDValue Res, Chain;
21072 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
21073 DAG.getConstantFP(0, dl, MVT::v4f32), Src,
21074 DAG.getIntPtrConstant(0, dl));
21075 Res = DAG.getNode(
21076 X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
21077 {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
21078 Chain = Res.getValue(1);
21080 // FIXME: Should we use zeros for upper elements for non-strict?
21081 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
21082 Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
21083 DAG.getTargetConstant(4, dl, MVT::i32));
21086 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
21087 DAG.getIntPtrConstant(0, dl));
21090 return DAG.getMergeValues({Res, Chain}, dl);
21095 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21096 /// vector operation in place of the typical scalar operation.
21097 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
21098 const X86Subtarget &Subtarget) {
21099 // If both operands have other uses, this is probably not profitable.
21100 SDValue LHS = Op.getOperand(0);
21101 SDValue RHS = Op.getOperand(1);
21102 if (!LHS.hasOneUse() && !RHS.hasOneUse())
21105 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
21106 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
21107 if (IsFP && !Subtarget.hasSSE3())
21109 if (!IsFP && !Subtarget.hasSSSE3())
21112 // Extract from a common vector.
21113 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21114 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21115 LHS.getOperand(0) != RHS.getOperand(0) ||
21116 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
21117 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
21118 !shouldUseHorizontalOp(true, DAG, Subtarget))
21121 // Allow commuted 'hadd' ops.
21122 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
21123 unsigned HOpcode;
21124 switch (Op.getOpcode()) {
21125 case ISD::ADD: HOpcode = X86ISD::HADD; break;
21126 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
21127 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
21128 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
21130 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
21132 unsigned LExtIndex = LHS.getConstantOperandVal(1);
21133 unsigned RExtIndex = RHS.getConstantOperandVal(1);
21134 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
21135 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
21136 std::swap(LExtIndex, RExtIndex);
21138 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
21141 SDValue X = LHS.getOperand(0);
21142 EVT VecVT = X.getValueType();
21143 unsigned BitWidth = VecVT.getSizeInBits();
21144 unsigned NumLanes = BitWidth / 128;
21145 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
21146 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
21147 "Not expecting illegal vector widths here");
21149 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
21150 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
21152 if (BitWidth == 256 || BitWidth == 512) {
21153 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
21154 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
21155 LExtIndex %= NumEltsPerLane;
21158 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
21159 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
21160 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
21161 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
21162 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
21163 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
21164 DAG.getIntPtrConstant(LExtIndex / 2, DL));
21167 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21168 /// vector operation in place of the typical scalar operation.
21169 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
21170 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
21171 "Only expecting float/double");
21172 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
21175 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
21176 /// This mode isn't supported in hardware on X86. But as long as we aren't
21177 /// compiling with trapping math, we can emulate this with
21178 /// floor(X + copysign(nextafter(0.5, 0.0), X)).
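/// nextafter(0.5, 0.0) (the largest value strictly below 0.5) is used rather
/// than 0.5 itself because adding 0.5 to an input just below 0.5 can round
/// the sum up to 1.0, which would then truncate to 1 instead of 0; the
/// slightly smaller adder keeps such sums below 1.0.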
21179 static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
21180 SDValue N0 = Op.getOperand(0);
21181 SDLoc dl(Op);
21182 MVT VT = Op.getSimpleValueType();
21184 // N0 += copysign(nextafter(0.5, 0.0), N0)
21185 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21186 bool Ignored;
21187 APFloat Point5Pred = APFloat(0.5f);
21188 Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
21189 Point5Pred.next(/*nextDown*/true);
21191 SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
21192 DAG.getConstantFP(Point5Pred, dl, VT), N0);
21193 N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);
21195 // Truncate the result to remove fraction.
21196 return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
21199 /// The only differences between FABS and FNEG are the mask and the logic op.
21200 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
21201 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
21202 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
21203 "Wrong opcode for lowering FABS or FNEG.");
21205 bool IsFABS = (Op.getOpcode() == ISD::FABS);
21207 // If this is a FABS and it has an FNEG user, bail out to fold the combination
21208 // into an FNABS. We'll lower the FABS after that if it is still in use.
21209 if (IsFABS)
21210 for (SDNode *User : Op->uses())
21211 if (User->getOpcode() == ISD::FNEG)
21212 return Op;
21214 SDLoc dl(Op);
21215 MVT VT = Op.getSimpleValueType();
21217 bool IsF128 = (VT == MVT::f128);
21218 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
21219 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
21220 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
21221 "Unexpected type in LowerFABSorFNEG");
21223 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
21224 // decide if we should generate a 16-byte constant mask when we only need 4 or
21225 // 8 bytes for the scalar case.
21227 // There are no scalar bitwise logical SSE/AVX instructions, so we
21228 // generate a 16-byte vector constant and logic op even for the scalar case.
21229 // Using a 16-byte mask allows folding the load of the mask with
21230 // the logic op, so it can save (~4 bytes) on code size.
21231 bool IsFakeVector = !VT.isVector() && !IsF128;
21232 MVT LogicVT = VT;
21233 if (IsFakeVector)
21234 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
21236 unsigned EltBits = VT.getScalarSizeInBits();
21237 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
21238 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
21239 APInt::getSignMask(EltBits);
21240 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21241 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
21243 SDValue Op0 = Op.getOperand(0);
21244 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
21245 unsigned LogicOp = IsFABS ? X86ISD::FAND :
21246 IsFNABS ? X86ISD::FOR :
21247 X86ISD::FXOR;
21248 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
21250 if (VT.isVector() || IsF128)
21251 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21253 // For the scalar case extend to a 128-bit vector, perform the logic op,
21254 // and extract the scalar result back out.
21255 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
21256 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21257 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
21258 DAG.getIntPtrConstant(0, dl));
21261 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
21262 SDValue Mag = Op.getOperand(0);
21263 SDValue Sign = Op.getOperand(1);
21266 // If the sign operand is smaller, extend it first.
21267 MVT VT = Op.getSimpleValueType();
21268 if (Sign.getSimpleValueType().bitsLT(VT))
21269 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
21271 // And if it is bigger, shrink it first.
21272 if (Sign.getSimpleValueType().bitsGT(VT))
21273 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
21275 // At this point the operands and the result should have the same
21276 // type, and that won't be f80 since that is not custom lowered.
21277 bool IsF128 = (VT == MVT::f128);
21278 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
21279 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
21280 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
21281 "Unexpected type in LowerFCOPYSIGN");
21283 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21285 // Perform all scalar logic operations as 16-byte vectors because there are no
21286 // scalar FP logic instructions in SSE.
21287 // TODO: This isn't necessary. If we used scalar types, we might avoid some
21288 // unnecessary splats, but we might miss load folding opportunities. Should
21289 // this decision be based on OptimizeForSize?
21290 bool IsFakeVector = !VT.isVector() && !IsF128;
21291 MVT LogicVT = VT;
21292 if (IsFakeVector)
21293 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
21295 // The mask constants are automatically splatted for vector types.
21296 unsigned EltSizeInBits = VT.getScalarSizeInBits();
21297 SDValue SignMask = DAG.getConstantFP(
21298 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
21299 SDValue MagMask = DAG.getConstantFP(
21300 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
21302 // First, clear all bits but the sign bit from the second operand (sign).
21303 if (IsFakeVector)
21304 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
21305 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
21307 // Next, clear the sign bit from the first operand (magnitude).
21308 // TODO: If we had general constant folding for FP logic ops, this check
21309 // wouldn't be necessary.
21310 SDValue MagBits;
21311 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
21312 APFloat APF = Op0CN->getValueAPF();
21313 APF.clearSign();
21314 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
21315 } else {
21316 // If the magnitude operand wasn't a constant, we need to AND out the sign.
21317 if (IsFakeVector)
21318 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
21319 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
21320 }
21322 // OR the magnitude value with the sign bit.
21323 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
21324 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
21325 DAG.getIntPtrConstant(0, dl));
21328 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
21329 SDValue N0 = Op.getOperand(0);
21331 MVT VT = Op.getSimpleValueType();
21333 MVT OpVT = N0.getSimpleValueType();
21334 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
21335 "Unexpected type for FGETSIGN");
21337 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
21338 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
21339 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
21340 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
21341 Res = DAG.getZExtOrTrunc(Res, dl, VT);
21342 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
21346 /// Helper for creating a X86ISD::SETCC node.
21347 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
21348 SelectionDAG &DAG) {
21349 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
21350 DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
21353 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
21354 /// style scalarized (associative) reduction patterns. Partial reductions
21355 /// are supported when the pointer SrcMask is non-null.
21356 /// TODO - move this to SelectionDAG?
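/// For example, or(extractelt(X,0), or(extractelt(X,1), or(extractelt(X,2),
/// extractelt(X,3)))) over all lanes of a v4i32 X is matched with
/// SrcOps == { X }; when only some lanes feed the reduction, the per-source
/// masks in SrcMask record which elements were actually used.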
21357 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
21358 SmallVectorImpl<SDValue> &SrcOps,
21359 SmallVectorImpl<APInt> *SrcMask = nullptr) {
21360 SmallVector<SDValue, 8> Opnds;
21361 DenseMap<SDValue, APInt> SrcOpMap;
21362 EVT VT = MVT::Other;
21364 // Recognize a special case where a vector is cast into a wide integer to
21365 // test all 0s.
21366 assert(Op.getOpcode() == unsigned(BinOp) &&
21367 "Unexpected bit reduction opcode");
21368 Opnds.push_back(Op.getOperand(0));
21369 Opnds.push_back(Op.getOperand(1));
21371 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
21372 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
21373 // BFS traverse all BinOp operands.
21374 if (I->getOpcode() == unsigned(BinOp)) {
21375 Opnds.push_back(I->getOperand(0));
21376 Opnds.push_back(I->getOperand(1));
21377 // Re-evaluate the number of nodes to be traversed.
21378 e += 2; // 2 more nodes (LHS and RHS) are pushed.
21382 // Quit if a non-EXTRACT_VECTOR_ELT
21383 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
21386 // Quit if without a constant index.
21387 auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
21388 if (!Idx)
21389 return false;
21391 SDValue Src = I->getOperand(0);
21392 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
21393 if (M == SrcOpMap.end()) {
21394 VT = Src.getValueType();
21395 // Quit if not the same type.
21396 if (SrcOpMap.begin() != SrcOpMap.end() &&
21397 VT != SrcOpMap.begin()->first.getValueType())
21399 unsigned NumElts = VT.getVectorNumElements();
21400 APInt EltCount = APInt::getNullValue(NumElts);
21401 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
21402 SrcOps.push_back(Src);
21405 // Quit if element already used.
21406 unsigned CIdx = Idx->getZExtValue();
21407 if (M->second[CIdx])
21409 M->second.setBit(CIdx);
21413 // Collect the source partial masks.
21414 for (SDValue &SrcOp : SrcOps)
21415 SrcMask->push_back(SrcOpMap[SrcOp]);
21417 // Quit if not all elements are used.
21418 for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
21419 E = SrcOpMap.end();
21421 if (!I->second.isAllOnesValue())
21429 // Helper function for comparing all bits of a vector against zero.
21430 static SDValue LowerVectorAllZero(const SDLoc &DL, SDValue V, ISD::CondCode CC,
21432 const X86Subtarget &Subtarget,
21433 SelectionDAG &DAG, X86::CondCode &X86CC) {
21434 EVT VT = V.getValueType();
21435 assert(Mask.getBitWidth() == VT.getScalarSizeInBits() &&
21436 "Element Mask vs Vector bitwidth mismatch");
21438 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
21439 X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
21441 auto MaskBits = [&](SDValue Src) {
21442 if (Mask.isAllOnesValue())
21444 EVT SrcVT = Src.getValueType();
21445 SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
21446 return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
21449 // For a sub-128-bit vector, cast to a (legal) integer and compare with zero.
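// E.g. an all-zero test of a v8i8 value becomes a single CMP of the bitcast
// (and optionally masked) i64 against 0, provided i64 is a legal type.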
21450 if (VT.getSizeInBits() < 128) {
21451 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
21452 if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT))
21454 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
21455 DAG.getBitcast(IntVT, MaskBits(V)),
21456 DAG.getConstant(0, DL, IntVT));
21459 // Quit if not splittable to 128/256-bit vector.
21460 if (!isPowerOf2_32(VT.getSizeInBits()))
21463 // Split down to 128/256-bit vector.
21464 unsigned TestSize = Subtarget.hasAVX() ? 256 : 128;
21465 while (VT.getSizeInBits() > TestSize) {
21466 auto Split = DAG.SplitVector(V, DL);
21467 VT = Split.first.getValueType();
21468 V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
21471 bool UsePTEST = Subtarget.hasSSE41();
21473 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
21474 V = DAG.getBitcast(TestVT, MaskBits(V));
21475 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
21478 // Without PTEST, a masked v2i64 or-reduction is not faster than scalarization; bail out.
21480 if (!Mask.isAllOnesValue() && VT.getScalarSizeInBits() > 32)
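// Otherwise fall back to comparing every byte against zero, gathering the
// per-byte results with MOVMSK, and checking that all 16 mask bits are set.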
21483 V = DAG.getBitcast(MVT::v16i8, MaskBits(V));
21484 V = DAG.getNode(X86ISD::PCMPEQ, DL, MVT::v16i8, V,
21485 getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
21486 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
21487 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
21488 DAG.getConstant(0xFFFF, DL, MVT::i32));
21491 // Check whether an OR'd reduction tree is PTEST-able, or if we can fall back to
21492 // CMP(MOVMSK(PCMPEQB(X,0))).
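// E.g. (seteq (or (extractelt X, 0), (extractelt X, 1)), 0) with a v2i64 X
// becomes PTEST X, X followed by SETE on SSE4.1 targets.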
21493 static SDValue MatchVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
21495 const X86Subtarget &Subtarget,
21496 SelectionDAG &DAG, SDValue &X86CC) {
21497 assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
21499 if (!Subtarget.hasSSE2() || !Op->hasOneUse())
21502 // Check whether we're masking/truncating an OR-reduction result, in which
21503 // case track the masked bits.
21504 APInt Mask = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
21505 switch (Op.getOpcode()) {
21506 case ISD::TRUNCATE: {
21507 SDValue Src = Op.getOperand(0);
21508 Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
21509 Op.getScalarValueSizeInBits());
21514 if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
21515 Mask = Cst->getAPIntValue();
21516 Op = Op.getOperand(0);
21522 SmallVector<SDValue, 8> VecIns;
21523 if (Op.getOpcode() == ISD::OR && matchScalarReduction(Op, ISD::OR, VecIns)) {
21524 EVT VT = VecIns[0].getValueType();
21525 assert(llvm::all_of(VecIns,
21526 [VT](SDValue V) { return VT == V.getValueType(); }) &&
21527 "Reduction source vector mismatch");
21529 // Quit if less than 128-bits or not splittable to 128/256-bit vector.
21530 if (VT.getSizeInBits() < 128 || !isPowerOf2_32(VT.getSizeInBits()))
21533 // If more than one full vector is evaluated, OR them first before PTEST.
21534 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
21535 Slot += 2, e += 1) {
21536 // Each iteration will OR 2 nodes and append the result until there is
21537 // only 1 node left, i.e. the final OR'd value of all vectors.
21538 SDValue LHS = VecIns[Slot];
21539 SDValue RHS = VecIns[Slot + 1];
21540 VecIns.push_back(DAG.getNode(ISD::OR, DL, VT, LHS, RHS));
21543 X86::CondCode CCode;
21544 if (SDValue V = LowerVectorAllZero(DL, VecIns.back(), CC, Mask, Subtarget,
21546 X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
21551 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
21552 ISD::NodeType BinOp;
21553 if (SDValue Match =
21554 DAG.matchBinOpReduction(Op.getNode(), BinOp, {ISD::OR})) {
21555 X86::CondCode CCode;
21557 LowerVectorAllZero(DL, Match, CC, Mask, Subtarget, DAG, CCode)) {
21558 X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
21567 /// Return true if \c Op has a use that doesn't just read flags.
21568 static bool hasNonFlagsUse(SDValue Op) {
21569 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
21571 SDNode *User = *UI;
21572 unsigned UOpNo = UI.getOperandNo();
21573 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
21574 // Look past the truncate.
21575 UOpNo = User->use_begin().getOperandNo();
21576 User = *User->use_begin();
21579 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
21580 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
21586 // Transform to an x86-specific ALU node with flags if there is a chance of
21587 // using an RMW op or only the flags are used. Otherwise, leave
21588 // the node alone and emit a 'cmp' or 'test' instruction.
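// E.g. if the only users of an ADD are a store of its result (a potential
// RMW 'add [mem], reg') and a SETCC reading EFLAGS, reusing the ADD's flags
// avoids a separate TEST.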
21589 static bool isProfitableToUseFlagOp(SDValue Op) {
21590 for (SDNode *U : Op->uses())
21591 if (U->getOpcode() != ISD::CopyToReg &&
21592 U->getOpcode() != ISD::SETCC &&
21593 U->getOpcode() != ISD::STORE)
21599 /// Emit nodes that will be selected as "test Op0,Op0", or something
21601 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
21602 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
21603 // CF and OF aren't always set the way we want. Determine which
21604 // of these we need.
21605 bool NeedCF = false;
21606 bool NeedOF = false;
21609 case X86::COND_A: case X86::COND_AE:
21610 case X86::COND_B: case X86::COND_BE:
21613 case X86::COND_G: case X86::COND_GE:
21614 case X86::COND_L: case X86::COND_LE:
21615 case X86::COND_O: case X86::COND_NO: {
21616 // Check if we really need to set the
21617 // Overflow flag. If NoSignedWrap is present
21618 // that is not actually needed.
21619 switch (Op->getOpcode()) {
21624 if (Op.getNode()->getFlags().hasNoSignedWrap())
21634 // See if we can use the EFLAGS value from the operand instead of
21635 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
21636 // we prove that the arithmetic won't overflow, we can't use OF or CF.
21637 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
21638 // Emit a CMP with 0, which is the TEST pattern.
21639 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
21640 DAG.getConstant(0, dl, Op.getValueType()));
21642 unsigned Opcode = 0;
21643 unsigned NumOperands = 0;
21645 SDValue ArithOp = Op;
21647 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
21648 // which may be the result of a CAST. We use the variable 'Op', which is the
21649 // original (non-cast) value, when we check for possible users.
21650 switch (ArithOp.getOpcode()) {
21652 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
21653 // because a TEST instruction will be better.
21654 if (!hasNonFlagsUse(Op))
21662 if (!isProfitableToUseFlagOp(Op))
21665 // Otherwise use a regular EFLAGS-setting instruction.
21666 switch (ArithOp.getOpcode()) {
21667 default: llvm_unreachable("unexpected operator!");
21668 case ISD::ADD: Opcode = X86ISD::ADD; break;
21669 case ISD::SUB: Opcode = X86ISD::SUB; break;
21670 case ISD::XOR: Opcode = X86ISD::XOR; break;
21671 case ISD::AND: Opcode = X86ISD::AND; break;
21672 case ISD::OR: Opcode = X86ISD::OR; break;
21682 return SDValue(Op.getNode(), 1);
21685 // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
21686 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
21687 return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
21688 Op->getOperand(1)).getValue(1);
21695 // Emit a CMP with 0, which is the TEST pattern.
21696 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
21697 DAG.getConstant(0, dl, Op.getValueType()));
21699 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
21700 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
21702 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
21703 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
21704 return SDValue(New.getNode(), 1);
21707 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
21709 static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
21710 const SDLoc &dl, SelectionDAG &DAG,
21711 const X86Subtarget &Subtarget) {
21712 if (isNullConstant(Op1))
21713 return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
21715 EVT CmpVT = Op0.getValueType();
21717 assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
21718 CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
21720 // Only promote the compare up to i32 if it is a 16-bit operation
21721 // with an immediate. 16-bit immediates are to be avoided.
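// (A 16-bit immediate requires an operand-size prefix and typically incurs a
// length-changing-prefix stall in the decoders of many x86 cores, so a 32-bit
// compare is usually cheaper.)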
21722 if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
21723 !DAG.getMachineFunction().getFunction().hasMinSize()) {
21724 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
21725 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
21726 // Don't do this if the immediate can fit in 8-bits.
21727 if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
21728 (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
21729 unsigned ExtendOp =
21730 isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
21731 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
21732 // For equality comparisons try to use SIGN_EXTEND if the input was
21733 // truncated from something with enough sign bits.
21734 if (Op0.getOpcode() == ISD::TRUNCATE) {
21735 SDValue In = Op0.getOperand(0);
21737 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
21739 ExtendOp = ISD::SIGN_EXTEND;
21740 } else if (Op1.getOpcode() == ISD::TRUNCATE) {
21741 SDValue In = Op1.getOperand(0);
21743 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
21745 ExtendOp = ISD::SIGN_EXTEND;
21750 Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
21751 Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
21755 // Try to shrink i64 compares if the input has enough zero bits.
21756 // FIXME: Do this for non-constant compares for constant on LHS?
21757 if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
21758 Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
21759 cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
21760 DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
21762 Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
21763 Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
21766 // 0-x == y --> x+y == 0
21767 // 0-x != y --> x+y != 0
21768 if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
21769 Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
21770 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
21771 SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
21772 return Add.getValue(1);
21775 // x == 0-y --> x+y == 0
21776 // x != 0-y --> x+y != 0
21777 if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
21778 Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
21779 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
21780 SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
21781 return Add.getValue(1);
21784 // Use SUB instead of CMP to enable CSE between SUB and CMP.
21785 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
21786 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
21787 return Sub.getValue(1);
21790 /// Check if replacement of SQRT with RSQRT should be disabled.
21791 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
21792 EVT VT = Op.getValueType();
21794 // We never want to use both SQRT and RSQRT instructions for the same input.
21795 if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
21799 return Subtarget.hasFastVectorFSQRT();
21800 return Subtarget.hasFastScalarFSQRT();
21803 /// The minimum architected relative accuracy is 2^-12. We need one
21804 /// Newton-Raphson step to have a good float result (24 bits of precision).
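/// The caller then applies RefinementSteps iterations of the form
///   Est' = Est * (1.5 - 0.5 * Op * Est * Est)
/// each of which roughly doubles the number of correct bits.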
21805 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
21806 SelectionDAG &DAG, int Enabled,
21807 int &RefinementSteps,
21808 bool &UseOneConstNR,
21809 bool Reciprocal) const {
21810 EVT VT = Op.getValueType();
21812 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
21813 // It is likely not profitable to do this for f64 because a double-precision
21814 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
21815 // instructions: convert to single, rsqrtss, convert back to double, refine
21816 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
21817 // along with FMA, this could be a throughput win.
21818 // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
21819 // after legalize types.
21820 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
21821 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
21822 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
21823 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
21824 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
21825 if (RefinementSteps == ReciprocalEstimate::Unspecified)
21826 RefinementSteps = 1;
21828 UseOneConstNR = false;
21829 // There is no FSQRT for 512-bits, but there is RSQRT14.
21830 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
21831 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
21836 /// The minimum architected relative accuracy is 2^-12. We need one
21837 /// Newton-Raphson step to have a good float result (24 bits of precision).
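/// The caller then applies RefinementSteps iterations of the form
///   Est' = Est * (2.0 - Op * Est)
/// each of which roughly doubles the number of correct bits.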
21838 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
21840 int &RefinementSteps) const {
21841 EVT VT = Op.getValueType();
21843 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
21844 // It is likely not profitable to do this for f64 because a double-precision
21845 // reciprocal estimate with refinement on x86 prior to FMA requires
21846 // 15 instructions: convert to single, rcpss, convert back to double, refine
21847 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
21848 // along with FMA, this could be a throughput win.
21850 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
21851 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
21852 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
21853 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
21854 // Enable estimate codegen with 1 refinement step for vector division.
21855 // Scalar division estimates are disabled because they break too much
21856 // real-world code. These defaults are intended to match GCC behavior.
21857 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
21860 if (RefinementSteps == ReciprocalEstimate::Unspecified)
21861 RefinementSteps = 1;
21863 // There is no FRCP for 512-bits, but there is RCP14.
21864 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
21865 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
21870 /// If we have at least two divisions that use the same divisor, convert to
21871 /// multiplication by a reciprocal. This may need to be adjusted for a given
21872 /// CPU if a division's cost is not at least twice the cost of a multiplication.
21873 /// This is because we still need one division to calculate the reciprocal and
21874 /// then we need two multiplies by that reciprocal as replacements for the
21875 /// original divisions.
21876 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
21881 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
21883 SmallVectorImpl<SDNode *> &Created) const {
21884 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
21885 if (isIntDivCheap(N->getValueType(0), Attr))
21886 return SDValue(N,0); // Lower SDIV as SDIV
21888 assert((Divisor.isPowerOf2() || (-Divisor).isPowerOf2()) &&
21889 "Unexpected divisor!");
21891 // Only perform this transform if CMOV is supported otherwise the select
21892 // below will become a branch.
21893 if (!Subtarget.hasCMov())
21896 // fold (sdiv X, pow2)
21897 EVT VT = N->getValueType(0);
21898 // FIXME: Support i8.
21899 if (VT != MVT::i16 && VT != MVT::i32 &&
21900 !(Subtarget.is64Bit() && VT == MVT::i64))
21903 unsigned Lg2 = Divisor.countTrailingZeros();
21905 // If the divisor is 2 or -2, the default expansion is better.
21910 SDValue N0 = N->getOperand(0);
21911 SDValue Zero = DAG.getConstant(0, DL, VT);
21912 APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
21913 SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
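// Worked example for X / 8 (Lg2 == 3): T = (X < 0) ? X + 7 : X, selected via
// CMOV; Result = T >>s 3; and if the divisor was -8, Result = 0 - Result.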
21915 // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
21916 SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
21917 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
21918 SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
21920 Created.push_back(Cmp.getNode());
21921 Created.push_back(Add.getNode());
21922 Created.push_back(CMov.getNode());
21926 DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));
21928 // If we're dividing by a positive value, we're done. Otherwise, we must
21929 // negate the result.
21930 if (Divisor.isNonNegative())
21933 Created.push_back(SRA.getNode());
21934 return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
21937 /// Result of 'and' is compared against zero. Change to a BT node if possible.
21938 /// Returns the BT node and the condition code needed to use it.
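/// E.g. both (and X, (shl 1, N)) == 0 and (and (srl X, N), 1) != 0 become
/// BT X, N, with the result read back through the carry flag (COND_AE for the
/// SETEQ form, COND_B for SETNE).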
21939 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
21940 const SDLoc &dl, SelectionDAG &DAG,
21942 assert(And.getOpcode() == ISD::AND && "Expected AND node!");
21943 SDValue Op0 = And.getOperand(0);
21944 SDValue Op1 = And.getOperand(1);
21945 if (Op0.getOpcode() == ISD::TRUNCATE)
21946 Op0 = Op0.getOperand(0);
21947 if (Op1.getOpcode() == ISD::TRUNCATE)
21948 Op1 = Op1.getOperand(0);
21950 SDValue Src, BitNo;
21951 if (Op1.getOpcode() == ISD::SHL)
21952 std::swap(Op0, Op1);
21953 if (Op0.getOpcode() == ISD::SHL) {
21954 if (isOneConstant(Op0.getOperand(0))) {
21955 // If we looked past a truncate, check that it's only truncating away known zeros.
21957 unsigned BitWidth = Op0.getValueSizeInBits();
21958 unsigned AndBitWidth = And.getValueSizeInBits();
21959 if (BitWidth > AndBitWidth) {
21960 KnownBits Known = DAG.computeKnownBits(Op0);
21961 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
21965 BitNo = Op0.getOperand(1);
21967 } else if (Op1.getOpcode() == ISD::Constant) {
21968 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
21969 uint64_t AndRHSVal = AndRHS->getZExtValue();
21970 SDValue AndLHS = Op0;
21972 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
21973 Src = AndLHS.getOperand(0);
21974 BitNo = AndLHS.getOperand(1);
21976 // Use BT if the immediate can't be encoded in a TEST instruction or we
21977 // are optimizing for size and the immediate won't fit in a byte.
21978 bool OptForSize = DAG.shouldOptForSize();
21979 if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
21980 isPowerOf2_64(AndRHSVal)) {
21982 BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
21983 Src.getValueType());
21988 // No patterns found, give up.
21989 if (!Src.getNode())
21992 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
21993 // instruction. Since the shift amount is in-range-or-undefined, we know
21994 // that doing a bittest on the i32 value is ok. We extend to i32 because
21995 // the encoding for the i16 version is larger than the i32 version.
21996 // Also promote i16 to i32 for performance / code size reasons.
21997 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
21998 Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
22000 // See if we can use the 32-bit instruction instead of the 64-bit one for a
22001 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
22002 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
22003 // known to be zero.
22004 if (Src.getValueType() == MVT::i64 &&
22005 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
22006 Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
22008 // If the operand types disagree, extend the shift amount to match. Since
22009 // BT ignores high bits (like shifts) we can use anyextend.
22010 if (Src.getValueType() != BitNo.getValueType())
22011 BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
22013 X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
22015 return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
22018 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask CMPs.
22020 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
22021 SDValue &Op1, bool &IsAlwaysSignaling) {
22025 // SSE Condition code mapping:
//  0 - EQ
//  1 - LT
//  2 - LE
//  3 - UNORD
//  4 - NEQ
//  5 - NLT
//  6 - NLE
//  7 - ORD
22034 switch (SetCCOpcode) {
22035 default: llvm_unreachable("Unexpected SETCC condition");
22037 case ISD::SETEQ: SSECC = 0; break;
22039 case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
22041 case ISD::SETOLT: SSECC = 1; break;
22043 case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
22045 case ISD::SETOLE: SSECC = 2; break;
22046 case ISD::SETUO: SSECC = 3; break;
22048 case ISD::SETNE: SSECC = 4; break;
22049 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
22050 case ISD::SETUGE: SSECC = 5; break;
22051 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
22052 case ISD::SETUGT: SSECC = 6; break;
22053 case ISD::SETO: SSECC = 7; break;
22054 case ISD::SETUEQ: SSECC = 8; break;
22055 case ISD::SETONE: SSECC = 12; break;
22058 std::swap(Op0, Op1);
22060 switch (SetCCOpcode) {
22062 IsAlwaysSignaling = true;
22072 IsAlwaysSignaling = false;
22079 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
22080 /// concatenate the result back.
22081 static SDValue splitIntVSETCC(SDValue Op, SelectionDAG &DAG) {
22082 EVT VT = Op.getValueType();
22084 assert(Op.getOpcode() == ISD::SETCC && "Unsupported operation");
22085 assert(Op.getOperand(0).getValueType().isInteger() &&
22086 VT == Op.getOperand(0).getValueType() && "Unsupported VTs!");
22089 SDValue CC = Op.getOperand(2);
22091 // Extract the LHS Lo/Hi vectors
22092 SDValue LHS1, LHS2;
22093 std::tie(LHS1, LHS2) = splitVector(Op.getOperand(0), DAG, dl);
22095 // Extract the RHS Lo/Hi vectors
22096 SDValue RHS1, RHS2;
22097 std::tie(RHS1, RHS2) = splitVector(Op.getOperand(1), DAG, dl);
22099 // Issue the operation on the smaller types and concatenate the result back
22101 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
22102 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
22103 DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
22104 DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
22107 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
22109 SDValue Op0 = Op.getOperand(0);
22110 SDValue Op1 = Op.getOperand(1);
22111 SDValue CC = Op.getOperand(2);
22112 MVT VT = Op.getSimpleValueType();
22115 assert(VT.getVectorElementType() == MVT::i1 &&
22116 "Cannot set masked compare for this operation");
22118 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
22120 // Prefer SETGT over SETLT.
22121 if (SetCCOpcode == ISD::SETLT) {
22122 SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
22123 std::swap(Op0, Op1);
22126 return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
22129 /// Given a buildvector constant, return a new vector constant with each element
22130 /// incremented or decremented. If incrementing or decrementing would result in
22131 /// unsigned overflow or underflow or this is not a simple vector constant,
22132 /// return an empty value.
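/// E.g. incrementing <i8 1, i8 2, i8 3, i8 4> yields <2, 3, 4, 5>, while
/// decrementing a vector that contains 0 (or incrementing one that contains
/// the unsigned maximum) returns an empty SDValue.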
22133 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
22134 auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
22138 MVT VT = V.getSimpleValueType();
22139 MVT EltVT = VT.getVectorElementType();
22140 unsigned NumElts = VT.getVectorNumElements();
22141 SmallVector<SDValue, 8> NewVecC;
22143 for (unsigned i = 0; i < NumElts; ++i) {
22144 auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
22145 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
22148 // Avoid overflow/underflow.
22149 const APInt &EltC = Elt->getAPIntValue();
22150 if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
22153 NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
22156 return DAG.getBuildVector(VT, DL, NewVecC);
22159 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for an unsigned Op0 <= Op1 compare:
22161 /// t = psubus Op0, Op1
22162 /// pcmpeq t, <0..0>
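/// This works because the unsigned saturating subtract is zero in exactly
/// those lanes where Op0 <=u Op1 holds.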
22163 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
22164 ISD::CondCode Cond, const SDLoc &dl,
22165 const X86Subtarget &Subtarget,
22166 SelectionDAG &DAG) {
22167 if (!Subtarget.hasSSE2())
22170 MVT VET = VT.getVectorElementType();
22171 if (VET != MVT::i8 && VET != MVT::i16)
22177 case ISD::SETULT: {
22178 // If the comparison is against a constant we can turn this into a
22179 // setule. With psubus, setule does not require a swap. This is
22180 // beneficial because the constant in the register is no longer
22181 // clobbered as the destination, so it can be hoisted out of a loop.
22182 // Only do this pre-AVX since vpcmp* is no longer destructive.
22183 if (Subtarget.hasAVX())
22185 SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
22191 case ISD::SETUGT: {
22192 // If the comparison is against a constant, we can turn this into a setuge.
22193 // This is beneficial because materializing a constant 0 for the PCMPEQ is
22194 // probably cheaper than XOR+PCMPGT using 2 different vector constants:
22195 // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
22196 SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
22203 // Psubus is better than flip-sign because it requires no inversion.
22205 std::swap(Op0, Op1);
22211 SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
22212 return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
22213 DAG.getConstant(0, dl, VT));
22216 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
22217 SelectionDAG &DAG) {
22218 bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
22219 Op.getOpcode() == ISD::STRICT_FSETCCS;
22220 SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
22221 SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
22222 SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
22223 MVT VT = Op->getSimpleValueType(0);
22224 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
22225 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
22230 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
22231 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
22234 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
22235 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
22237 // If we have a strict compare with a vXi1 result and the input is 128/256
22238 // bits we can't use a masked compare unless we have VLX. If we use a wider
22239 // compare like we do for non-strict, we might trigger spurious exceptions
22240 // from the upper elements. Instead emit an AVX compare and convert to mask.
22242 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
22243 (!IsStrict || Subtarget.hasVLX() ||
22244 Op0.getSimpleValueType().is512BitVector())) {
22245 assert(VT.getVectorNumElements() <= 16);
22246 Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
22248 Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
22249 // The SSE/AVX packed FP comparison nodes are defined with a
22250 // floating-point vector result that matches the operand type. This allows
22251 // them to work with an SSE1 target (integer vector types are not legal).
22252 VT = Op0.getSimpleValueType();
22256 bool IsAlwaysSignaling;
22257 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
22258 if (!Subtarget.hasAVX()) {
22259 // TODO: We could use the following steps to handle a quiet compare with
22260 // signaling encodings.
22261 // 1. Get ordered masks from a quiet ISD::SETO
22262 // 2. Use the masks to mask potential unordered elements in operand A, B
22263 // 3. Get the compare results of masked A, B
22264 // 4. Calculate the final result using the mask and the result from 3
22265 // But currently, we just fall back to scalar operations.
22266 if (IsStrict && IsAlwaysSignaling && !IsSignaling)
22269 // Insert an extra signaling instruction to raise an exception.
22270 if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
22271 SDValue SignalCmp = DAG.getNode(
22272 Opc, dl, {VT, MVT::Other},
22273 {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
22274 // FIXME: It seems we need to update the flags of all new strict nodes.
22275 // Otherwise, mayRaiseFPException in MI will return false due to
22276 // NoFPExcept = false by default. However, I didn't find it in other patches.
22278 SignalCmp->setFlags(Op->getFlags());
22279 Chain = SignalCmp.getValue(1);
22282 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
22283 // emit two comparisons and a logic op to tie them together.
22285 // LLVM predicate is SETUEQ or SETONE.
22287 unsigned CombineOpc;
22288 if (Cond == ISD::SETUEQ) {
22291 CombineOpc = X86ISD::FOR;
22293 assert(Cond == ISD::SETONE);
22296 CombineOpc = X86ISD::FAND;
22299 SDValue Cmp0, Cmp1;
22301 Cmp0 = DAG.getNode(
22302 Opc, dl, {VT, MVT::Other},
22303 {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
22304 Cmp1 = DAG.getNode(
22305 Opc, dl, {VT, MVT::Other},
22306 {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
22307 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
22310 Cmp0 = DAG.getNode(
22311 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
22312 Cmp1 = DAG.getNode(
22313 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
22315 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
22319 Opc, dl, {VT, MVT::Other},
22320 {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
22321 Chain = Cmp.getValue(1);
22324 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
22327 // Handle all other FP comparisons here.
22329 // Toggle bit 4 of the AVX CC to flip between the quiet and signaling variants when the requested behavior differs from the predicate's default.
22330 SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
22332 Opc, dl, {VT, MVT::Other},
22333 {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
22334 Chain = Cmp.getValue(1);
22337 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
22340 if (VT.getSizeInBits() > Op.getSimpleValueType().getSizeInBits()) {
22341 // We emitted a compare with an XMM/YMM result. Finish converting to a
22342 // mask register using a vptestm.
22343 EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
22344 Cmp = DAG.getBitcast(CastVT, Cmp);
22345 Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
22346 DAG.getConstant(0, dl, CastVT), ISD::SETNE);
22348 // If this is SSE/AVX CMPP, bitcast the result back to integer to match
22349 // the result type of SETCC. The bitcast is expected to be optimized
22350 // away during combining/isel.
22351 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
22355 return DAG.getMergeValues({Cmp, Chain}, dl);
22360 assert(!IsStrict && "Strict SETCC only handles FP operands.");
22362 MVT VTOp0 = Op0.getSimpleValueType();
22364 assert(VTOp0 == Op1.getSimpleValueType() &&
22365 "Expected operands with same type!");
22366 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
22367 "Invalid number of packed elements for source and destination!");
22369 // The non-AVX512 code below works under the assumption that source and
22370 // destination types are the same.
22371 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
22372 "Value types for source and destination must be the same!");
22374 // The result is boolean, but operands are int/float
22375 if (VT.getVectorElementType() == MVT::i1) {
22376 // In the AVX-512 architecture setcc returns a mask with i1 elements,
22377 // but there is no compare instruction for i8 and i16 elements in KNL.
22378 assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
22379 "Unexpected operand type");
22380 return LowerIntVSETCC_AVX512(Op, DAG);
22383 // Lower using XOP integer comparisons.
22384 if (VT.is128BitVector() && Subtarget.hasXOP()) {
22385 // Translate compare code to XOP PCOM compare mode.
22386 unsigned CmpMode = 0;
22388 default: llvm_unreachable("Unexpected SETCC condition");
22390 case ISD::SETLT: CmpMode = 0x00; break;
22392 case ISD::SETLE: CmpMode = 0x01; break;
22394 case ISD::SETGT: CmpMode = 0x02; break;
22396 case ISD::SETGE: CmpMode = 0x03; break;
22397 case ISD::SETEQ: CmpMode = 0x04; break;
22398 case ISD::SETNE: CmpMode = 0x05; break;
22401 // Are we comparing unsigned or signed integers?
22403 ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
22405 return DAG.getNode(Opc, dl, VT, Op0, Op1,
22406 DAG.getTargetConstant(CmpMode, dl, MVT::i8));
22409 // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
22410 // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
22411 if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
22412 SDValue BC0 = peekThroughBitcasts(Op0);
22413 if (BC0.getOpcode() == ISD::AND) {
22415 SmallVector<APInt, 64> EltBits;
22416 if (getTargetConstantBitsFromNode(BC0.getOperand(1),
22417 VT.getScalarSizeInBits(), UndefElts,
22418 EltBits, false, false)) {
22419 if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
22421 Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
22427 // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
22428 if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
22429 Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
22430 ConstantSDNode *C1 = isConstOrConstSplat(Op1);
22431 if (C1 && C1->getAPIntValue().isPowerOf2()) {
22432 unsigned BitWidth = VT.getScalarSizeInBits();
22433 unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
22435 SDValue Result = Op0.getOperand(0);
22436 Result = DAG.getNode(ISD::SHL, dl, VT, Result,
22437 DAG.getConstant(ShiftAmt, dl, VT));
22438 Result = DAG.getNode(ISD::SRA, dl, VT, Result,
22439 DAG.getConstant(BitWidth - 1, dl, VT));
22444 // Break 256-bit integer vector compare into smaller ones.
22445 if (VT.is256BitVector() && !Subtarget.hasInt256())
22446 return splitIntVSETCC(Op, DAG);
22448 if (VT == MVT::v32i16 || VT == MVT::v64i8) {
22449 assert(!Subtarget.hasBWI() && "Unexpected VT with AVX512BW!");
22450 return splitIntVSETCC(Op, DAG);
22453 // If this is a SETNE against the signed minimum value, change it to SETGT.
22454 // If this is a SETNE against the signed maximum value, change it to SETLT,
22455 // which will be swapped to SETGT.
22456 // Otherwise we use PCMPEQ+invert.
22458 if (Cond == ISD::SETNE &&
22459 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
22460 if (ConstValue.isMinSignedValue())
22462 else if (ConstValue.isMaxSignedValue())
22466 // If both operands are known non-negative, then an unsigned compare is the
22467 // same as a signed compare and there's no need to flip signbits.
22468 // TODO: We could check for more general simplifications here since we're
22469 // computing known bits.
22470 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
22471 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
22473 // Special case: Use min/max operations for unsigned compares.
22474 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22475 if (ISD::isUnsignedIntSetCC(Cond) &&
22476 (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
22477 TLI.isOperationLegal(ISD::UMIN, VT)) {
22478 // If we have a constant operand, increment/decrement it and change the
22479 // condition to avoid an invert.
22480 if (Cond == ISD::SETUGT) {
22481 // X > C --> X >= (C+1) --> X == umax(X, C+1)
22482 if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
22484 Cond = ISD::SETUGE;
22487 if (Cond == ISD::SETULT) {
22488 // X < C --> X <= (C-1) --> X == umin(X, C-1)
22489 if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
22491 Cond = ISD::SETULE;
22494 bool Invert = false;
22497 default: llvm_unreachable("Unexpected condition code");
22498 case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
22499 case ISD::SETULE: Opc = ISD::UMIN; break;
22500 case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
22501 case ISD::SETUGE: Opc = ISD::UMAX; break;
22504 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
22505 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
22507 // If the logical-not of the result is required, perform that now.
22509 Result = DAG.getNOT(dl, Result, VT);
22514 // Try to use SUBUS and PCMPEQ.
22515 if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
22518 // We are handling one of the integer comparisons here. Since SSE only has
22519 // GT and EQ comparisons for integers, swapping operands and multiple
22520 // operations may be required for some comparisons.
22521 unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
22523 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
22524 Cond == ISD::SETGE || Cond == ISD::SETUGE;
22525 bool Invert = Cond == ISD::SETNE ||
22526 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
22529 std::swap(Op0, Op1);
22531 // Check that the operation in question is available (most are plain SSE2,
22532 // but PCMPGTQ and PCMPEQQ have different requirements).
22533 if (VT == MVT::v2i64) {
22534 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
22535 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
22537 // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
22538 // the odd elements over the even elements.
22539 if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
22540 Op0 = DAG.getConstant(0, dl, MVT::v4i32);
22541 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
22543 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
22544 static const int MaskHi[] = { 1, 1, 3, 3 };
22545 SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
22547 return DAG.getBitcast(VT, Result);
22550 if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
22551 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
22552 Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
22554 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
22555 static const int MaskHi[] = { 1, 1, 3, 3 };
22556 SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
22558 return DAG.getBitcast(VT, Result);
22561 // Since SSE has no unsigned integer comparisons, we need to flip the sign
22562 // bits of the inputs before performing those operations. The lower
22563 // compare is always unsigned.
22566 SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
22568 SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
22570 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
22571 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
22573 // Cast everything to the right type.
22574 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
22575 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
22577 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
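// The shuffles below splat each 32-bit half-result across its 64-bit lane
// ({1,1,3,3} picks the high halves, {0,0,2,2} the low halves) so that the
// per-lane AND/OR combine yields a full 64-bit all-ones or all-zeros mask.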
22578 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
22579 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
22581 // Create masks for only the low parts/high parts of the 64 bit integers.
22582 static const int MaskHi[] = { 1, 1, 3, 3 };
22583 static const int MaskLo[] = { 0, 0, 2, 2 };
22584 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
22585 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
22586 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
22588 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
22589 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
22592 Result = DAG.getNOT(dl, Result, MVT::v4i32);
22594 return DAG.getBitcast(VT, Result);
22597 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
22598 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
22599 // pcmpeqd + pshufd + pand.
22600 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
22602 // First cast everything to the right type.
22603 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
22604 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
22607 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
22609 // Make sure the lower and upper halves are both all-ones.
22610 static const int Mask[] = { 1, 0, 3, 2 };
22611 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
22612 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
22615 Result = DAG.getNOT(dl, Result, MVT::v4i32);
22617 return DAG.getBitcast(VT, Result);
22621 // Since SSE has no unsigned integer comparisons, we need to flip the sign
22622 // bits of the inputs before performing those operations.
22624 MVT EltVT = VT.getVectorElementType();
22625 SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
22627 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
22628 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
22631 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
22633 // If the logical-not of the result is required, perform that now.
22635 Result = DAG.getNOT(dl, Result, VT);
22640 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
22641 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
22642 const SDLoc &dl, SelectionDAG &DAG,
22643 const X86Subtarget &Subtarget,
22645 // Only support equality comparisons.
22646 if (CC != ISD::SETEQ && CC != ISD::SETNE)
22649 // Must be a bitcast from vXi1.
22650 if (Op0.getOpcode() != ISD::BITCAST)
22653 Op0 = Op0.getOperand(0);
22654 MVT VT = Op0.getSimpleValueType();
22655 if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
22656 !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
22657 !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
22660 X86::CondCode X86Cond;
22661 if (isNullConstant(Op1)) {
22662 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
22663 } else if (isAllOnesConstant(Op1)) {
22664 // C flag is set for all ones.
22665 X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
22669 // If the input is an AND, we can combine its operands into the KTEST.
22670 bool KTestable = false;
22671 if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
22673 if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
22675 if (!isNullConstant(Op1))
22677 if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
22678 SDValue LHS = Op0.getOperand(0);
22679 SDValue RHS = Op0.getOperand(1);
22680 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22681 return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
22684 // If the input is an OR, we can combine its operands into the KORTEST.
22687 if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
22688 LHS = Op0.getOperand(0);
22689 RHS = Op0.getOperand(1);
22692 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22693 return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
22696 /// Emit flags for the given setcc condition and operands. Also returns the
22697 /// corresponding X86 condition code constant in X86CC.
22698 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
22699 ISD::CondCode CC, const SDLoc &dl,
22701 SDValue &X86CC) const {
22702 // Optimize to BT if possible.
22703 // Lower (X & (1 << N)) == 0 to BT(X, N).
22704 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
22705 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
22706 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
22707 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
22708 if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
22712 // Try to use PTEST/PMOVMSKB for a tree of ORs compared for equality with 0.
22713 // TODO: We could do AND tree with all 1s as well by using the C flag.
22714 if (isNullConstant(Op1) && (CC == ISD::SETEQ || CC == ISD::SETNE))
22716 MatchVectorAllZeroTest(Op0, CC, dl, Subtarget, DAG, X86CC))
22719 // Try to lower using KORTEST or KTEST.
22720 if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
22723 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of these.
22725 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
22726 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
22727 // If the input is a setcc, then reuse the input setcc or use a new one with
22728 // the inverted condition.
22729 if (Op0.getOpcode() == X86ISD::SETCC) {
22730 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
22732 X86CC = Op0.getOperand(0);
22734 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
22735 CCode = X86::GetOppositeBranchCondition(CCode);
22736 X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
22739 return Op0.getOperand(1);
22743 // Try to use the carry flag from the add in place of a separate CMP for:
22744 // (seteq (add X, -1), -1). Similar for setne.
22745 if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
22746 Op0.getOperand(1) == Op1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
22747 if (isProfitableToUseFlagOp(Op0)) {
22748 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
22750 SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
22751 Op0.getOperand(1));
22752 DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
22753 X86::CondCode CCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
22754 X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
22755 return SDValue(New.getNode(), 1);
22759 X86::CondCode CondCode =
22760 TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
22761 assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");
22763 SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
22764 X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
22768 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
22770 bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
22771 Op.getOpcode() == ISD::STRICT_FSETCCS;
22772 MVT VT = Op->getSimpleValueType(0);
22774 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
22776 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
22777 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
22778 SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
22779 SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
22782 cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
22784 // Handle f128 first, since one possible outcome is a normal integer
22785 // comparison which gets handled by emitFlagsForSetcc.
22786 if (Op0.getValueType() == MVT::f128) {
22787 softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
22788 Op.getOpcode() == ISD::STRICT_FSETCCS);
22790 // If softenSetCCOperands returned a scalar, use it.
22791 if (!Op1.getNode()) {
22792 assert(Op0.getValueType() == Op.getValueType() &&
22793 "Unexpected setcc expansion!");
22795 return DAG.getMergeValues({Op0, Chain}, dl);
22800 if (Op0.getSimpleValueType().isInteger()) {
22802 SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
22803 SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
22804 return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
22807 // Handle floating point.
22808 X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
22809 if (CondCode == X86::COND_INVALID)
22814 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
22816 DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
22817 dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
22818 Chain = EFLAGS.getValue(1);
22820 EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
22823 SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
22824 SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
22825 return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
22828 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
22829 SDValue LHS = Op.getOperand(0);
22830 SDValue RHS = Op.getOperand(1);
22831 SDValue Carry = Op.getOperand(2);
22832 SDValue Cond = Op.getOperand(3);
22835 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
22836 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
22838 // Recreate the carry if needed.
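// Adding all-ones (-1) to the carry value produces a carry-out exactly when
// that value is non-zero, which moves the boolean back into EFLAGS for the
// SBB below.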
22839 EVT CarryVT = Carry.getValueType();
22840 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
22841 Carry, DAG.getAllOnesConstant(DL, CarryVT));
22843 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
22844 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
22845 return getSETCC(CC, Cmp.getValue(1), DL, DAG);
22848 // This function returns three things: the arithmetic computation itself
22849 // (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
22850 // flag and the condition code define the case in which the arithmetic
22851 // computation overflows.
22852 static std::pair<SDValue, SDValue>
22853 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
22854 assert(Op.getResNo() == 0 && "Unexpected result number!");
22855 SDValue Value, Overflow;
22856 SDValue LHS = Op.getOperand(0);
22857 SDValue RHS = Op.getOperand(1);
22858 unsigned BaseOp = 0;
22860 switch (Op.getOpcode()) {
22861 default: llvm_unreachable("Unknown ovf instruction!");
22863 BaseOp = X86ISD::ADD;
22864 Cond = X86::COND_O;
22867 BaseOp = X86ISD::ADD;
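// An unsigned add of 1 overflows exactly when the result wraps to 0, so the
// Z flag can stand in for the carry flag; this lets isel use INC, which does
// not update CF.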
22868 Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
22871 BaseOp = X86ISD::SUB;
22872 Cond = X86::COND_O;
22875 BaseOp = X86ISD::SUB;
22876 Cond = X86::COND_B;
22879 BaseOp = X86ISD::SMUL;
22880 Cond = X86::COND_O;
22883 BaseOp = X86ISD::UMUL;
22884 Cond = X86::COND_O;
22889 // Also sets EFLAGS.
22890 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22891 Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
22892 Overflow = Value.getValue(1);
22895 return std::make_pair(Value, Overflow);
22898 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
22899 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
22900 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
22901 // looks for this combo and may remove the "setcc" instruction if the "setcc"
22902 // has only one use.
22904 X86::CondCode Cond;
22905 SDValue Value, Overflow;
22906 std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
22908 SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
22909 assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
22910 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
22913 /// Return true if opcode is an X86 logical comparison.
22914 static bool isX86LogicalCmp(SDValue Op) {
22915 unsigned Opc = Op.getOpcode();
22916 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
22917 Opc == X86ISD::FCMP)
22919 if (Op.getResNo() == 1 &&
22920 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
22921 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
22922 Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
22928 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
22929 if (V.getOpcode() != ISD::TRUNCATE)
22932 SDValue VOp0 = V.getOperand(0);
22933 unsigned InBits = VOp0.getValueSizeInBits();
22934 unsigned Bits = V.getValueSizeInBits();
22935 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
22938 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
22939 bool AddTest = true;
22940 SDValue Cond = Op.getOperand(0);
22941 SDValue Op1 = Op.getOperand(1);
22942 SDValue Op2 = Op.getOperand(2);
22944 MVT VT = Op1.getSimpleValueType();
22947 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
22948 // are available or VBLENDV if AVX is available.
22949 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
22950 if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
22951 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
22952 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
22953 bool IsAlwaysSignaling;
22955 translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
22956 CondOp0, CondOp1, IsAlwaysSignaling);
22958 if (Subtarget.hasAVX512()) {
22960 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
22961 DAG.getTargetConstant(SSECC, DL, MVT::i8));
22962 assert(!VT.isVector() && "Not a scalar type?");
22963 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
22966 if (SSECC < 8 || Subtarget.hasAVX()) {
22967 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
22968 DAG.getTargetConstant(SSECC, DL, MVT::i8));
22970 // If we have AVX, we can use a variable vector select (VBLENDV) instead
22971 // of 3 logic instructions for size savings and potentially speed.
22972 // Unfortunately, there is no scalar form of VBLENDV.
22974 // If either operand is a +0.0 constant, don't try this. We can expect to
22975 // optimize away at least one of the logic instructions later in that
22976 // case, so that sequence would be faster than a variable blend.
22978 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
22979 // uses XMM0 as the selection register. That may need just as many
22980 // instructions as the AND/ANDN/OR sequence due to register moves, so don't bother.
22982 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
22983 !isNullFPConstant(Op2)) {
22984 // Convert to vectors, do a VSELECT, and convert back to scalar.
22985 // All of the conversions should be optimized away.
22986 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
22987 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
22988 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
22989 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
22991 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
22992 VCmp = DAG.getBitcast(VCmpVT, VCmp);
22994 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
22996 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
22997 VSel, DAG.getIntPtrConstant(0, DL));
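// Otherwise emit the classic SSE blend: (Cmp & Op1) | (~Cmp & Op2).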
22999 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
23000 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
23001 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
23005 // AVX512 fallback is to lower selects of scalar floats to masked moves.
23006 if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
23007 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
23008 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
23011 if (Cond.getOpcode() == ISD::SETCC) {
23012 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
23014 // If the condition was updated, it's possible that the operands of the
23015 // select were also updated (for example, EmitTest has a RAUW). Refresh
23016 // the local references to the select operands in case they got stale.
23017 Op1 = Op.getOperand(1);
23018 Op2 = Op.getOperand(2);
23022 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
23023 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
23024 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
23025 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
23026 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
23027 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
23028 if (Cond.getOpcode() == X86ISD::SETCC &&
23029 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
23030 isNullConstant(Cond.getOperand(1).getOperand(1))) {
23031 SDValue Cmp = Cond.getOperand(1);
23032 SDValue CmpOp0 = Cmp.getOperand(0);
23033 unsigned CondCode = Cond.getConstantOperandVal(0);
23035 // Special handling for __builtin_ffs(X) - 1 pattern which looks like
23036 // (select (seteq X, 0), -1, (cttz_zero_undef X)). Disable the special
23037 // handling to keep the CMP with 0. This should be removed by
23038 // optimizeCompareInst by using the flags from the BSR/TZCNT used for the
23039 // cttz_zero_undef.
23040 auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
23041 return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
23042 Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
23043 };
23044 if (Subtarget.hasCMov() && (VT == MVT::i32 || VT == MVT::i64) &&
23045 ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
23046 (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
23048 } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
23049 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
23050 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
23052 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
23053 SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
23055 // Apply further optimizations for special cases
23056 // (select (x != 0), -1, 0) -> neg & sbb
23057 // (select (x == 0), 0, -1) -> neg & sbb
23058 if (isNullConstant(Y) &&
23059 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
23060 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
23061 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
23062 Zero = DAG.getConstant(0, DL, Op.getValueType());
23063 return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Neg.getValue(1));
23064 }
23066 Cmp = DAG.getNode(X86ISD::SUB, DL, CmpVTs,
23067 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
23069 SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
23070 SDValue Res = // Res = 0 or -1.
23071 DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp.getValue(1));
23073 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
23074 Res = DAG.getNOT(DL, Res, Res.getValueType());
23076 return DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
23077 } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
23078 Cmp.getOperand(0).getOpcode() == ISD::AND &&
23079 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
23080 SDValue Src1, Src2;
23081 // true if Op2 is XOR or OR operator and one of its operands
23082 // is equal to Op1, i.e.
23083 // ( a , a op b) || ( b , a op b)
23084 auto isOrXorPattern = [&]() {
23085 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
23086 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
23087 Src1 =
23088 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
23089 Src2 = Op1;
23090 return true;
23091 }
23092 return false;
23093 };
23095 if (isOrXorPattern()) {
23096 SDValue Neg;
23097 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
23098 // we need a mask of all zeros or ones with the same size as the other
23099 // operands of the cmov operation.
23100 if (CmpSz > VT.getSizeInBits())
23101 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
23102 else if (CmpSz < VT.getSizeInBits())
23103 Neg = DAG.getNode(ISD::AND, DL, VT,
23104 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
23105 DAG.getConstant(1, DL, VT));
23106 else
23107 Neg = CmpOp0;
23108 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
23109 Neg); // -(and (x, 0x1))
23110 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
23111 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
23112 }
23113 }
23114 }
23116 // Look past (and (setcc_carry (cmp ...)), 1).
23117 if (Cond.getOpcode() == ISD::AND &&
23118 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
23119 isOneConstant(Cond.getOperand(1)))
23120 Cond = Cond.getOperand(0);
23122 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
23123 // setting operand in place of the X86ISD::SETCC.
23124 unsigned CondOpcode = Cond.getOpcode();
23125 if (CondOpcode == X86ISD::SETCC ||
23126 CondOpcode == X86ISD::SETCC_CARRY) {
23127 CC = Cond.getOperand(0);
23129 SDValue Cmp = Cond.getOperand(1);
23130 bool IllegalFPCMov = false;
23131 if (VT.isFloatingPoint() && !VT.isVector() &&
23132 !isScalarFPTypeInSSEReg(VT) && Subtarget.hasCMov()) // FPStack?
23133 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
23135 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
23136 Cmp.getOpcode() == X86ISD::BT) { // FIXME
23140 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
23141 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
23142 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
23144 X86::CondCode X86Cond;
23145 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
23147 CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
23152 // Look past the truncate if the high bits are known zero.
23153 if (isTruncWithZeroHighBitsInput(Cond, DAG))
23154 Cond = Cond.getOperand(0);
23156 // We know the result of AND is compared against zero. Try to match
23157 // it to BT.
23158 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
23159 SDValue BTCC;
23160 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
23169 CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
23170 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
23173 // a < b ? -1 : 0 -> RES = ~setcc_carry
23174 // a < b ? 0 : -1 -> RES = setcc_carry
23175 // a >= b ? -1 : 0 -> RES = setcc_carry
23176 // a >= b ? 0 : -1 -> RES = ~setcc_carry
23177 if (Cond.getOpcode() == X86ISD::SUB) {
23178 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
23180 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
23181 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
23182 (isNullConstant(Op1) || isNullConstant(Op2))) {
23183 SDValue Res =
23184 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
23185 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
23186 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
23187 return DAG.getNOT(DL, Res, Res.getValueType());
23188 return Res;
23189 }
23190 }
23192 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
23193 // widen the cmov and push the truncate through. This avoids introducing a new
23194 // branch during isel and doesn't add any extensions.
23195 if (Op.getValueType() == MVT::i8 &&
23196 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
23197 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
23198 if (T1.getValueType() == T2.getValueType() &&
23199 // Exclude CopyFromReg to avoid partial register stalls.
23200 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
23201 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
23202 CC, Cond);
23203 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
23204 }
23205 }
23207 // Or finally, promote i8 cmovs if we have CMOV,
23208 // or i16 cmovs if it won't prevent folding a load.
23209 // FIXME: we should not limit promotion of i8 case to only when the CMOV is
23210 // legal, but EmitLoweredSelect() cannot deal with these extensions
23211 // being inserted between two CMOVs (the i16 case has the same limitation).
23212 // https://bugs.llvm.org/show_bug.cgi?id=40974
23213 if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
23214 (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
23215 !MayFoldLoad(Op2))) {
23216 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
23217 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
23218 SDValue Ops[] = { Op2, Op1, CC, Cond };
23219 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
23220 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
23221 }
23223 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
23224 // condition is true.
23225 SDValue Ops[] = { Op2, Op1, CC, Cond };
23226 return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
23227 }
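// Lower a sign- or any-extend whose source is a vXi1 mask: use a native mask
// extension when DQI/BWI make it legal for the element width, otherwise select
// between all-ones and zero, widening to 512 bits when VLX is unavailable and
// truncating/extracting back to the requested type afterwards.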
23229 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
23230 const X86Subtarget &Subtarget,
23231 SelectionDAG &DAG) {
23232 MVT VT = Op->getSimpleValueType(0);
23233 SDValue In = Op->getOperand(0);
23234 MVT InVT = In.getSimpleValueType();
23235 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
23236 MVT VTElt = VT.getVectorElementType();
23238 SDLoc dl(Op);
23239 unsigned NumElts = VT.getVectorNumElements();
23241 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
23242 MVT ExtVT = VT;
23243 if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
23244 // If v16i32 is to be avoided, we'll need to split and concatenate.
23245 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
23246 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
23248 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
23249 }
23251 // Widen to 512-bits if VLX is not supported.
23252 MVT WideVT = ExtVT;
23253 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
23254 NumElts *= 512 / ExtVT.getSizeInBits();
23255 InVT = MVT::getVectorVT(MVT::i1, NumElts);
23256 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
23257 In, DAG.getIntPtrConstant(0, dl));
23258 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
23259 }
23261 SDValue V;
23262 MVT WideEltVT = WideVT.getVectorElementType();
23263 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
23264 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
23265 V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
23266 } else {
23267 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
23268 SDValue Zero = DAG.getConstant(0, dl, WideVT);
23269 V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
23270 }
23272 // Truncate if we had to extend i16/i8 above.
23273 if (VT != ExtVT) {
23274 WideVT = MVT::getVectorVT(VTElt, NumElts);
23275 V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
23276 }
23278 // Extract back to 128/256-bit if we widened.
23279 if (WideVT != VT)
23280 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
23281 DAG.getIntPtrConstant(0, dl));
23283 return V;
23284 }
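// Vector ANY_EXTEND lowering: vXi1 sources reuse the mask lowering above;
// everything else is handled as an AVX extend via LowerAVXExtend.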
23286 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
23287 SelectionDAG &DAG) {
23288 SDValue In = Op->getOperand(0);
23289 MVT InVT = In.getSimpleValueType();
23291 if (InVT.getVectorElementType() == MVT::i1)
23292 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
23294 assert(Subtarget.hasAVX() && "Expected AVX support");
23295 return LowerAVXExtend(Op, DAG, Subtarget);
23296 }
23298 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
23299 // For sign extend this needs to handle all vector sizes and SSE4.1 and
23300 // non-SSE4.1 targets. For zero extend this should only handle inputs of
23301 // MVT::v64i8 when BWI is not supported, but AVX512 is.
23302 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
23303 const X86Subtarget &Subtarget,
23304 SelectionDAG &DAG) {
23305 SDValue In = Op->getOperand(0);
23306 MVT VT = Op->getSimpleValueType(0);
23307 MVT InVT = In.getSimpleValueType();
23309 MVT SVT = VT.getVectorElementType();
23310 MVT InSVT = InVT.getVectorElementType();
23311 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
23313 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
23314 return SDValue();
23315 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
23316 return SDValue();
23317 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
23318 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
23319 !(VT.is512BitVector() && Subtarget.hasAVX512()))
23320 return SDValue();
23322 SDLoc dl(Op);
23323 unsigned Opc = Op.getOpcode();
23324 unsigned NumElts = VT.getVectorNumElements();
23326 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
23327 // For 512-bit vectors, we need 128-bits or 256-bits.
23328 if (InVT.getSizeInBits() > 128) {
23329 // Input needs to be at least the same number of elements as output, and
23330 // at least 128-bits.
23331 int InSize = InSVT.getSizeInBits() * NumElts;
23332 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
23333 InVT = In.getSimpleValueType();
23334 }
23336 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit results,
23337 // so are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions still
23338 // need to be handled here for 256/512-bit results.
23339 if (Subtarget.hasInt256()) {
23340 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
23342 if (InVT.getVectorNumElements() != NumElts)
23343 return DAG.getNode(Op.getOpcode(), dl, VT, In);
23345 // FIXME: Apparently we create inreg operations that could be regular
23346 // extends.
23347 unsigned ExtOpc =
23348 Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
23349 : ISD::ZERO_EXTEND;
23350 return DAG.getNode(ExtOpc, dl, VT, In);
23351 }
23353 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
23354 if (Subtarget.hasAVX()) {
23355 assert(VT.is256BitVector() && "256-bit vector expected");
23356 MVT HalfVT = VT.getHalfNumVectorElementsVT();
23357 int HalfNumElts = HalfVT.getVectorNumElements();
23359 unsigned NumSrcElts = InVT.getVectorNumElements();
23360 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
23361 for (int i = 0; i != HalfNumElts; ++i)
23362 HiMask[i] = HalfNumElts + i;
23364 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
23365 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
23366 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
23367 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
23368 }
23370 // We should only get here for sign extend.
23371 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
23372 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
23374 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
23375 SDValue Curr = In;
23376 SDValue SignExt = Curr;
23378 // As SRAI is only available on i16/i32 types, we expand only up to i32
23379 // and handle i64 separately.
23380 if (InVT != MVT::v4i32) {
23381 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
23383 unsigned DestWidth = DestVT.getScalarSizeInBits();
23384 unsigned Scale = DestWidth / InSVT.getSizeInBits();
23386 unsigned InNumElts = InVT.getVectorNumElements();
23387 unsigned DestElts = DestVT.getVectorNumElements();
23389 // Build a shuffle mask that takes each input element and places it in the
23390 // MSBs of the new element size.
23391 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
23392 for (unsigned i = 0; i != DestElts; ++i)
23393 Mask[i * Scale + (Scale - 1)] = i;
23395 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
23396 Curr = DAG.getBitcast(DestVT, Curr);
23398 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
23399 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
23400 DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
23403 if (VT == MVT::v2i64) {
23404 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
23405 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
23406 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
23407 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
23408 SignExt = DAG.getBitcast(VT, SignExt);
23409 }
23411 return SignExt;
23412 }
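// Vector SIGN_EXTEND lowering: vXi1 sources use the mask lowering, v32i8->v32i16
// is split when BWI is unavailable, AVX2 keeps the node as-is, and plain AVX
// splits the input and emits two sign_extend_vector_inreg ops that are then
// concatenated back together.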
23414 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
23415 SelectionDAG &DAG) {
23416 MVT VT = Op->getSimpleValueType(0);
23417 SDValue In = Op->getOperand(0);
23418 MVT InVT = In.getSimpleValueType();
23419 SDLoc dl(Op);
23421 if (InVT.getVectorElementType() == MVT::i1)
23422 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
23424 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
23425 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
23426 "Expected same number of elements");
23427 assert((VT.getVectorElementType() == MVT::i16 ||
23428 VT.getVectorElementType() == MVT::i32 ||
23429 VT.getVectorElementType() == MVT::i64) &&
23430 "Unexpected element type");
23431 assert((InVT.getVectorElementType() == MVT::i8 ||
23432 InVT.getVectorElementType() == MVT::i16 ||
23433 InVT.getVectorElementType() == MVT::i32) &&
23434 "Unexpected element type");
23436 if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
23437 assert(InVT == MVT::v32i8 && "Unexpected VT!");
23438 return splitVectorIntUnary(Op, DAG);
23441 if (Subtarget.hasInt256())
23442 return Op;
23444 // Optimize vectors in AVX mode
23445 // Sign extend v8i16 to v8i32 and
23446 //             v4i32 to v4i64
23448 // Divide input vector into two parts
23449 // for v4i32 the high shuffle mask will be {2, 3, -1, -1}
23450 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
23451 // concat the vectors to original VT
23452 MVT HalfVT = VT.getHalfNumVectorElementsVT();
23453 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
23455 unsigned NumElems = InVT.getVectorNumElements();
23456 SmallVector<int,8> ShufMask(NumElems, -1);
23457 for (unsigned i = 0; i != NumElems/2; ++i)
23458 ShufMask[i] = i + NumElems/2;
23460 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
23461 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
23463 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
23464 }
23466 /// Change a vector store into a pair of half-size vector stores.
23467 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
23468 SDValue StoredVal = Store->getValue();
23469 assert((StoredVal.getValueType().is256BitVector() ||
23470 StoredVal.getValueType().is512BitVector()) &&
23471 "Expecting 256/512-bit op");
23473 // Splitting volatile memory ops is not allowed unless the operation was not
23474 // legal to begin with. Assume the input store is legal (this transform is
23475 // only used for targets with AVX). Note: It is possible that we have an
23476 // illegal type like v2i128, and so we could allow splitting a volatile store
23477 // in that case if that is important.
23478 if (!Store->isSimple())
23479 return SDValue();
23481 SDLoc DL(Store);
23482 SDValue Value0, Value1;
23483 std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
23484 unsigned HalfOffset = Value0.getValueType().getStoreSize();
23485 SDValue Ptr0 = Store->getBasePtr();
23486 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfOffset, DL);
23487 SDValue Ch0 =
23488 DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
23489 Store->getOriginalAlign(),
23490 Store->getMemOperand()->getFlags());
23491 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
23492 Store->getPointerInfo().getWithOffset(HalfOffset),
23493 Store->getOriginalAlign(),
23494 Store->getMemOperand()->getFlags());
23495 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
23496 }
23498 /// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
23499 /// type.
23500 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
23501 SelectionDAG &DAG) {
23502 SDValue StoredVal = Store->getValue();
23503 assert(StoreVT.is128BitVector() &&
23504 StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
23505 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
23507 // Splitting volatile memory ops is not allowed unless the operation was not
23508 // legal to begin with. We are assuming the input op is legal (this transform
23509 // is only used for targets with AVX).
23510 if (!Store->isSimple())
23511 return SDValue();
23513 MVT StoreSVT = StoreVT.getScalarType();
23514 unsigned NumElems = StoreVT.getVectorNumElements();
23515 unsigned ScalarSize = StoreSVT.getStoreSize();
23517 SDLoc DL(Store);
23518 SmallVector<SDValue, 4> Stores;
23519 for (unsigned i = 0; i != NumElems; ++i) {
23520 unsigned Offset = i * ScalarSize;
23521 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
23522 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
23523 DAG.getIntPtrConstant(i, DL));
23524 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
23525 Store->getPointerInfo().getWithOffset(Offset),
23526 Store->getOriginalAlign(),
23527 Store->getMemOperand()->getFlags());
23528 Stores.push_back(Ch);
23529 }
23530 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
23531 }
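// Custom store lowering: small vXi1 mask stores are widened to v16i1 and stored
// as i8, 256-bit (and BWI-less v32i16/v64i8) concatenated values are split into
// two stores, and 64-bit vectors are widened and stored via a single 64-bit
// element extract (or VEXTRACT_STORE on SSE1-only targets).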
23533 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
23534 SelectionDAG &DAG) {
23535 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
23536 SDLoc dl(St);
23537 SDValue StoredVal = St->getValue();
23539 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
23540 if (StoredVal.getValueType().isVector() &&
23541 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
23542 assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
23543 "Unexpected VT");
23544 assert(!St->isTruncatingStore() && "Expected non-truncating store");
23545 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
23546 "Expected AVX512F without AVX512DQI");
23548 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
23549 DAG.getUNDEF(MVT::v16i1), StoredVal,
23550 DAG.getIntPtrConstant(0, dl));
23551 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
23552 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
23554 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
23555 St->getPointerInfo(), St->getOriginalAlign(),
23556 St->getMemOperand()->getFlags());
23557 }
23559 if (St->isTruncatingStore())
23560 return SDValue();
23562 // If this is a 256-bit store of concatenated ops, we are better off splitting
23563 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
23564 // and each half can execute independently. Some cores would split the op into
23565 // halves anyway, so the concat (vinsertf128) is purely an extra op.
23566 MVT StoreVT = StoredVal.getSimpleValueType();
23567 if (StoreVT.is256BitVector() ||
23568 ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
23569 !Subtarget.hasBWI())) {
23570 SmallVector<SDValue, 4> CatOps;
23571 if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
23572 return splitVectorStore(St, DAG);
23573 return SDValue();
23574 }
23576 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23577 assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
23578 "Unexpected VT");
23579 assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
23580 TargetLowering::TypeWidenVector && "Unexpected type action!");
23582 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
23583 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
23584 DAG.getUNDEF(StoreVT));
23586 if (Subtarget.hasSSE2()) {
23587 // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
23588 // and store it.
23589 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
23590 MVT CastVT = MVT::getVectorVT(StVT, 2);
23591 StoredVal = DAG.getBitcast(CastVT, StoredVal);
23592 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
23593 DAG.getIntPtrConstant(0, dl));
23595 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
23596 St->getPointerInfo(), St->getOriginalAlign(),
23597 St->getMemOperand()->getFlags());
23598 }
23599 assert(Subtarget.hasSSE1() && "Expected SSE");
23600 SDVTList Tys = DAG.getVTList(MVT::Other);
23601 SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
23602 return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
23603 St->getMemOperand());
23604 }
23606 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
23607 // may emit an illegal shuffle but the expansion is still better than scalar
23608 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
23609 // we'll emit a shuffle and an arithmetic shift.
23610 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
23611 // TODO: It is possible to support ZExt by zeroing the undef values during
23612 // the shuffle phase or after the shuffle.
23613 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
23614 SelectionDAG &DAG) {
23615 MVT RegVT = Op.getSimpleValueType();
23616 assert(RegVT.isVector() && "We only custom lower vector loads.");
23617 assert(RegVT.isInteger() &&
23618 "We only custom lower integer vector loads.");
23620 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
23621 SDLoc dl(Ld);
23623 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
23624 if (RegVT.getVectorElementType() == MVT::i1) {
23625 assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
23626 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
23627 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
23628 "Expected AVX512F without AVX512DQI");
23630 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
23631 Ld->getPointerInfo(), Ld->getOriginalAlign(),
23632 Ld->getMemOperand()->getFlags());
23634 // Replace chain users with the new chain.
23635 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
23637 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
23638 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
23639 DAG.getBitcast(MVT::v16i1, Val),
23640 DAG.getIntPtrConstant(0, dl));
23641 return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
23642 }
23644 return SDValue();
23645 }
23647 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
23648 /// each of which has no other use apart from the AND / OR.
23649 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
23650 Opc = Op.getOpcode();
23651 if (Opc != ISD::OR && Opc != ISD::AND)
23652 return false;
23653 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
23654 Op.getOperand(0).hasOneUse() &&
23655 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
23656 Op.getOperand(1).hasOneUse());
23657 }
23659 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
23660 SDValue Chain = Op.getOperand(0);
23661 SDValue Cond = Op.getOperand(1);
23662 SDValue Dest = Op.getOperand(2);
23663 SDLoc dl(Op);
23665 if (Cond.getOpcode() == ISD::SETCC &&
23666 Cond.getOperand(0).getValueType() != MVT::f128) {
23667 SDValue LHS = Cond.getOperand(0);
23668 SDValue RHS = Cond.getOperand(1);
23669 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23671 // Special case for
23672 // setcc([su]{add,sub,mul}o == 0)
23673 // setcc([su]{add,sub,mul}o != 1)
23674 if (ISD::isOverflowIntrOpRes(LHS) &&
23675 (CC == ISD::SETEQ || CC == ISD::SETNE) &&
23676 (isNullConstant(RHS) || isOneConstant(RHS))) {
23677 SDValue Value, Overflow;
23678 X86::CondCode X86Cond;
23679 std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);
23681 if ((CC == ISD::SETEQ) == isNullConstant(RHS))
23682 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
23684 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23685 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23686 Overflow);
23687 }
23689 if (LHS.getSimpleValueType().isInteger()) {
23690 SDValue CCVal;
23691 SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
23692 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23693 EFLAGS);
23694 }
23696 if (CC == ISD::SETOEQ) {
23697 // For FCMP_OEQ, we can emit
23698 // two branches instead of an explicit AND instruction with a
23699 // separate test. However, we only do this if this block doesn't
23700 // have a fall-through edge, because this requires an explicit
23701 // jmp when the condition is false.
23702 if (Op.getNode()->hasOneUse()) {
23703 SDNode *User = *Op.getNode()->use_begin();
23704 // Look for an unconditional branch following this conditional branch.
23705 // We need this because we need to reverse the successors in order
23706 // to implement FCMP_OEQ.
23707 if (User->getOpcode() == ISD::BR) {
23708 SDValue FalseBB = User->getOperand(1);
23709 SDNode *NewBR =
23710 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
23711 assert(NewBR == User);
23712 (void)NewBR;
23713 Dest = FalseBB;
23715 SDValue Cmp =
23716 DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
23717 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
23718 Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
23719 CCVal, Cmp);
23720 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
23721 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23722 Cmp);
23723 }
23724 }
23725 } else if (CC == ISD::SETUNE) {
23726 // For FCMP_UNE, we can emit
23727 // two branches instead of an explicit OR instruction with a
23728 // separate test.
23729 SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
23730 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
23731 Chain =
23732 DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
23733 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
23734 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23735 Cmp);
23736 }
23737 X86::CondCode X86Cond =
23738 TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
23739 SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
23740 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23741 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23742 Cmp);
23743 }
23746 if (ISD::isOverflowIntrOpRes(Cond)) {
23747 SDValue Value, Overflow;
23748 X86::CondCode X86Cond;
23749 std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
23751 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23752 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23753 Overflow);
23754 }
23756 // Look past the truncate if the high bits are known zero.
23757 if (isTruncWithZeroHighBitsInput(Cond, DAG))
23758 Cond = Cond.getOperand(0);
23760 EVT CondVT = Cond.getValueType();
23762 // Add an AND with 1 if we don't already have one.
23763 if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
23764 Cond =
23765 DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));
23767 SDValue LHS = Cond;
23768 SDValue RHS = DAG.getConstant(0, dl, CondVT);
23770 SDValue CCVal;
23771 SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
23772 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23773 EFLAGS);
23774 }
23776 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
23777 // Calls to _alloca are needed to probe the stack when allocating more than 4k
23778 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
23779 // that the guard pages used by the OS virtual memory manager are allocated in
23780 // correct sequence.
23781 SDValue
23782 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
23783 SelectionDAG &DAG) const {
23784 MachineFunction &MF = DAG.getMachineFunction();
23785 bool SplitStack = MF.shouldSplitStack();
23786 bool EmitStackProbeCall = hasStackProbeSymbol(MF);
23787 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
23788 SplitStack || EmitStackProbeCall;
23789 SDLoc dl(Op);
23792 SDNode *Node = Op.getNode();
23793 SDValue Chain = Op.getOperand(0);
23794 SDValue Size = Op.getOperand(1);
23795 MaybeAlign Alignment(Op.getConstantOperandVal(2));
23796 EVT VT = Node->getValueType(0);
23798 // Chain the dynamic stack allocation so that it doesn't modify the stack
23799 // pointer when other instructions are using the stack.
23800 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
23802 bool Is64Bit = Subtarget.is64Bit();
23803 MVT SPTy = getPointerTy(DAG.getDataLayout());
23805 SDValue Result;
23807 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23808 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
23809 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
23810 " not tell us which reg is the stack pointer!");
23812 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
23813 const Align StackAlign = TFI.getStackAlign();
23814 if (hasInlineStackProbe(MF)) {
23815 MachineRegisterInfo &MRI = MF.getRegInfo();
23817 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
23818 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
23819 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
23820 Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
23821 DAG.getRegister(Vreg, SPTy));
23822 } else {
23823 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
23824 Chain = SP.getValue(1);
23825 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
23827 if (Alignment && *Alignment > StackAlign)
23828 Result =
23829 DAG.getNode(ISD::AND, dl, VT, Result,
23830 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23831 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
23832 } else if (SplitStack) {
23833 MachineRegisterInfo &MRI = MF.getRegInfo();
23836 // The 64 bit implementation of segmented stacks needs to clobber both r10
23837 // r11. This makes it impossible to use it along with nested parameters.
23838 const Function &F = MF.getFunction();
23839 for (const auto &A : F.args()) {
23840 if (A.hasNestAttr())
23841 report_fatal_error("Cannot use segmented stacks with functions that "
23842 "have nested arguments.");
23846 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
23847 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
23848 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
23849 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
23850 DAG.getRegister(Vreg, SPTy));
23851 } else {
23852 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
23853 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
23854 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
23856 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23857 Register SPReg = RegInfo->getStackRegister();
23858 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
23859 Chain = SP.getValue(1);
23861 if (Alignment) {
23862 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
23863 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23864 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
23865 }
23867 Result = SP;
23868 }
23870 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
23871 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
23873 SDValue Ops[2] = {Result, Chain};
23874 return DAG.getMergeValues(Ops, dl);
23875 }
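// va_start lowering: 32-bit targets and Win64 just store the VarArgsFrameIndex
// address, while the SysV x86-64 ABI initializes all four __va_list_tag fields
// (gp_offset, fp_offset, overflow_arg_area and reg_save_area).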
23877 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
23878 MachineFunction &MF = DAG.getMachineFunction();
23879 auto PtrVT = getPointerTy(MF.getDataLayout());
23880 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
23882 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23883 SDLoc DL(Op);
23885 if (!Subtarget.is64Bit() ||
23886 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
23887 // vastart just stores the address of the VarArgsFrameIndex slot into the
23888 // memory location argument.
23889 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23890 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
23891 MachinePointerInfo(SV));
23892 }
23894 // __va_list_tag:
23895 //   gp_offset         (0 - 6 * 8)
23896 //   fp_offset         (48 - 48 + 8 * 16)
23897 //   overflow_arg_area (point to parameters coming in memory).
23898 //   reg_save_area
23899 SmallVector<SDValue, 8> MemOps;
23900 SDValue FIN = Op.getOperand(1);
23902 SDValue Store = DAG.getStore(
23903 Op.getOperand(0), DL,
23904 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
23905 MachinePointerInfo(SV));
23906 MemOps.push_back(Store);
23909 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
23910 Store = DAG.getStore(
23911 Op.getOperand(0), DL,
23912 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
23913 MachinePointerInfo(SV, 4));
23914 MemOps.push_back(Store);
23916 // Store ptr to overflow_arg_area
23917 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
23918 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23919 Store =
23920 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
23921 MemOps.push_back(Store);
23923 // Store ptr to reg_save_area.
23924 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
23925 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
23926 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
23927 Store = DAG.getStore(
23928 Op.getOperand(0), DL, RSFIN, FIN,
23929 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
23930 MemOps.push_back(Store);
23931 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
23932 }
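// va_arg lowering for the SysV x86-64 ABI: emit a VAARG_64 pseudo that picks
// gp_offset, fp_offset or the overflow area based on the argument class, then
// load the argument from the address it returns.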
23934 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
23935 assert(Subtarget.is64Bit() &&
23936 "LowerVAARG only handles 64-bit va_arg!");
23937 assert(Op.getNumOperands() == 4);
23939 MachineFunction &MF = DAG.getMachineFunction();
23940 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
23941 // The Win64 ABI uses char* instead of a structure.
23942 return DAG.expandVAArg(Op.getNode());
23944 SDValue Chain = Op.getOperand(0);
23945 SDValue SrcPtr = Op.getOperand(1);
23946 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23947 unsigned Align = Op.getConstantOperandVal(3);
23949 SDLoc dl(Op);
23950 EVT ArgVT = Op.getNode()->getValueType(0);
23951 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
23952 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
23953 uint8_t ArgMode;
23955 // Decide which area this value should be read from.
23956 // TODO: Implement the AMD64 ABI in its entirety. This simple
23957 // selection mechanism works only for the basic types.
23958 assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
23959 if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
23960 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
23961 } else {
23962 assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
23963 "Unhandled argument type in LowerVAARG");
23964 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
23965 }
23967 if (ArgMode == 2) {
23968 // Sanity Check: Make sure using fp_offset makes sense.
23969 assert(!Subtarget.useSoftFloat() &&
23970 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
23971 Subtarget.hasSSE1());
23972 }
23974 // Insert VAARG_64 node into the DAG
23975 // VAARG_64 returns two values: Variable Argument Address, Chain
23976 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
23977 DAG.getConstant(ArgMode, dl, MVT::i8),
23978 DAG.getConstant(Align, dl, MVT::i32)};
23979 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
23980 SDValue VAARG = DAG.getMemIntrinsicNode(
23981 X86ISD::VAARG_64, dl, VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
23982 /*Align=*/None, MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
23983 Chain = VAARG.getValue(1);
23985 // Load the next argument and return it
23986 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
23987 }
23989 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
23990 SelectionDAG &DAG) {
23991 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
23992 // where a va_list is still an i8*.
23993 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
23994 if (Subtarget.isCallingConvWin64(
23995 DAG.getMachineFunction().getFunction().getCallingConv()))
23996 // Probably a Win64 va_copy.
23997 return DAG.expandVACopy(Op.getNode());
23999 SDValue Chain = Op.getOperand(0);
24000 SDValue DstPtr = Op.getOperand(1);
24001 SDValue SrcPtr = Op.getOperand(2);
24002 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
24003 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
24005 SDLoc DL(Op);
24006 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(24, DL),
24007 Align(8), /*isVolatile*/ false, false, false,
24008 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
24009 }
24011 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
24012 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
24013 switch (Opc) {
24016 case X86ISD::VSHLI:
24017 return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
24020 case X86ISD::VSRLI:
24021 return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
24024 case X86ISD::VSRAI:
24025 return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
24026 }
24027 llvm_unreachable("Unknown target vector shift node");
24028 }
24030 /// Handle vector element shifts where the shift amount is a constant.
24031 /// Takes immediate version of shift as input.
24032 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
24033 SDValue SrcOp, uint64_t ShiftAmt,
24034 SelectionDAG &DAG) {
24035 MVT ElementType = VT.getVectorElementType();
24037 // Bitcast the source vector to the output type, this is mainly necessary for
24038 // vXi8/vXi64 shifts.
24039 if (VT != SrcOp.getSimpleValueType())
24040 SrcOp = DAG.getBitcast(VT, SrcOp);
24042 // Fold this packed shift into its first operand if ShiftAmt is 0.
24043 if (ShiftAmt == 0)
24044 return SrcOp;
24046 // Check for ShiftAmt >= element width
24047 if (ShiftAmt >= ElementType.getSizeInBits()) {
24048 if (Opc == X86ISD::VSRAI)
24049 ShiftAmt = ElementType.getSizeInBits() - 1;
24050 else
24051 return DAG.getConstant(0, dl, VT);
24052 }
24054 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
24055 && "Unknown target vector shift-by-constant node");
24057 // Fold this packed vector shift into a build vector if SrcOp is a
24058 // vector of Constants or UNDEFs.
24059 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
24060 SmallVector<SDValue, 8> Elts;
24061 unsigned NumElts = SrcOp->getNumOperands();
24063 switch (Opc) {
24064 default: llvm_unreachable("Unknown opcode!");
24065 case X86ISD::VSHLI:
24066 for (unsigned i = 0; i != NumElts; ++i) {
24067 SDValue CurrentOp = SrcOp->getOperand(i);
24068 if (CurrentOp->isUndef()) {
24069 // Must produce 0s in the correct bits.
24070 Elts.push_back(DAG.getConstant(0, dl, ElementType));
24071 continue;
24072 }
24073 auto *ND = cast<ConstantSDNode>(CurrentOp);
24074 const APInt &C = ND->getAPIntValue();
24075 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
24076 }
24077 break;
24078 case X86ISD::VSRLI:
24079 for (unsigned i = 0; i != NumElts; ++i) {
24080 SDValue CurrentOp = SrcOp->getOperand(i);
24081 if (CurrentOp->isUndef()) {
24082 // Must produce 0s in the correct bits.
24083 Elts.push_back(DAG.getConstant(0, dl, ElementType));
24084 continue;
24085 }
24086 auto *ND = cast<ConstantSDNode>(CurrentOp);
24087 const APInt &C = ND->getAPIntValue();
24088 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
24089 }
24090 break;
24091 case X86ISD::VSRAI:
24092 for (unsigned i = 0; i != NumElts; ++i) {
24093 SDValue CurrentOp = SrcOp->getOperand(i);
24094 if (CurrentOp->isUndef()) {
24095 // All shifted in bits must be the same so use 0.
24096 Elts.push_back(DAG.getConstant(0, dl, ElementType));
24097 continue;
24098 }
24099 auto *ND = cast<ConstantSDNode>(CurrentOp);
24100 const APInt &C = ND->getAPIntValue();
24101 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
24102 }
24103 break;
24104 }
24106 return DAG.getBuildVector(VT, dl, Elts);
24107 }
24109 return DAG.getNode(Opc, dl, VT, SrcOp,
24110 DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
24111 }
24113 /// Handle vector element shifts where the shift amount may or may not be a
24114 /// constant. Takes immediate version of shift as input.
24115 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
24116 SDValue SrcOp, SDValue ShAmt,
24117 const X86Subtarget &Subtarget,
24118 SelectionDAG &DAG) {
24119 MVT SVT = ShAmt.getSimpleValueType();
24120 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
24122 // Catch shift-by-constant.
24123 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
24124 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
24125 CShAmt->getZExtValue(), DAG);
24127 // Change opcode to non-immediate version.
24128 Opc = getTargetVShiftUniformOpcode(Opc, true);
24130 // Need to build a vector containing shift amount.
24131 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
24132 // +====================+============+=======================================+
24133 // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
24134 // +====================+============+=======================================+
24135 // | i64 | Yes, No | Use ShAmt as lowest elt |
24136 // | i32 | Yes | zero-extend in-reg |
24137 // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
24138 // | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
24139 // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
24140 // +====================+============+=======================================+
24142 if (SVT == MVT::i64)
24143 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
24144 else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
24145 ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
24146 (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
24147 ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
24148 ShAmt = ShAmt.getOperand(0);
24149 MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
24150 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
24151 if (Subtarget.hasSSE41())
24152 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
24153 MVT::v2i64, ShAmt);
24154 else {
24155 SDValue ByteShift = DAG.getTargetConstant(
24156 (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
24157 ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
24158 ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
24159 ByteShift);
24160 ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
24161 ByteShift);
24162 }
24163 } else if (Subtarget.hasSSE41() &&
24164 ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
24165 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
24166 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
24167 MVT::v2i64, ShAmt);
24168 } else {
24169 SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
24170 DAG.getUNDEF(SVT)};
24171 ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
24172 }
24174 // The return type has to be a 128-bit type with the same element
24175 // type as the input type.
24176 MVT EltVT = VT.getVectorElementType();
24177 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
24179 ShAmt = DAG.getBitcast(ShVT, ShAmt);
24180 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
24181 }
24183 /// Return Mask with the necessary casting or extending
24184 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
24185 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
24186 const X86Subtarget &Subtarget, SelectionDAG &DAG,
24187 const SDLoc &dl) {
24189 if (isAllOnesConstant(Mask))
24190 return DAG.getConstant(1, dl, MaskVT);
24191 if (X86::isZeroNode(Mask))
24192 return DAG.getConstant(0, dl, MaskVT);
24194 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
24196 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
24197 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
24198 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
24199 // In case 32bit mode, bitcast i64 is illegal, extend/split it.
24200 SDValue Lo, Hi;
24201 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
24202 DAG.getConstant(0, dl, MVT::i32));
24203 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
24204 DAG.getConstant(1, dl, MVT::i32));
24206 Lo = DAG.getBitcast(MVT::v32i1, Lo);
24207 Hi = DAG.getBitcast(MVT::v32i1, Hi);
24209 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
24210 }
24211 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
24212 Mask.getSimpleValueType().getSizeInBits());
24213 // In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
24214 // are extracted by EXTRACT_SUBVECTOR.
24215 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
24216 DAG.getBitcast(BitcastVT, Mask),
24217 DAG.getIntPtrConstant(0, dl));
24218 }
24221 /// Return (and \p Op, \p Mask) for compare instructions or
24222 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
24223 /// necessary casting or extending for \p Mask when lowering masking intrinsics
24224 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
24225 SDValue PreservedSrc,
24226 const X86Subtarget &Subtarget,
24227 SelectionDAG &DAG) {
24228 MVT VT = Op.getSimpleValueType();
24229 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
24230 unsigned OpcodeSelect = ISD::VSELECT;
24232 SDLoc dl(Op);
24233 if (isAllOnesConstant(Mask))
24234 return Op;
24236 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24238 if (PreservedSrc.isUndef())
24239 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
24240 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
24241 }
24243 /// Creates an SDNode for a predicated scalar operation.
24244 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
24245 /// The mask is coming as MVT::i8 and it should be transformed
24246 /// to MVT::v1i1 while lowering masking intrinsics.
24247 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
24248 /// "X86select" instead of "vselect". We just can't create the "vselect" node
24249 /// for a scalar instruction.
24250 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
24251 SDValue PreservedSrc,
24252 const X86Subtarget &Subtarget,
24253 SelectionDAG &DAG) {
24255 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
24256 if (MaskConst->getZExtValue() & 0x1)
24257 return Op;
24259 MVT VT = Op.getSimpleValueType();
24260 SDLoc dl(Op);
24262 assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
24263 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
24264 DAG.getBitcast(MVT::v8i1, Mask),
24265 DAG.getIntPtrConstant(0, dl));
24266 if (Op.getOpcode() == X86ISD::FSETCCM ||
24267 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
24268 Op.getOpcode() == X86ISD::VFPCLASSS)
24269 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
24271 if (PreservedSrc.isUndef())
24272 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
24273 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
24274 }
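// Returns the size in bytes of the stack-allocated EH registration node that
// WinEHStatePass creates for the given function's personality.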
24276 static int getSEHRegistrationNodeSize(const Function *Fn) {
24277 if (!Fn->hasPersonalityFn())
24278 report_fatal_error(
24279 "querying registration node size for function without personality");
24280 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
24281 // WinEHStatePass for the full struct definition.
24282 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
24283 case EHPersonality::MSVC_X86SEH: return 24;
24284 case EHPersonality::MSVC_CXX: return 16;
24285 default: break;
24286 }
24287 report_fatal_error(
24288 "can only recover FP for 32-bit MSVC EH personality functions");
24291 /// When the MSVC runtime transfers control to us, either to an outlined
24292 /// function or when returning to a parent frame after catching an exception, we
24293 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
24294 /// Here's the math:
24295 /// RegNodeBase = EntryEBP - RegNodeSize
24296 /// ParentFP = RegNodeBase - ParentFrameOffset
24297 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
24298 /// subtracting the offset (negative on x86) takes us back to the parent FP.
24299 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
24300 SDValue EntryEBP) {
24301 MachineFunction &MF = DAG.getMachineFunction();
24302 SDLoc dl;
24304 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24305 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
24307 // It's possible that the parent function no longer has a personality function
24308 // if the exceptional code was optimized away, in which case we just return
24309 // the incoming EBP.
24310 if (!Fn->hasPersonalityFn())
24311 return EntryEBP;
24313 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
24314 // registration, or the .set_setframe offset.
24315 MCSymbol *OffsetSym =
24316 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
24317 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
24318 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
24319 SDValue ParentFrameOffset =
24320 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
24322 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
24323 // prologue to RBP in the parent function.
24324 const X86Subtarget &Subtarget =
24325 static_cast<const X86Subtarget &>(DAG.getSubtarget());
24326 if (Subtarget.is64Bit())
24327 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
24329 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
24330 // RegNodeBase = EntryEBP - RegNodeSize
24331 // ParentFP = RegNodeBase - ParentFrameOffset
24332 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
24333 DAG.getConstant(RegNodeSize, dl, PtrVT));
24334 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
24335 }
24337 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
24338 SelectionDAG &DAG) const {
24339 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
24340 auto isRoundModeCurDirection = [](SDValue Rnd) {
24341 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
24342 return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
24344 return false;
24345 };
24346 auto isRoundModeSAE = [](SDValue Rnd) {
24347 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
24348 unsigned RC = C->getZExtValue();
24349 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
24350 // Clear the NO_EXC bit and check remaining bits.
24351 RC ^= X86::STATIC_ROUNDING::NO_EXC;
24352 // As a convenience we allow no other bits or explicitly
24353 // current direction.
24354 return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
24355 }
24356 }
24358 return false;
24359 };
24360 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
24361 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
24362 RC = C->getZExtValue();
24363 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
24364 // Clear the NO_EXC bit and check remaining bits.
24365 RC ^= X86::STATIC_ROUNDING::NO_EXC;
24366 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
24367 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
24368 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
24369 RC == X86::STATIC_ROUNDING::TO_ZERO;
24370 }
24371 }
24373 return false;
24374 };
24376 SDLoc dl(Op);
24377 unsigned IntNo = Op.getConstantOperandVal(0);
24378 MVT VT = Op.getSimpleValueType();
24379 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
24380 if (IntrData) {
24382 switch(IntrData->Type) {
24383 case INTR_TYPE_1OP: {
24384 // We specify 2 possible opcodes for intrinsics with rounding modes.
24385 // First, we check if the intrinsic may have non-default rounding mode,
24386 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
24387 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
24388 if (IntrWithRoundingModeOpcode != 0) {
24389 SDValue Rnd = Op.getOperand(2);
24390 unsigned RC = 0;
24391 if (isRoundModeSAEToX(Rnd, RC))
24392 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
24393 Op.getOperand(1),
24394 DAG.getTargetConstant(RC, dl, MVT::i32));
24395 if (!isRoundModeCurDirection(Rnd))
24396 return SDValue();
24397 }
24398 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24399 Op.getOperand(1));
24400 }
24401 case INTR_TYPE_1OP_SAE: {
24402 SDValue Sae = Op.getOperand(2);
24404 unsigned Opc;
24405 if (isRoundModeCurDirection(Sae))
24406 Opc = IntrData->Opc0;
24407 else if (isRoundModeSAE(Sae))
24408 Opc = IntrData->Opc1;
24409 else
24410 return SDValue();
24412 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
24413 }
24414 case INTR_TYPE_2OP: {
24415 SDValue Src2 = Op.getOperand(2);
24417 // We specify 2 possible opcodes for intrinsics with rounding modes.
24418 // First, we check if the intrinsic may have non-default rounding mode,
24419 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
24420 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
24421 if (IntrWithRoundingModeOpcode != 0) {
24422 SDValue Rnd = Op.getOperand(3);
24423 unsigned RC = 0;
24424 if (isRoundModeSAEToX(Rnd, RC))
24425 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
24426 Op.getOperand(1), Src2,
24427 DAG.getTargetConstant(RC, dl, MVT::i32));
24428 if (!isRoundModeCurDirection(Rnd))
24429 return SDValue();
24430 }
24432 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24433 Op.getOperand(1), Src2);
24434 }
24435 case INTR_TYPE_2OP_SAE: {
24436 SDValue Sae = Op.getOperand(3);
24438 unsigned Opc;
24439 if (isRoundModeCurDirection(Sae))
24440 Opc = IntrData->Opc0;
24441 else if (isRoundModeSAE(Sae))
24442 Opc = IntrData->Opc1;
24443 else
24444 return SDValue();
24446 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
24447 Op.getOperand(2));
24448 }
24449 case INTR_TYPE_3OP:
24450 case INTR_TYPE_3OP_IMM8: {
24451 SDValue Src1 = Op.getOperand(1);
24452 SDValue Src2 = Op.getOperand(2);
24453 SDValue Src3 = Op.getOperand(3);
24455 // We specify 2 possible opcodes for intrinsics with rounding modes.
24456 // First, we check if the intrinsic may have non-default rounding mode,
24457 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
24458 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
24459 if (IntrWithRoundingModeOpcode != 0) {
24460 SDValue Rnd = Op.getOperand(4);
24461 unsigned RC = 0;
24462 if (isRoundModeSAEToX(Rnd, RC))
24463 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
24464 Src1, Src2, Src3,
24465 DAG.getTargetConstant(RC, dl, MVT::i32));
24466 if (!isRoundModeCurDirection(Rnd))
24467 return SDValue();
24468 }
24470 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24471 {Src1, Src2, Src3});
24472 }
24473 case INTR_TYPE_4OP:
24474 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
24475 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
24476 case INTR_TYPE_1OP_MASK: {
24477 SDValue Src = Op.getOperand(1);
24478 SDValue PassThru = Op.getOperand(2);
24479 SDValue Mask = Op.getOperand(3);
24480 // We add rounding mode to the Node when
24481 // - RC Opcode is specified and
24482 // - RC is not "current direction".
24483 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
24484 if (IntrWithRoundingModeOpcode != 0) {
24485 SDValue Rnd = Op.getOperand(4);
24486 unsigned RC = 0;
24487 if (isRoundModeSAEToX(Rnd, RC))
24488 return getVectorMaskingNode(
24489 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
24490 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
24491 Mask, PassThru, Subtarget, DAG);
24492 if (!isRoundModeCurDirection(Rnd))
24493 return SDValue();
24494 }
24495 return getVectorMaskingNode(
24496 DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
24497 Subtarget, DAG);
24498 }
24499 case INTR_TYPE_1OP_MASK_SAE: {
24500 SDValue Src = Op.getOperand(1);
24501 SDValue PassThru = Op.getOperand(2);
24502 SDValue Mask = Op.getOperand(3);
24503 SDValue Rnd = Op.getOperand(4);
24506 if (isRoundModeCurDirection(Rnd))
24507 Opc = IntrData->Opc0;
24508 else if (isRoundModeSAE(Rnd))
24509 Opc = IntrData->Opc1;
24513 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
24516 case INTR_TYPE_SCALAR_MASK: {
24517 SDValue Src1 = Op.getOperand(1);
24518 SDValue Src2 = Op.getOperand(2);
24519 SDValue passThru = Op.getOperand(3);
24520 SDValue Mask = Op.getOperand(4);
24521 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
    // There are 2 kinds of intrinsics in this group:
    // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
    // (2) With rounding mode and sae - 7 operands.
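    // (The operand counts above are SDNode operand counts, which include the
    // intrinsic ID at operand 0 of the INTRINSIC_WO_CHAIN node.)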
24525 bool HasRounding = IntrWithRoundingModeOpcode != 0;
24526 if (Op.getNumOperands() == (5U + HasRounding)) {
24528 SDValue Rnd = Op.getOperand(5);
24530 if (isRoundModeSAEToX(Rnd, RC))
24531 return getScalarMaskingNode(
24532 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
24533 DAG.getTargetConstant(RC, dl, MVT::i32)),
24534 Mask, passThru, Subtarget, DAG);
24535 if (!isRoundModeCurDirection(Rnd))
24538 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
24540 Mask, passThru, Subtarget, DAG);
24543 assert(Op.getNumOperands() == (6U + HasRounding) &&
24544 "Unexpected intrinsic form");
24545 SDValue RoundingMode = Op.getOperand(5);
24546 unsigned Opc = IntrData->Opc0;
24548 SDValue Sae = Op.getOperand(6);
24549 if (isRoundModeSAE(Sae))
24550 Opc = IntrWithRoundingModeOpcode;
24551 else if (!isRoundModeCurDirection(Sae))
24554 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
24555 Src2, RoundingMode),
24556 Mask, passThru, Subtarget, DAG);
24558 case INTR_TYPE_SCALAR_MASK_RND: {
24559 SDValue Src1 = Op.getOperand(1);
24560 SDValue Src2 = Op.getOperand(2);
24561 SDValue passThru = Op.getOperand(3);
24562 SDValue Mask = Op.getOperand(4);
24563 SDValue Rnd = Op.getOperand(5);
24567 if (isRoundModeCurDirection(Rnd))
24568 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
24569 else if (isRoundModeSAEToX(Rnd, RC))
24570 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
24571 DAG.getTargetConstant(RC, dl, MVT::i32));
24575 return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
24577 case INTR_TYPE_SCALAR_MASK_SAE: {
24578 SDValue Src1 = Op.getOperand(1);
24579 SDValue Src2 = Op.getOperand(2);
24580 SDValue passThru = Op.getOperand(3);
24581 SDValue Mask = Op.getOperand(4);
24582 SDValue Sae = Op.getOperand(5);
24584 if (isRoundModeCurDirection(Sae))
24585 Opc = IntrData->Opc0;
24586 else if (isRoundModeSAE(Sae))
24587 Opc = IntrData->Opc1;
24591 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
24592 Mask, passThru, Subtarget, DAG);
24594 case INTR_TYPE_2OP_MASK: {
24595 SDValue Src1 = Op.getOperand(1);
24596 SDValue Src2 = Op.getOperand(2);
24597 SDValue PassThru = Op.getOperand(3);
24598 SDValue Mask = Op.getOperand(4);
24600 if (IntrData->Opc1 != 0) {
24601 SDValue Rnd = Op.getOperand(5);
24603 if (isRoundModeSAEToX(Rnd, RC))
24604 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
24605 DAG.getTargetConstant(RC, dl, MVT::i32));
24606 else if (!isRoundModeCurDirection(Rnd))
24610 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
24611 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
24613 case INTR_TYPE_2OP_MASK_SAE: {
24614 SDValue Src1 = Op.getOperand(1);
24615 SDValue Src2 = Op.getOperand(2);
24616 SDValue PassThru = Op.getOperand(3);
24617 SDValue Mask = Op.getOperand(4);
24619 unsigned Opc = IntrData->Opc0;
24620 if (IntrData->Opc1 != 0) {
24621 SDValue Sae = Op.getOperand(5);
24622 if (isRoundModeSAE(Sae))
24623 Opc = IntrData->Opc1;
24624 else if (!isRoundModeCurDirection(Sae))
24628 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
24629 Mask, PassThru, Subtarget, DAG);
24631 case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
24632 SDValue Src1 = Op.getOperand(1);
24633 SDValue Src2 = Op.getOperand(2);
24634 SDValue Src3 = Op.getOperand(3);
24635 SDValue PassThru = Op.getOperand(4);
24636 SDValue Mask = Op.getOperand(5);
24637 SDValue Sae = Op.getOperand(6);
24639 if (isRoundModeCurDirection(Sae))
24640 Opc = IntrData->Opc0;
24641 else if (isRoundModeSAE(Sae))
24642 Opc = IntrData->Opc1;
24646 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
24647 Mask, PassThru, Subtarget, DAG);
24649 case INTR_TYPE_3OP_MASK_SAE: {
24650 SDValue Src1 = Op.getOperand(1);
24651 SDValue Src2 = Op.getOperand(2);
24652 SDValue Src3 = Op.getOperand(3);
24653 SDValue PassThru = Op.getOperand(4);
24654 SDValue Mask = Op.getOperand(5);
24656 unsigned Opc = IntrData->Opc0;
24657 if (IntrData->Opc1 != 0) {
24658 SDValue Sae = Op.getOperand(6);
24659 if (isRoundModeSAE(Sae))
24660 Opc = IntrData->Opc1;
24661 else if (!isRoundModeCurDirection(Sae))
24664 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
24665 Mask, PassThru, Subtarget, DAG);
24668 SDValue Src1 = Op.getOperand(1);
24669 SDValue Src2 = Op.getOperand(2);
24670 SDValue Src3 = Op.getOperand(3);
24672 EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
24673 Src3 = DAG.getBitcast(MaskVT, Src3);
24675 // Reverse the operands to match VSELECT order.
24676 return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
24679 SDValue Src1 = Op.getOperand(1);
24680 SDValue Src2 = Op.getOperand(2);
24682 // Swap Src1 and Src2 in the node creation
24683 return DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1);
    // NOTE: We need to swizzle the operands to pass the multiply operands
    // first.
24688 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24689 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
24691 SDValue Src1 = Op.getOperand(1);
24692 SDValue Imm = Op.getOperand(2);
24693 SDValue Mask = Op.getOperand(3);
24694 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
24695 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
24697 // Need to fill with zeros to ensure the bitcast will produce zeroes
24698 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
24699 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
24700 DAG.getConstant(0, dl, MVT::v8i1),
24701 FPclassMask, DAG.getIntPtrConstant(0, dl));
24702 return DAG.getBitcast(MVT::i8, Ins);
24705 case CMP_MASK_CC: {
24706 MVT MaskVT = Op.getSimpleValueType();
24707 SDValue CC = Op.getOperand(3);
    // We specify 2 possible opcodes for intrinsics with rounding modes.
    // First, we check whether the intrinsic may have a non-default rounding
    // mode (IntrData->Opc1 != 0), then we check the rounding mode operand.
24711 if (IntrData->Opc1 != 0) {
24712 SDValue Sae = Op.getOperand(4);
24713 if (isRoundModeSAE(Sae))
24714 return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
24715 Op.getOperand(2), CC, Sae);
24716 if (!isRoundModeCurDirection(Sae))
    // Default rounding mode.
24720 return DAG.getNode(IntrData->Opc0, dl, MaskVT,
24721 {Op.getOperand(1), Op.getOperand(2), CC});
24723 case CMP_MASK_SCALAR_CC: {
24724 SDValue Src1 = Op.getOperand(1);
24725 SDValue Src2 = Op.getOperand(2);
24726 SDValue CC = Op.getOperand(3);
24727 SDValue Mask = Op.getOperand(4);
24730 if (IntrData->Opc1 != 0) {
24731 SDValue Sae = Op.getOperand(5);
24732 if (isRoundModeSAE(Sae))
24733 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
24734 else if (!isRoundModeCurDirection(Sae))
    // Default rounding mode.
24738 if (!Cmp.getNode())
24739 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
24741 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
24743 // Need to fill with zeros to ensure the bitcast will produce zeroes
24744 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
24745 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
24746 DAG.getConstant(0, dl, MVT::v8i1),
24747 CmpMask, DAG.getIntPtrConstant(0, dl));
24748 return DAG.getBitcast(MVT::i8, Ins);
24750 case COMI: { // Comparison intrinsics
24751 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
24752 SDValue LHS = Op.getOperand(1);
24753 SDValue RHS = Op.getOperand(2);
24754 // Some conditions require the operands to be swapped.
24755 if (CC == ISD::SETLT || CC == ISD::SETLE)
24756 std::swap(LHS, RHS);
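    // COMIS*/UCOMIS* set ZF/PF/CF like an unsigned compare (GT: CF=0,ZF=0;
    // EQ: ZF=1; LT: CF=1; unordered: ZF=PF=CF=1), which is why LT/LE are
    // handled below as GT/GE on the swapped operands.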
24758 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
24761 case ISD::SETEQ: { // (ZF = 0 and PF = 0)
24762 SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
24763 SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
24764 SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
24767 case ISD::SETNE: { // (ZF = 1 or PF = 1)
24768 SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
24769 SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
24770 SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
24773 case ISD::SETGT: // (CF = 0 and ZF = 0)
24774 case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
24775 SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
24778 case ISD::SETGE: // CF = 0
24779 case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
24780 SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
24783 llvm_unreachable("Unexpected illegal condition!");
24785 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24787 case COMI_RM: { // Comparison intrinsics with Sae
24788 SDValue LHS = Op.getOperand(1);
24789 SDValue RHS = Op.getOperand(2);
24790 unsigned CondVal = Op.getConstantOperandVal(3);
24791 SDValue Sae = Op.getOperand(4);
24794 if (isRoundModeCurDirection(Sae))
24795 FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
24796 DAG.getTargetConstant(CondVal, dl, MVT::i8));
24797 else if (isRoundModeSAE(Sae))
24798 FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
24799 DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
24802 // Need to fill with zeros to ensure the bitcast will produce zeroes
24803 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
24804 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
24805 DAG.getConstant(0, dl, MVT::v16i1),
24806 FCmp, DAG.getIntPtrConstant(0, dl));
24807 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
24808 DAG.getBitcast(MVT::i16, Ins));
24811 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
24812 Op.getOperand(1), Op.getOperand(2), Subtarget,
24814 case COMPRESS_EXPAND_IN_REG: {
24815 SDValue Mask = Op.getOperand(3);
24816 SDValue DataToCompress = Op.getOperand(1);
24817 SDValue PassThru = Op.getOperand(2);
24818 if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
24819 return Op.getOperand(1);
24821 // Avoid false dependency.
24822 if (PassThru.isUndef())
24823 PassThru = DAG.getConstant(0, dl, VT);
24825 return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
24829 case FIXUPIMM_MASKZ: {
24830 SDValue Src1 = Op.getOperand(1);
24831 SDValue Src2 = Op.getOperand(2);
24832 SDValue Src3 = Op.getOperand(3);
24833 SDValue Imm = Op.getOperand(4);
24834 SDValue Mask = Op.getOperand(5);
24835 SDValue Passthru = (IntrData->Type == FIXUPIMM)
24837 : getZeroVector(VT, Subtarget, DAG, dl);
24839 unsigned Opc = IntrData->Opc0;
24840 if (IntrData->Opc1 != 0) {
24841 SDValue Sae = Op.getOperand(6);
24842 if (isRoundModeSAE(Sae))
24843 Opc = IntrData->Opc1;
24844 else if (!isRoundModeCurDirection(Sae))
24848 SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
24850 if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
24851 return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
24853 return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
24856 assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
24857 // Clear the upper bits of the rounding immediate so that the legacy
24858 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
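    // Bits 7:4 of the VRNDSCALE immediate select how many fraction bits to
    // keep; forcing them to zero turns this into a plain round-to-integral,
    // matching the legacy ROUNDPS/ROUNDPD behavior.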
24859 auto Round = cast<ConstantSDNode>(Op.getOperand(2));
24860 SDValue RoundingMode =
24861 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
24862 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24863 Op.getOperand(1), RoundingMode);
24866 assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
24867 // Clear the upper bits of the rounding immediate so that the legacy
24868 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
24869 auto Round = cast<ConstantSDNode>(Op.getOperand(3));
24870 SDValue RoundingMode =
24871 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
24872 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24873 Op.getOperand(1), Op.getOperand(2), RoundingMode);
24876 assert(IntrData->Opc0 == X86ISD::BEXTR && "Unexpected opcode");
    // The control is a TargetConstant, but we need to convert it to a
    // regular constant.
24880 uint64_t Imm = Op.getConstantOperandVal(2);
24881 SDValue Control = DAG.getConstant(Imm, dl, Op.getValueType());
24882 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24883 Op.getOperand(1), Control);
24887 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
24888 SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
    SDValue Res;
    // If the carry in is zero, then we should just use ADD/SUB instead of
    // ADC/SBB.
24893 if (isNullConstant(Op.getOperand(1))) {
      Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
                        Op.getOperand(3));
    } else {
24897 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
24898 DAG.getConstant(-1, dl, MVT::i8));
24899 Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
                        Op.getOperand(3), GenCF.getValue(1));
    }
24902 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
24903 SDValue Results[] = { SetCC, Res };
24904 return DAG.getMergeValues(Results, dl);
24906 case CVTPD2PS_MASK:
24907 case CVTPD2DQ_MASK:
24908 case CVTQQ2PS_MASK:
24909 case TRUNCATE_TO_REG: {
24910 SDValue Src = Op.getOperand(1);
24911 SDValue PassThru = Op.getOperand(2);
24912 SDValue Mask = Op.getOperand(3);
24914 if (isAllOnesConstant(Mask))
24915 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
24917 MVT SrcVT = Src.getSimpleValueType();
24918 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
24919 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24920 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
24921 {Src, PassThru, Mask});
24923 case CVTPS2PH_MASK: {
24924 SDValue Src = Op.getOperand(1);
24925 SDValue Rnd = Op.getOperand(2);
24926 SDValue PassThru = Op.getOperand(3);
24927 SDValue Mask = Op.getOperand(4);
24929 if (isAllOnesConstant(Mask))
24930 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
24932 MVT SrcVT = Src.getSimpleValueType();
24933 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
24934 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24935 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
24939 case CVTNEPS2BF16_MASK: {
24940 SDValue Src = Op.getOperand(1);
24941 SDValue PassThru = Op.getOperand(2);
24942 SDValue Mask = Op.getOperand(3);
24944 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24945 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
24947 // Break false dependency.
24948 if (PassThru.isUndef())
24949 PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
24951 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
24960 default: return SDValue(); // Don't custom lower most intrinsics.
  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
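  // For example, ptestz returns 1 iff (LHS & RHS) == 0; PTEST sets ZF for
  // exactly that case, so it becomes a PTEST node plus a COND_E setcc.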
24965 case Intrinsic::x86_avx512_ktestc_b:
24966 case Intrinsic::x86_avx512_ktestc_w:
24967 case Intrinsic::x86_avx512_ktestc_d:
24968 case Intrinsic::x86_avx512_ktestc_q:
24969 case Intrinsic::x86_avx512_ktestz_b:
24970 case Intrinsic::x86_avx512_ktestz_w:
24971 case Intrinsic::x86_avx512_ktestz_d:
24972 case Intrinsic::x86_avx512_ktestz_q:
24973 case Intrinsic::x86_sse41_ptestz:
24974 case Intrinsic::x86_sse41_ptestc:
24975 case Intrinsic::x86_sse41_ptestnzc:
24976 case Intrinsic::x86_avx_ptestz_256:
24977 case Intrinsic::x86_avx_ptestc_256:
24978 case Intrinsic::x86_avx_ptestnzc_256:
24979 case Intrinsic::x86_avx_vtestz_ps:
24980 case Intrinsic::x86_avx_vtestc_ps:
24981 case Intrinsic::x86_avx_vtestnzc_ps:
24982 case Intrinsic::x86_avx_vtestz_pd:
24983 case Intrinsic::x86_avx_vtestc_pd:
24984 case Intrinsic::x86_avx_vtestnzc_pd:
24985 case Intrinsic::x86_avx_vtestz_ps_256:
24986 case Intrinsic::x86_avx_vtestc_ps_256:
24987 case Intrinsic::x86_avx_vtestnzc_ps_256:
24988 case Intrinsic::x86_avx_vtestz_pd_256:
24989 case Intrinsic::x86_avx_vtestc_pd_256:
24990 case Intrinsic::x86_avx_vtestnzc_pd_256: {
24991 unsigned TestOpc = X86ISD::PTEST;
24992 X86::CondCode X86CC;
24994 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
24995 case Intrinsic::x86_avx512_ktestc_b:
24996 case Intrinsic::x86_avx512_ktestc_w:
24997 case Intrinsic::x86_avx512_ktestc_d:
24998 case Intrinsic::x86_avx512_ktestc_q:
25000 TestOpc = X86ISD::KTEST;
25001 X86CC = X86::COND_B;
25003 case Intrinsic::x86_avx512_ktestz_b:
25004 case Intrinsic::x86_avx512_ktestz_w:
25005 case Intrinsic::x86_avx512_ktestz_d:
25006 case Intrinsic::x86_avx512_ktestz_q:
25007 TestOpc = X86ISD::KTEST;
25008 X86CC = X86::COND_E;
25010 case Intrinsic::x86_avx_vtestz_ps:
25011 case Intrinsic::x86_avx_vtestz_pd:
25012 case Intrinsic::x86_avx_vtestz_ps_256:
25013 case Intrinsic::x86_avx_vtestz_pd_256:
25014 TestOpc = X86ISD::TESTP;
25016 case Intrinsic::x86_sse41_ptestz:
25017 case Intrinsic::x86_avx_ptestz_256:
25019 X86CC = X86::COND_E;
25021 case Intrinsic::x86_avx_vtestc_ps:
25022 case Intrinsic::x86_avx_vtestc_pd:
25023 case Intrinsic::x86_avx_vtestc_ps_256:
25024 case Intrinsic::x86_avx_vtestc_pd_256:
25025 TestOpc = X86ISD::TESTP;
25027 case Intrinsic::x86_sse41_ptestc:
25028 case Intrinsic::x86_avx_ptestc_256:
25030 X86CC = X86::COND_B;
25032 case Intrinsic::x86_avx_vtestnzc_ps:
25033 case Intrinsic::x86_avx_vtestnzc_pd:
25034 case Intrinsic::x86_avx_vtestnzc_ps_256:
25035 case Intrinsic::x86_avx_vtestnzc_pd_256:
25036 TestOpc = X86ISD::TESTP;
25038 case Intrinsic::x86_sse41_ptestnzc:
25039 case Intrinsic::x86_avx_ptestnzc_256:
25041 X86CC = X86::COND_A;
25045 SDValue LHS = Op.getOperand(1);
25046 SDValue RHS = Op.getOperand(2);
25047 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
25048 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
25049 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
25052 case Intrinsic::x86_sse42_pcmpistria128:
25053 case Intrinsic::x86_sse42_pcmpestria128:
25054 case Intrinsic::x86_sse42_pcmpistric128:
25055 case Intrinsic::x86_sse42_pcmpestric128:
25056 case Intrinsic::x86_sse42_pcmpistrio128:
25057 case Intrinsic::x86_sse42_pcmpestrio128:
25058 case Intrinsic::x86_sse42_pcmpistris128:
25059 case Intrinsic::x86_sse42_pcmpestris128:
25060 case Intrinsic::x86_sse42_pcmpistriz128:
25061 case Intrinsic::x86_sse42_pcmpestriz128: {
25063 X86::CondCode X86CC;
25065 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
25066 case Intrinsic::x86_sse42_pcmpistria128:
25067 Opcode = X86ISD::PCMPISTR;
25068 X86CC = X86::COND_A;
25070 case Intrinsic::x86_sse42_pcmpestria128:
25071 Opcode = X86ISD::PCMPESTR;
25072 X86CC = X86::COND_A;
25074 case Intrinsic::x86_sse42_pcmpistric128:
25075 Opcode = X86ISD::PCMPISTR;
25076 X86CC = X86::COND_B;
25078 case Intrinsic::x86_sse42_pcmpestric128:
25079 Opcode = X86ISD::PCMPESTR;
25080 X86CC = X86::COND_B;
25082 case Intrinsic::x86_sse42_pcmpistrio128:
25083 Opcode = X86ISD::PCMPISTR;
25084 X86CC = X86::COND_O;
25086 case Intrinsic::x86_sse42_pcmpestrio128:
25087 Opcode = X86ISD::PCMPESTR;
25088 X86CC = X86::COND_O;
25090 case Intrinsic::x86_sse42_pcmpistris128:
25091 Opcode = X86ISD::PCMPISTR;
25092 X86CC = X86::COND_S;
25094 case Intrinsic::x86_sse42_pcmpestris128:
25095 Opcode = X86ISD::PCMPESTR;
25096 X86CC = X86::COND_S;
25098 case Intrinsic::x86_sse42_pcmpistriz128:
25099 Opcode = X86ISD::PCMPISTR;
25100 X86CC = X86::COND_E;
25102 case Intrinsic::x86_sse42_pcmpestriz128:
25103 Opcode = X86ISD::PCMPESTR;
25104 X86CC = X86::COND_E;
25107 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
25108 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
25109 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
25110 SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
25111 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
25114 case Intrinsic::x86_sse42_pcmpistri128:
25115 case Intrinsic::x86_sse42_pcmpestri128: {
25117 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
25118 Opcode = X86ISD::PCMPISTR;
25120 Opcode = X86ISD::PCMPESTR;
25122 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
25123 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
25124 return DAG.getNode(Opcode, dl, VTs, NewOps);
25127 case Intrinsic::x86_sse42_pcmpistrm128:
25128 case Intrinsic::x86_sse42_pcmpestrm128: {
25130 if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
25131 Opcode = X86ISD::PCMPISTR;
25133 Opcode = X86ISD::PCMPESTR;
25135 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
25136 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
25137 return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
25140 case Intrinsic::eh_sjlj_lsda: {
25141 MachineFunction &MF = DAG.getMachineFunction();
25142 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25143 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
25144 auto &Context = MF.getMMI().getContext();
25145 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
25146 Twine(MF.getFunctionNumber()));
25147 return DAG.getNode(getGlobalWrapperKind(), dl, VT,
25148 DAG.getMCSymbol(S, PtrVT));
25151 case Intrinsic::x86_seh_lsda: {
25152 // Compute the symbol for the LSDA. We know it'll get emitted later.
25153 MachineFunction &MF = DAG.getMachineFunction();
25154 SDValue Op1 = Op.getOperand(1);
25155 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
25156 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
25157 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
25159 // Generate a simple absolute symbol reference. This intrinsic is only
25160 // supported on 32-bit Windows, which isn't PIC.
25161 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
25162 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
25165 case Intrinsic::eh_recoverfp: {
25166 SDValue FnOp = Op.getOperand(1);
25167 SDValue IncomingFPOp = Op.getOperand(2);
25168 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
25169 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
25171 report_fatal_error(
25172 "llvm.eh.recoverfp must take a function as the first argument");
25173 return recoverFramePointer(DAG, Fn, IncomingFPOp);
25176 case Intrinsic::localaddress: {
25177 // Returns one of the stack, base, or frame pointer registers, depending on
25178 // which is used to reference local variables.
25179 MachineFunction &MF = DAG.getMachineFunction();
25180 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25182 if (RegInfo->hasBasePointer(MF))
25183 Reg = RegInfo->getBaseRegister();
25184 else { // Handles the SP or FP case.
25185 bool CantUseFP = RegInfo->needsStackRealignment(MF);
25187 Reg = RegInfo->getPtrSizedStackRegister(MF);
25189 Reg = RegInfo->getPtrSizedFrameRegister(MF);
25191 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
25194 case Intrinsic::x86_avx512_vp2intersect_q_512:
25195 case Intrinsic::x86_avx512_vp2intersect_q_256:
25196 case Intrinsic::x86_avx512_vp2intersect_q_128:
25197 case Intrinsic::x86_avx512_vp2intersect_d_512:
25198 case Intrinsic::x86_avx512_vp2intersect_d_256:
25199 case Intrinsic::x86_avx512_vp2intersect_d_128: {
25200 MVT MaskVT = Op.getSimpleValueType();
25202 SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
25205 SDValue Operation =
25206 DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
25207 Op->getOperand(1), Op->getOperand(2));
25209 SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
25210 MaskVT, Operation);
25211 SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
25212 MaskVT, Operation);
25213 return DAG.getMergeValues({Result0, Result1}, DL);
25215 case Intrinsic::x86_mmx_pslli_w:
25216 case Intrinsic::x86_mmx_pslli_d:
25217 case Intrinsic::x86_mmx_pslli_q:
25218 case Intrinsic::x86_mmx_psrli_w:
25219 case Intrinsic::x86_mmx_psrli_d:
25220 case Intrinsic::x86_mmx_psrli_q:
25221 case Intrinsic::x86_mmx_psrai_w:
25222 case Intrinsic::x86_mmx_psrai_d: {
25224 SDValue ShAmt = Op.getOperand(2);
25225 // If the argument is a constant, convert it to a target constant.
25226 if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
      // Clamp out-of-bounds shift amounts since they will otherwise be masked
      // to 8 bits, which may make them no longer out of bounds.
25229 unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
25230 if (ShiftAmount == 0)
25231 return Op.getOperand(1);
25233 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
25234 Op.getOperand(0), Op.getOperand(1),
25235 DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
25238 unsigned NewIntrinsic;
25240 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
25241 case Intrinsic::x86_mmx_pslli_w:
25242 NewIntrinsic = Intrinsic::x86_mmx_psll_w;
25244 case Intrinsic::x86_mmx_pslli_d:
25245 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
25247 case Intrinsic::x86_mmx_pslli_q:
25248 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
25250 case Intrinsic::x86_mmx_psrli_w:
25251 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
25253 case Intrinsic::x86_mmx_psrli_d:
25254 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
25256 case Intrinsic::x86_mmx_psrli_q:
25257 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
25259 case Intrinsic::x86_mmx_psrai_w:
25260 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
25262 case Intrinsic::x86_mmx_psrai_d:
25263 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
    // The vector shift intrinsics with scalars use 32-bit shift amounts, but
    // the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
    // MMX register.
25270 ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
25271 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
25272 DAG.getConstant(NewIntrinsic, DL, MVT::i32),
25273 Op.getOperand(1), ShAmt);
25279 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
25280 SDValue Src, SDValue Mask, SDValue Base,
25281 SDValue Index, SDValue ScaleOp, SDValue Chain,
25282 const X86Subtarget &Subtarget) {
25284 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
25285 // Scale must be constant.
25288 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25289 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
25290 TLI.getPointerTy(DAG.getDataLayout()));
25291 EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
25292 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
25293 // If source is undef or we know it won't be used, use a zero vector
25294 // to break register dependency.
25295 // TODO: use undef instead and let BreakFalseDeps deal with it?
25296 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
25297 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
25299 // Cast mask to an integer type.
25300 Mask = DAG.getBitcast(MaskVT, Mask);
25302 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
25304 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
25306 DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
25307 MemIntr->getMemoryVT(), MemIntr->getMemOperand());
25308 return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
25311 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
25312 SDValue Src, SDValue Mask, SDValue Base,
25313 SDValue Index, SDValue ScaleOp, SDValue Chain,
25314 const X86Subtarget &Subtarget) {
25315 MVT VT = Op.getSimpleValueType();
25317 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
25318 // Scale must be constant.
25321 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25322 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
25323 TLI.getPointerTy(DAG.getDataLayout()));
25324 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
25325 VT.getVectorNumElements());
25326 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
25328 // We support two versions of the gather intrinsics. One with scalar mask and
25329 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
25330 if (Mask.getValueType() != MaskVT)
25331 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25333 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
25334 // If source is undef or we know it won't be used, use a zero vector
25335 // to break register dependency.
25336 // TODO: use undef instead and let BreakFalseDeps deal with it?
25337 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
25338 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
25340 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
25342 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
25344 DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
25345 MemIntr->getMemoryVT(), MemIntr->getMemOperand());
25346 return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
25349 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
25350 SDValue Src, SDValue Mask, SDValue Base,
25351 SDValue Index, SDValue ScaleOp, SDValue Chain,
25352 const X86Subtarget &Subtarget) {
25354 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
25355 // Scale must be constant.
25358 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25359 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
25360 TLI.getPointerTy(DAG.getDataLayout()));
25361 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
25362 Src.getSimpleValueType().getVectorNumElements());
25363 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
25365 // We support two versions of the scatter intrinsics. One with scalar mask and
25366 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
25367 if (Mask.getValueType() != MaskVT)
25368 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25370 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
25372 SDVTList VTs = DAG.getVTList(MVT::Other);
25373 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
25375 DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
25376 MemIntr->getMemoryVT(), MemIntr->getMemOperand());
25380 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
25381 SDValue Mask, SDValue Base, SDValue Index,
25382 SDValue ScaleOp, SDValue Chain,
25383 const X86Subtarget &Subtarget) {
25385 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
25386 // Scale must be constant.
25389 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25390 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
25391 TLI.getPointerTy(DAG.getDataLayout()));
25392 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
25393 SDValue Segment = DAG.getRegister(0, MVT::i32);
25395 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
25396 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25397 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
25398 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
25399 return SDValue(Res, 0);
25402 /// Handles the lowering of builtin intrinsics with chain that return their
25403 /// value into registers EDX:EAX.
/// If operand SrcReg is a valid register identifier, then operand 2 of N is
/// copied to SrcReg. The assumption is that SrcReg is an implicit input to
/// the generated machine instruction.
/// Returns a Glue value which can be used to add an extra copy-from-reg if the
/// expanded intrinsic implicitly defines extra registers (i.e. not just
/// EDX:EAX).
25410 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
25412 unsigned TargetOpcode,
25414 const X86Subtarget &Subtarget,
25415 SmallVectorImpl<SDValue> &Results) {
25416 SDValue Chain = N->getOperand(0);
25420 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
25421 Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
25422 Glue = Chain.getValue(1);
25425 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
25426 SDValue N1Ops[] = {Chain, Glue};
25427 SDNode *N1 = DAG.getMachineNode(
25428 TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
25429 Chain = SDValue(N1, 0);
  // Read the result out of EDX:EAX (RDX:RAX on 64-bit targets).
  SDValue LO, HI;
25433 if (Subtarget.is64Bit()) {
25434 LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
25435 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
25438 LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
25439 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
25442 Chain = HI.getValue(1);
25443 Glue = HI.getValue(2);
25445 if (Subtarget.is64Bit()) {
25446 // Merge the two 32-bit values into a 64-bit one.
25447 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
25448 DAG.getConstant(32, DL, MVT::i8));
25449 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
25450 Results.push_back(Chain);
25454 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
25455 SDValue Ops[] = { LO, HI };
25456 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
25457 Results.push_back(Pair);
25458 Results.push_back(Chain);
25462 /// Handles the lowering of builtin intrinsics that read the time stamp counter
25463 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
25464 /// READCYCLECOUNTER nodes.
25465 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
25467 const X86Subtarget &Subtarget,
25468 SmallVectorImpl<SDValue> &Results) {
25469 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
25470 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
25471 // and the EAX register is loaded with the low-order 32 bits.
25472 SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
25473 /* NoRegister */0, Subtarget,
25475 if (Opcode != X86::RDTSCP)
25478 SDValue Chain = Results[1];
  // Instruction RDTSCP also loads the IA32_TSC_AUX MSR (address C000_0103H)
  // into the ECX register. Add 'ecx' explicitly to the chain.
25481 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
25483 Results.push_back(ecx.getValue(1));
25486 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
25487 SelectionDAG &DAG) {
25488 SmallVector<SDValue, 3> Results;
25490 getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
25492 return DAG.getMergeValues(Results, DL);
25495 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
25496 MachineFunction &MF = DAG.getMachineFunction();
25497 SDValue Chain = Op.getOperand(0);
25498 SDValue RegNode = Op.getOperand(2);
25499 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
25501 report_fatal_error("EH registrations only live in functions using WinEH");
25503 // Cast the operand to an alloca, and remember the frame index.
25504 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
25506 report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
25507 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
25509 // Return the chain operand without making any DAG nodes.
25513 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
25514 MachineFunction &MF = DAG.getMachineFunction();
25515 SDValue Chain = Op.getOperand(0);
25516 SDValue EHGuard = Op.getOperand(2);
25517 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
25519 report_fatal_error("EHGuard only live in functions using WinEH");
25521 // Cast the operand to an alloca, and remember the frame index.
25522 auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
25524 report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
25525 EHInfo->EHGuardFrameIndex = FINode->getIndex();
25527 // Return the chain operand without making any DAG nodes.
25531 /// Emit Truncating Store with signed or unsigned saturation.
25533 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
25534 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
25535 SelectionDAG &DAG) {
25536 SDVTList VTs = DAG.getVTList(MVT::Other);
25537 SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
25538 SDValue Ops[] = { Chain, Val, Ptr, Undef };
25539 unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
25540 return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
25543 /// Emit Masked Truncating Store with signed or unsigned saturation.
25545 EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
25546 SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
25547 MachineMemOperand *MMO, SelectionDAG &DAG) {
25548 SDVTList VTs = DAG.getVTList(MVT::Other);
25549 SDValue Ops[] = { Chain, Val, Ptr, Mask };
25550 unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
25551 return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
25554 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
25555 SelectionDAG &DAG) {
25556 unsigned IntNo = Op.getConstantOperandVal(1);
25557 const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
25560 case llvm::Intrinsic::x86_seh_ehregnode:
25561 return MarkEHRegistrationNode(Op, DAG);
25562 case llvm::Intrinsic::x86_seh_ehguard:
25563 return MarkEHGuard(Op, DAG);
25564 case llvm::Intrinsic::x86_rdpkru: {
25566 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
25567 // Create a RDPKRU node and pass 0 to the ECX parameter.
25568 return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
25569 DAG.getConstant(0, dl, MVT::i32));
25571 case llvm::Intrinsic::x86_wrpkru: {
25573 // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
25574 // to the EDX and ECX parameters.
25575 return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
25576 Op.getOperand(0), Op.getOperand(2),
25577 DAG.getConstant(0, dl, MVT::i32),
25578 DAG.getConstant(0, dl, MVT::i32));
25580 case llvm::Intrinsic::x86_flags_read_u32:
25581 case llvm::Intrinsic::x86_flags_read_u64:
25582 case llvm::Intrinsic::x86_flags_write_u32:
25583 case llvm::Intrinsic::x86_flags_write_u64: {
    // We need a frame pointer because this will get lowered to a PUSH/POP
    // sequence.
25586 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
25587 MFI.setHasCopyImplyingStackAdjustment(true);
25588 // Don't do anything here, we will expand these intrinsics out later
25589 // during FinalizeISel in EmitInstrWithCustomInserter.
25592 case Intrinsic::x86_lwpins32:
25593 case Intrinsic::x86_lwpins64:
25594 case Intrinsic::x86_umwait:
25595 case Intrinsic::x86_tpause: {
25597 SDValue Chain = Op->getOperand(0);
25598 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
25602 default: llvm_unreachable("Impossible intrinsic");
25603 case Intrinsic::x86_umwait:
25604 Opcode = X86ISD::UMWAIT;
25606 case Intrinsic::x86_tpause:
25607 Opcode = X86ISD::TPAUSE;
25609 case Intrinsic::x86_lwpins32:
25610 case Intrinsic::x86_lwpins64:
25611 Opcode = X86ISD::LWPINS;
25615 SDValue Operation =
25616 DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
25617 Op->getOperand(3), Op->getOperand(4));
25618 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
25619 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
25620 Operation.getValue(1));
25622 case Intrinsic::x86_enqcmd:
25623 case Intrinsic::x86_enqcmds: {
25625 SDValue Chain = Op.getOperand(0);
25626 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
25629 default: llvm_unreachable("Impossible intrinsic!");
25630 case Intrinsic::x86_enqcmd:
25631 Opcode = X86ISD::ENQCMD;
25633 case Intrinsic::x86_enqcmds:
25634 Opcode = X86ISD::ENQCMDS;
25637 SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
25639 SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
25640 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
25641 Operation.getValue(1));
25648 switch(IntrData->Type) {
25649 default: llvm_unreachable("Unknown Intrinsic Type");
25652 // Emit the node with the right value type.
25653 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
25654 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
25656 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
25657 // Otherwise return the value from Rand, which is always 0, casted to i32.
25658 SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
25659 DAG.getConstant(1, dl, Op->getValueType(1)),
25660 DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
25661 SDValue(Result.getNode(), 1)};
25662 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
25664 // Return { result, isValid, chain }.
25665 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
25666 SDValue(Result.getNode(), 2));
25668 case GATHER_AVX2: {
25669 SDValue Chain = Op.getOperand(0);
25670 SDValue Src = Op.getOperand(2);
25671 SDValue Base = Op.getOperand(3);
25672 SDValue Index = Op.getOperand(4);
25673 SDValue Mask = Op.getOperand(5);
25674 SDValue Scale = Op.getOperand(6);
25675 return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
25676 Scale, Chain, Subtarget);
    // gather(v1, mask, index, base, scale);
25680 SDValue Chain = Op.getOperand(0);
25681 SDValue Src = Op.getOperand(2);
25682 SDValue Base = Op.getOperand(3);
25683 SDValue Index = Op.getOperand(4);
25684 SDValue Mask = Op.getOperand(5);
25685 SDValue Scale = Op.getOperand(6);
25686 return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
    // scatter(base, mask, index, v1, scale);
25691 SDValue Chain = Op.getOperand(0);
25692 SDValue Base = Op.getOperand(2);
25693 SDValue Mask = Op.getOperand(3);
25694 SDValue Index = Op.getOperand(4);
25695 SDValue Src = Op.getOperand(5);
25696 SDValue Scale = Op.getOperand(6);
25697 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
25698 Scale, Chain, Subtarget);
25701 const APInt &HintVal = Op.getConstantOperandAPInt(6);
25702 assert((HintVal == 2 || HintVal == 3) &&
25703 "Wrong prefetch hint in intrinsic: should be 2 or 3");
25704 unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
25705 SDValue Chain = Op.getOperand(0);
25706 SDValue Mask = Op.getOperand(2);
25707 SDValue Index = Op.getOperand(3);
25708 SDValue Base = Op.getOperand(4);
25709 SDValue Scale = Op.getOperand(5);
25710 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
25713 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
25715 SmallVector<SDValue, 2> Results;
25716 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
25718 return DAG.getMergeValues(Results, dl);
25720 // Read Performance Monitoring Counters.
25722 // GetExtended Control Register.
25724 SmallVector<SDValue, 2> Results;
25726 // RDPMC uses ECX to select the index of the performance counter to read.
25727 // XGETBV uses ECX to select the index of the XCR register to return.
25728 // The result is stored into registers EDX:EAX.
25729 expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
25730 Subtarget, Results);
25731 return DAG.getMergeValues(Results, dl);
25733 // XTEST intrinsics.
25735 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
25736 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
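    // XTEST sets ZF when the processor is *not* executing transactionally, so
    // the COND_NE setcc below yields 1 exactly when an RTM/HLE transaction is
    // in progress.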
25738 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
25739 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
25740 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
25741 Ret, SDValue(InTrans.getNode(), 1));
25743 case TRUNCATE_TO_MEM_VI8:
25744 case TRUNCATE_TO_MEM_VI16:
25745 case TRUNCATE_TO_MEM_VI32: {
25746 SDValue Mask = Op.getOperand(4);
25747 SDValue DataToTruncate = Op.getOperand(3);
25748 SDValue Addr = Op.getOperand(2);
25749 SDValue Chain = Op.getOperand(0);
25751 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
25752 assert(MemIntr && "Expected MemIntrinsicSDNode!");
25754 EVT MemVT = MemIntr->getMemoryVT();
25756 uint16_t TruncationOp = IntrData->Opc0;
25757 switch (TruncationOp) {
25758 case X86ISD::VTRUNC: {
25759 if (isAllOnesConstant(Mask)) // return just a truncate store
25760 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
25761 MemIntr->getMemOperand());
25763 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
25764 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25765 SDValue Offset = DAG.getUNDEF(VMask.getValueType());
25767 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
25768 MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
25769 true /* truncating */);
25771 case X86ISD::VTRUNCUS:
25772 case X86ISD::VTRUNCS: {
25773 bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
25774 if (isAllOnesConstant(Mask))
25775 return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
25776 MemIntr->getMemOperand(), DAG);
25778 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
25779 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25781 return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
25782 VMask, MemVT, MemIntr->getMemOperand(), DAG);
25785 llvm_unreachable("Unsupported truncstore intrinsic");
25791 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
25792 SelectionDAG &DAG) const {
25793 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
25794 MFI.setReturnAddressIsTaken(true);
25796 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
25799 unsigned Depth = Op.getConstantOperandVal(0);
25801 EVT PtrVT = getPointerTy(DAG.getDataLayout());
25804 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
25805 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25806 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
25807 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
25808 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
25809 MachinePointerInfo());
25812 // Just load the return address.
25813 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
25814 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
25815 MachinePointerInfo());
25818 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
25819 SelectionDAG &DAG) const {
25820 DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
25821 return getReturnAddressFrameIndex(DAG);
25824 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
25825 MachineFunction &MF = DAG.getMachineFunction();
25826 MachineFrameInfo &MFI = MF.getFrameInfo();
25827 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
25828 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25829 EVT VT = Op.getValueType();
25831 MFI.setFrameAddressIsTaken(true);
25833 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
    // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind
    // codes at the same time.
25837 int FrameAddrIndex = FuncInfo->getFAIndex();
25838 if (!FrameAddrIndex) {
25839 // Set up a frame object for the return address.
25840 unsigned SlotSize = RegInfo->getSlotSize();
25841 FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
25842 SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
25843 FuncInfo->setFAIndex(FrameAddrIndex);
25845 return DAG.getFrameIndex(FrameAddrIndex, VT);
25848 unsigned FrameReg =
25849 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
25850 SDLoc dl(Op); // FIXME probably not meaningful
25851 unsigned Depth = Op.getConstantOperandVal(0);
25852 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
25853 (FrameReg == X86::EBP && VT == MVT::i32)) &&
25854 "Invalid Frame Register!");
25855 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
25857 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
25858 MachinePointerInfo());
25862 // FIXME? Maybe this could be a TableGen attribute on some registers and
25863 // this table could be generated automatically from RegInfo.
25864 Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
25865 const MachineFunction &MF) const {
25866 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25868 Register Reg = StringSwitch<unsigned>(RegName)
25869 .Case("esp", X86::ESP)
25870 .Case("rsp", X86::RSP)
25871 .Case("ebp", X86::EBP)
25872 .Case("rbp", X86::RBP)
25875 if (Reg == X86::EBP || Reg == X86::RBP) {
25876 if (!TFI.hasFP(MF))
25877 report_fatal_error("register " + StringRef(RegName) +
25878 " is allocatable: function has no frame pointer");
25881 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25882 Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
25883 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
25884 "Invalid Frame Register!");
25892 report_fatal_error("Invalid register name global variable");
25895 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
25896 SelectionDAG &DAG) const {
25897 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25898 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
25901 Register X86TargetLowering::getExceptionPointerRegister(
25902 const Constant *PersonalityFn) const {
25903 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
25904 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
25906 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
25909 Register X86TargetLowering::getExceptionSelectorRegister(
25910 const Constant *PersonalityFn) const {
25911 // Funclet personalities don't use selectors (the runtime does the selection).
25912 assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
25913 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
25916 bool X86TargetLowering::needsFixedCatchObjects() const {
25917 return Subtarget.isTargetWin64();
25920 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
25921 SDValue Chain = Op.getOperand(0);
25922 SDValue Offset = Op.getOperand(1);
25923 SDValue Handler = Op.getOperand(2);
25926 EVT PtrVT = getPointerTy(DAG.getDataLayout());
25927 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25928 Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
25929 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
25930 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
25931 "Invalid Frame Register!");
25932 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
25933 Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
25935 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
25936 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
25938 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
25939 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
25940 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
25942 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
25943 DAG.getRegister(StoreAddrReg, PtrVT));
25946 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
25947 SelectionDAG &DAG) const {
  // If the subtarget is not 64-bit, we may need the global base reg
  // after isel expand pseudo, i.e., after the CGBR pass has run.
25951 // Therefore, ask for the GlobalBaseReg now, so that the pass
25952 // inserts the code for us in case we need it.
25953 // Otherwise, we will end up in a situation where we will
25954 // reference a virtual register that is not defined!
25955 if (!Subtarget.is64Bit()) {
25956 const X86InstrInfo *TII = Subtarget.getInstrInfo();
25957 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
25959 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
25960 DAG.getVTList(MVT::i32, MVT::Other),
25961 Op.getOperand(0), Op.getOperand(1));
25964 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
25965 SelectionDAG &DAG) const {
25967 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
25968 Op.getOperand(0), Op.getOperand(1));
25971 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
25972 SelectionDAG &DAG) const {
25974 return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
25978 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
25979 return Op.getOperand(0);
25982 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
25983 SelectionDAG &DAG) const {
25984 SDValue Root = Op.getOperand(0);
25985 SDValue Trmp = Op.getOperand(1); // trampoline
25986 SDValue FPtr = Op.getOperand(2); // nested function
25987 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
25990 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
25991 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
25993 if (Subtarget.is64Bit()) {
25994 SDValue OutChains[6];
25996 // Large code-model.
25997 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
25998 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
26000 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
26001 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
26003 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
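    // The emitted 64-bit trampoline is effectively:
    //   movabsq <nested function>, %r11
    //   movabsq <nest parameter>,  %r10
    //   jmpq    *%r11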
26005 // Load the pointer to the nested function into R11.
26006 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
26007 SDValue Addr = Trmp;
26008 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
26009 Addr, MachinePointerInfo(TrmpAddr));
26011 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
26012 DAG.getConstant(2, dl, MVT::i64));
26014 DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
26015 /* Alignment = */ 2);
26017 // Load the 'nest' parameter value into R10.
26018 // R10 is specified in X86CallingConv.td
26019 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
26020 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
26021 DAG.getConstant(10, dl, MVT::i64));
26022 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
26023 Addr, MachinePointerInfo(TrmpAddr, 10));
26025 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
26026 DAG.getConstant(12, dl, MVT::i64));
26028 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
26029 /* Alignment = */ 2);
26031 // Jump to the nested function.
26032 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
26033 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
26034 DAG.getConstant(20, dl, MVT::i64));
26035 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
26036 Addr, MachinePointerInfo(TrmpAddr, 20));
26038 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
26039 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
26040 DAG.getConstant(22, dl, MVT::i64));
26041 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
26042 Addr, MachinePointerInfo(TrmpAddr, 22));
26044 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
26046 const Function *Func =
26047 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
26048 CallingConv::ID CC = Func->getCallingConv();
26053 llvm_unreachable("Unsupported calling convention");
26054 case CallingConv::C:
26055 case CallingConv::X86_StdCall: {
26056 // Pass 'nest' parameter in ECX.
26057 // Must be kept in sync with X86CallingConv.td
26058 NestReg = X86::ECX;
26060 // Check that ECX wasn't needed by an 'inreg' parameter.
26061 FunctionType *FTy = Func->getFunctionType();
26062 const AttributeList &Attrs = Func->getAttributes();
26064 if (!Attrs.isEmpty() && !Func->isVarArg()) {
26065 unsigned InRegCount = 0;
26068 for (FunctionType::param_iterator I = FTy->param_begin(),
26069 E = FTy->param_end(); I != E; ++I, ++Idx)
26070 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
26071 auto &DL = DAG.getDataLayout();
26072 // FIXME: should only count parameters that are lowered to integers.
26073 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
26076 if (InRegCount > 2) {
26077 report_fatal_error("Nest register in use - reduce number of inreg"
26083 case CallingConv::X86_FastCall:
26084 case CallingConv::X86_ThisCall:
26085 case CallingConv::Fast:
26086 case CallingConv::Tail:
26087 // Pass 'nest' parameter in EAX.
26088 // Must be kept in sync with X86CallingConv.td
26089 NestReg = X86::EAX;
26093 SDValue OutChains[4];
26094 SDValue Addr, Disp;
26096 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
26097 DAG.getConstant(10, dl, MVT::i32));
26098 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
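    // The 32-bit trampoline is: movl $<nest>, %<NestReg>; jmp <disp32>.
    // The jmp rel32 displacement is taken relative to the end of the 10-byte
    // trampoline (Trmp + 10), hence Disp = FPtr - (Trmp + 10).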
26100 // This is storing the opcode for MOV32ri.
26101 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
26102 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
    OutChains[0] =
        DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
26105 Trmp, MachinePointerInfo(TrmpAddr));
26107 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
26108 DAG.getConstant(1, dl, MVT::i32));
    OutChains[1] =
        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
26111 /* Alignment = */ 1);
26113 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
26114 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
26115 DAG.getConstant(5, dl, MVT::i32));
26116 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
26117 Addr, MachinePointerInfo(TrmpAddr, 5),
26118 /* Alignment = */ 1);
26120 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
26121 DAG.getConstant(6, dl, MVT::i32));
    OutChains[3] =
        DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
26124 /* Alignment = */ 1);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}
26130 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
26131 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we use a packed lookup table of the four 2-bit
  values that we can index by FPSR[11:10]
    0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPSR[11:10]

    (0x2d >> ((FPSR & 0xc00) >> 9)) & 3
  */
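  // For example, round-to-+inf has FPSR[11:10] == 10b, so FPSR & 0xc00 == 0x800,
  // the shift is 0x800 >> 9 == 4, and (0x2d >> 4) & 3 == 2, which is the
  // FLT_ROUNDS value for round-to-+inf.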
26154 MachineFunction &MF = DAG.getMachineFunction();
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
26158 // Save FP Control Word to stack slot
26159 int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
26160 SDValue StackSlot =
26161 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
26163 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
26165 SDValue Chain = Op.getOperand(0);
26166 SDValue Ops[] = {Chain, StackSlot};
26167 Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
26168 DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
26169 Align(2), MachineMemOperand::MOStore);
26171 // Load FP Control Word from stack slot
26172 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
26173 Chain = CWD.getValue(1);
26175 // Mask and turn the control bits into a shift for the lookup table.
  SDValue Shift =
      DAG.getNode(ISD::SRL, DL, MVT::i16,
26178 DAG.getNode(ISD::AND, DL, MVT::i16,
26179 CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
26180 DAG.getConstant(9, DL, MVT::i8));
26181 Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);
26183 SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
  SDValue RetVal =
      DAG.getNode(ISD::AND, DL, MVT::i32,
26186 DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
26187 DAG.getConstant(3, DL, MVT::i32));
26189 RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);
  return DAG.getMergeValues({RetVal, Chain}, DL);
}
26194 /// Lower a vector CTLZ using native supported vector CTLZ instruction.
// i8/i16 vectors implemented using dword LZCNT vector instruction
// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
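// E.g. for vXi8: ctlz_i8(x) == ctlz_i32(zext32(x)) - 24, since zero extending
// an i8 to i32 adds exactly 32 - 8 = 24 extra leading zero bits.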
26200 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
26201 const X86Subtarget &Subtarget) {
  assert(Op.getOpcode() == ISD::CTLZ);
  SDLoc dl(Op);
26204 MVT VT = Op.getSimpleValueType();
26205 MVT EltVT = VT.getVectorElementType();
26206 unsigned NumElems = VT.getVectorNumElements();
26208 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
26209 "Unsupported element type");
  // Split the vector; its Lo and Hi parts will be handled in the next iteration.
26212 if (NumElems > 16 ||
26213 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
26214 return splitVectorIntUnary(Op, DAG);
26216 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
26217 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
26218 "Unsupported value type for operation");
26220 // Use native supported vector instruction vplzcntd.
26221 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
26222 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
26223 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
26224 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
  return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
}
26229 // Lower CTLZ using a PSHUFB lookup table implementation.
26230 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
26231 const X86Subtarget &Subtarget,
26232 SelectionDAG &DAG) {
26233 MVT VT = Op.getSimpleValueType();
26234 int NumElts = VT.getVectorNumElements();
26235 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
26236 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
26238 // Per-nibble leading zero PSHUFB lookup table.
26239 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
26240 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
26241 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
26242 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
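  // Each LUT entry is the leading-zero count of its 4-bit index, e.g.
  // LUT[0x2] == 2 because 0b0010 has two leading zeros within the nibble.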
26244 SmallVector<SDValue, 64> LUTVec;
26245 for (int i = 0; i < NumBytes; ++i)
26246 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
26247 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
26249 // Begin by bitcasting the input to byte vector, then split those bytes
  // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
  // If the hi input nibble is zero then we add both results together, otherwise
  // we just take the hi result (by masking the lo result to zero before the
  // add).
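  // E.g. 0x12 (0b00010010): hi nibble 0b0001 -> 3 leading zeros, nonzero, so
  // ctlz == 3; 0x02 (0b00000010): hi nibble zero, so ctlz == 4 + LUT[0b0010] == 6.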
26254 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
26255 SDValue Zero = DAG.getConstant(0, DL, CurrVT);
26257 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
  SDValue Lo = Op0;
  SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
  SDValue HiZ;
  if (CurrVT.is512BitVector()) {
26262 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
26263 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
    HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
  } else {
    HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
  }
26269 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
26270 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
26271 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
26272 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
26274 // Merge result back from vXi8 back to VT, working on the lo/hi halves
26275 // of the current vector width in the same way we did for the nibbles.
26276 // If the upper half of the input element is zero then add the halves'
26277 // leading zero counts together, otherwise just use the upper half's.
26278 // Double the width of the result until we are at target width.
26279 while (CurrVT != VT) {
26280 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
26281 int CurrNumElts = CurrVT.getVectorNumElements();
26282 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
26283 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
26284 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
26286 // Check if the upper half of the input element is zero.
26287 if (CurrVT.is512BitVector()) {
26288 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
26289 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
26290 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
      HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
    } else {
      HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
    }
26296 HiZ = DAG.getBitcast(NextVT, HiZ);
26298 // Move the upper/lower halves to the lower bits as we'll be extending to
    // NextVT. Mask the lower result to zero if HiZ is true and add the results
    // together.
26301 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
26302 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
26303 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
26304 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
    Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
    CurrVT = NextVT;
  }

  return Res;
}
26312 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
26313 const X86Subtarget &Subtarget,
26314 SelectionDAG &DAG) {
26315 MVT VT = Op.getSimpleValueType();
26317 if (Subtarget.hasCDI() &&
26318 // vXi8 vectors need to be promoted to 512-bits for vXi32.
26319 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
26320 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
26322 // Decompose 256-bit ops into smaller 128-bit ops.
26323 if (VT.is256BitVector() && !Subtarget.hasInt256())
26324 return splitVectorIntUnary(Op, DAG);
26326 // Decompose 512-bit ops into smaller 256-bit ops.
26327 if (VT.is512BitVector() && !Subtarget.hasBWI())
26328 return splitVectorIntUnary(Op, DAG);
26330 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
  return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
}
26334 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
26335 SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();

  if (VT.isVector())
    return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
26345 Op = Op.getOperand(0);
26346 if (VT == MVT::i8) {
26347 // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }
26352 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
26353 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
26354 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
26356 if (Opc == ISD::CTLZ) {
26357 // If src is zero (i.e. bsr sets ZF), returns NumBits.
26358 SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
                     DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
                     Op.getValue(1)};
    Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
  }
26364 // Finally xor with NumBits-1.
26365 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
26366 DAG.getConstant(NumBits - 1, dl, OpVT));
  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}
26373 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
26374 SelectionDAG &DAG) {
26375 MVT VT = Op.getSimpleValueType();
26376 unsigned NumBits = VT.getScalarSizeInBits();
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
26380 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
26381 "Only scalar CTTZ requires custom lowering");
26383 // Issue a bsf (scan bits forward) which also sets EFLAGS.
26384 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
26385 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
26387 // If src is zero (i.e. bsf sets ZF), returns NumBits.
26388 SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
                   DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
                   Op.getValue(1)};
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
26394 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
26395 const X86Subtarget &Subtarget) {
26396 MVT VT = Op.getSimpleValueType();
26397 if (VT == MVT::i16 || VT == MVT::i32)
26398 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
26400 if (VT.getScalarType() == MVT::i1)
26401 return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
26402 Op.getOperand(0), Op.getOperand(1));
26404 if (VT == MVT::v32i16 || VT == MVT::v64i8)
26405 return splitVectorIntBinary(Op, DAG);
26407 assert(Op.getSimpleValueType().is256BitVector() &&
26408 Op.getSimpleValueType().isInteger() &&
26409 "Only handle AVX 256-bit vector integer operation");
  return splitVectorIntBinary(Op, DAG);
}
26413 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
26414 const X86Subtarget &Subtarget) {
26415 MVT VT = Op.getSimpleValueType();
26416 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
26417 unsigned Opcode = Op.getOpcode();
  if (VT.getScalarType() == MVT::i1) {
    SDLoc dl(Op);
    switch (Opcode) {
    default: llvm_unreachable("Expected saturated arithmetic opcode");
    case ISD::UADDSAT:
    case ISD::SADDSAT:
      // *addsat i1 X, Y --> X | Y
      return DAG.getNode(ISD::OR, dl, VT, X, Y);
    case ISD::USUBSAT:
    case ISD::SSUBSAT:
      // *subsat i1 X, Y --> X & ~Y
      return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
    }
  }
26433 if (VT.is128BitVector()) {
26434 // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
26435 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26436 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
26437 *DAG.getContext(), VT);
    SDLoc DL(Op);
    if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
26440 // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
26441 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
26442 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
      return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
    }
26445 if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
26446 // usubsat X, Y --> (X >u Y) ? X - Y : 0
26447 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
26448 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
      return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
    }
    // Use default expansion.
    return SDValue();
  }
26455 if (VT == MVT::v32i16 || VT == MVT::v64i8)
26456 return splitVectorIntBinary(Op, DAG);
26458 assert(Op.getSimpleValueType().is256BitVector() &&
26459 Op.getSimpleValueType().isInteger() &&
26460 "Only handle AVX 256-bit vector integer operation");
  return splitVectorIntBinary(Op, DAG);
}
26464 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
26465 SelectionDAG &DAG) {
26466 MVT VT = Op.getSimpleValueType();
26467 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
26468 // Since X86 does not have CMOV for 8-bit integer, we don't convert
26469 // 8-bit integer abs to NEG and CMOV.
    SDLoc DL(Op);
    SDValue N0 = Op.getOperand(0);
26472 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
26473 DAG.getConstant(0, DL, VT), N0);
26474 SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
26475 SDValue(Neg.getNode(), 1)};
    return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
  }
26479 // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
26480 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
    SDLoc DL(Op);
    SDValue Src = Op.getOperand(0);
    SDValue Sub =
        DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
    return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
  }
26488 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
26489 assert(VT.isInteger() &&
26490 "Only handle AVX 256-bit vector integer operation");
    return splitVectorIntUnary(Op, DAG);
  }
26494 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
26495 return splitVectorIntUnary(Op, DAG);
  // Default to expand.
  return SDValue();
}
26501 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
26502 MVT VT = Op.getSimpleValueType();
26504 // For AVX1 cases, split to use legal ops (everything but v4i64).
26505 if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
26506 return splitVectorIntBinary(Op, DAG);
26508 if (VT == MVT::v32i16 || VT == MVT::v64i8)
26509 return splitVectorIntBinary(Op, DAG);
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();
26513 SDValue N0 = Op.getOperand(0);
26514 SDValue N1 = Op.getOperand(1);
26516 // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
26517 // using the SMIN/SMAX instructions and flipping the signbit back.
26518 if (VT == MVT::v8i16) {
26519 assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
26520 "Unexpected MIN/MAX opcode");
26521 SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
26522 N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
26523 N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
26524 Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
26525 SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
    return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
  }

  // Else, expand to a compare/select.
  ISD::CondCode CC;
  switch (Opcode) {
  case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
26533 case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
26534 case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
26535 case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
26536 default: llvm_unreachable("Unknown MINMAX opcode");
26539 SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
26540 return DAG.getSelect(DL, VT, Cond, N0, N1);
26543 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
26544 SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
26548 if (VT.getScalarType() == MVT::i1)
26549 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
26551 // Decompose 256-bit ops into 128-bit ops.
26552 if (VT.is256BitVector() && !Subtarget.hasInt256())
26553 return splitVectorIntBinary(Op, DAG);
26555 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
26556 return splitVectorIntBinary(Op, DAG);
26558 SDValue A = Op.getOperand(0);
26559 SDValue B = Op.getOperand(1);
26561 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
26562 // vector pairs, multiply and truncate.
26563 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
26564 unsigned NumElts = VT.getVectorNumElements();
26566 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
26567 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
26568 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
26569 return DAG.getNode(
26570 ISD::TRUNCATE, dl, VT,
26571 DAG.getNode(ISD::MUL, dl, ExVT,
26572 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
    }
26576 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26578 // Extract the lo/hi parts to any extend to i16.
26579 // We're going to mask off the low byte of each result element of the
    // pmullw, so it doesn't matter what's in the high byte of each 16-bit
    // element.
26582 SDValue Undef = DAG.getUNDEF(VT);
26583 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
    SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));

    SDValue BLo, BHi;
26587 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
26588 // If the LHS is a constant, manually unpackl/unpackh.
26589 SmallVector<SDValue, 16> LoOps, HiOps;
26590 for (unsigned i = 0; i != NumElts; i += 16) {
26591 for (unsigned j = 0; j != 8; ++j) {
          LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
                                               MVT::i16));
          HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
                                               MVT::i16));
        }
      }

      BLo = DAG.getBuildVector(ExVT, dl, LoOps);
      BHi = DAG.getBuildVector(ExVT, dl, HiOps);
    } else {
      BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
      BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
    }
26606 // Multiply, mask the lower 8bits of the lo/hi results and pack.
26607 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26608 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26609 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
26610 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
    return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
  }
26614 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
26615 if (VT == MVT::v4i32) {
26616 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
26617 "Should not custom lower when pmulld is available!");
26619 // Extract the odd parts.
26620 static const int UnpackMask[] = { 1, -1, 3, -1 };
26621 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
26622 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
26624 // Multiply the even parts.
26625 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
26626 DAG.getBitcast(MVT::v2i64, A),
26627 DAG.getBitcast(MVT::v2i64, B));
26628 // Now multiply odd parts.
26629 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
26630 DAG.getBitcast(MVT::v2i64, Aodds),
26631 DAG.getBitcast(MVT::v2i64, Bodds));
26633 Evens = DAG.getBitcast(VT, Evens);
26634 Odds = DAG.getBitcast(VT, Odds);
    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
26638 static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }
26642 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
26643 "Only know how to lower V2I64/V4I64/V8I64 multiply");
26644 assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
26646 // Ahi = psrlqi(a, 32);
26647 // Bhi = psrlqi(b, 32);
26649 // AloBlo = pmuludq(a, b);
26650 // AloBhi = pmuludq(a, Bhi);
26651 // AhiBlo = pmuludq(Ahi, b);
26653 // Hi = psllqi(AloBhi + AhiBlo, 32);
26654 // return AloBlo + Hi;
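  //
  // With A = Alo + 2^32*Ahi and B = Blo + 2^32*Bhi, the low 64 bits of A*B are
  //   Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo)
  // (the Ahi*Bhi term is shifted entirely out of the 64-bit result).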
26655 KnownBits AKnown = DAG.computeKnownBits(A);
26656 KnownBits BKnown = DAG.computeKnownBits(B);
26658 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
26659 bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
26660 bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
26662 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
26663 bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
26664 bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
26666 SDValue Zero = DAG.getConstant(0, dl, VT);
26668 // Only multiply lo/hi halves that aren't known to be zero.
26669 SDValue AloBlo = Zero;
26670 if (!ALoIsZero && !BLoIsZero)
26671 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
26673 SDValue AloBhi = Zero;
26674 if (!ALoIsZero && !BHiIsZero) {
26675 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
26676 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
26679 SDValue AhiBlo = Zero;
26680 if (!AHiIsZero && !BLoIsZero) {
26681 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
26682 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
26685 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
26686 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
26688 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
26691 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
26692 SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
26695 bool IsSigned = Op->getOpcode() == ISD::MULHS;
26696 unsigned NumElts = VT.getVectorNumElements();
26697 SDValue A = Op.getOperand(0);
26698 SDValue B = Op.getOperand(1);
26700 // Decompose 256-bit ops into 128-bit ops.
26701 if (VT.is256BitVector() && !Subtarget.hasInt256())
26702 return splitVectorIntBinary(Op, DAG);
26704 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
26705 return splitVectorIntBinary(Op, DAG);
26707 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
26708 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
26709 (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
26710 (VT == MVT::v16i32 && Subtarget.hasAVX512()));
26712 // PMULxD operations multiply each even value (starting at 0) of LHS with
26713 // the related value of RHS and produce a widen result.
26714 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26715 // => <2 x i64> <ae|cg>
    // In other words, to have all the results, we need to perform two PMULxD:
    // 1. one with the even values.
    // 2. one with the odd values.
    // To achieve #2, we need to place the odd values at an even position.
26722 // Place the odd value at an even position (basically, shift all values 1
26723 // step to the left):
26724 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
26725 9, -1, 11, -1, 13, -1, 15, -1};
26726 // <a|b|c|d> => <b|undef|d|undef>
26727 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
26728 makeArrayRef(&Mask[0], NumElts));
26729 // <e|f|g|h> => <f|undef|h|undef>
26730 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
26731 makeArrayRef(&Mask[0], NumElts));
    // Emit two multiplies, one for the lower 2 ints and one for the higher 2
    // ints.
    MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
    unsigned Opcode =
        (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
26738 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26739 // => <2 x i64> <ae|cg>
26740 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26741 DAG.getBitcast(MulVT, A),
26742 DAG.getBitcast(MulVT, B)));
26743 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
26744 // => <2 x i64> <bf|dh>
26745 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26746 DAG.getBitcast(MulVT, Odd0),
26747 DAG.getBitcast(MulVT, Odd1)));
26749 // Shuffle it back into the right order.
26750 SmallVector<int, 16> ShufMask(NumElts);
26751 for (int i = 0; i != (int)NumElts; ++i)
26752 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
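    // For v4i32 this picks the high half of every 64-bit product:
    // Mul1 = <lo(ae),hi(ae),lo(cg),hi(cg)>, Mul2 = <lo(bf),hi(bf),lo(dh),hi(dh)>,
    // and the mask {1, 5, 3, 7} yields <hi(ae),hi(bf),hi(cg),hi(dh)>.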
26754 SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
26756 // If we have a signed multiply but no PMULDQ fix up the result of an
26757 // unsigned multiply.
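    // mulhs(A, B) == mulhu(A, B) - (A < 0 ? B : 0) - (B < 0 ? A : 0), since
    // reinterpreting a negative lane as unsigned adds 2^BitWidth to its value.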
26758 if (IsSigned && !Subtarget.hasSSE41()) {
26759 SDValue Zero = DAG.getConstant(0, dl, VT);
26760 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
26761 DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
26762 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
26763 DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
26765 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
    }

    return Res;
  }
26772 // Only i8 vectors should need custom lowering after this.
26773 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
26774 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
26775 "Unsupported vector type");
26777 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
26778 // logical shift down the upper half and pack back to i8.
26780 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
26781 // and then ashr/lshr the upper bits down to the lower bits before multiply.
26782 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
26784 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
26785 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
26786 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26787 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
26788 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
26789 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
26790 Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
  }
26794 // For vXi8 we will unpack the low and high half of each 128 bit lane to widen
26795 // to a vXi16 type. Do the multiplies, shift the results and pack the half
26796 // lane results back together.
26798 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26800 static const int PSHUFDMask[] = { 8, 9, 10, 11, 12, 13, 14, 15,
26801 -1, -1, -1, -1, -1, -1, -1, -1};
26803 // Extract the lo parts and zero/sign extend to i16.
26804 // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
26805 // shifts to sign extend. Using unpack for unsigned only requires an xor to
  // create zeros and a copy due to tied registers constraints pre-avx. But using
  // zero_extend_vector_inreg would require an additional pshufd for the high
  // part.

  SDValue ALo, AHi;
  if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26812 ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
26814 AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
26815 AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
26816 } else if (IsSigned) {
26817 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
26818 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
26820 ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
    AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
  } else {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
26824 DAG.getConstant(0, dl, VT)));
26825 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
26826 DAG.getConstant(0, dl, VT)));
  }

  SDValue BLo, BHi;
  if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
26831 // If the LHS is a constant, manually unpackl/unpackh and extend.
26832 SmallVector<SDValue, 16> LoOps, HiOps;
26833 for (unsigned i = 0; i != NumElts; i += 16) {
26834 for (unsigned j = 0; j != 8; ++j) {
26835 SDValue LoOp = B.getOperand(i + j);
26836 SDValue HiOp = B.getOperand(i + j + 8);
        if (IsSigned) {
          LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
26840 HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
        } else {
          LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
26843 HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
        }

        LoOps.push_back(LoOp);
26847 HiOps.push_back(HiOp);
      }
    }

    BLo = DAG.getBuildVector(ExVT, dl, LoOps);
26852 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
26853 } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26854 BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
26856 BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
26857 BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
26858 } else if (IsSigned) {
26859 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
26860 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
26862 BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
26863 BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
  } else {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
26866 DAG.getConstant(0, dl, VT)));
26867 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
26868 DAG.getConstant(0, dl, VT)));
  }

  // Multiply, lshr the upper 8bits to the lower 8bits of the lo/hi results and
26872 // pack back to vXi8.
26873 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26874 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26875 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
26876 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
26878 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
  return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
26882 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
26883 assert(Subtarget.isTargetWin64() && "Unexpected target");
26884 EVT VT = Op.getValueType();
26885 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
26886 "Unexpected return type for lowering");
  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
26891 default: llvm_unreachable("Unexpected request for libcall!");
26892 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
26893 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
26894 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
26895 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
26896 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
26897 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();
26903 TargetLowering::ArgListTy Args;
26904 TargetLowering::ArgListEntry Entry;
26905 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
26906 EVT ArgVT = Op->getOperand(i).getValueType();
26907 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
26908 "Unexpected argument type for lowering");
26909 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
26910 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
26911 MachinePointerInfo MPI =
26912 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
26913 Entry.Node = StackPtr;
26914 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
26915 MPI, /* Alignment = */ 16);
26916 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26917 Entry.Ty = PointerType::get(ArgTy,0);
26918 Entry.IsSExt = false;
26919 Entry.IsZExt = false;
26920 Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
26924 getPointerTy(DAG.getDataLayout()));
26926 TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(
          getLibcallCallingConv(LC),
          static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
          std::move(Args))
      .setInRegister()
      .setSExtResult(isSigned)
26935 .setZExtResult(!isSigned);
26937 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getBitcast(VT, CallInfo.first);
}
26941 // Return true if the required (according to Opcode) shift-imm form is natively
26942 // supported by the Subtarget
static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
                                        unsigned Opcode) {
  if (VT.getScalarSizeInBits() < 16)
    return false;

  if (VT.is512BitVector() && Subtarget.hasAVX512() &&
      (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
    return true;

26952 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
26953 (VT.is256BitVector() && Subtarget.hasInt256());
26955 bool AShift = LShift && (Subtarget.hasAVX512() ||
26956 (VT != MVT::v2i64 && VT != MVT::v4i64));
  return (Opcode == ISD::SRA) ? AShift : LShift;
}
26960 // The shift amount is a variable, but it is the same for all vector lanes.
26961 // These instructions are defined together with shift-immediate.
static
bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
                                      unsigned Opcode) {
  return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}
26968 // Return true if the required (according to Opcode) variable-shift form is
26969 // natively supported by the Subtarget
static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
                                    unsigned Opcode) {

  if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
    return false;

  // vXi16 supported only on AVX-512, BWI
  if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
    return false;

  if (Subtarget.hasAVX512())
    return true;

26983 bool LShift = VT.is128BitVector() || VT.is256BitVector();
26984 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
  return (Opcode == ISD::SRA) ? AShift : LShift;
}
26988 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
26989 const X86Subtarget &Subtarget) {
26990 MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
26993 SDValue Amt = Op.getOperand(1);
26994 unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
26996 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
26997 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
26998 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
26999 SDValue Ex = DAG.getBitcast(ExVT, R);
27001 // ashr(R, 63) === cmp_slt(R, 0)
27002 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
27003 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
27004 "Unsupported PCMPGT op");
      return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
    }
27008 if (ShiftAmt >= 32) {
27009 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
      SDValue Upper =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
27012 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
27013 ShiftAmt - 32, DAG);
27014 if (VT == MVT::v2i64)
27015 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
27016 if (VT == MVT::v4i64)
27017 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
27018 {9, 1, 11, 3, 13, 5, 15, 7});
    } else {
      // SRA upper i32, SRL whole i64 and select lower i32.
      SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 31, DAG);
      SDValue Lower =
          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
27025 Lower = DAG.getBitcast(ExVT, Lower);
27026 if (VT == MVT::v2i64)
27027 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
27028 if (VT == MVT::v4i64)
27029 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
27030 {8, 1, 10, 3, 12, 5, 14, 7});
    }
    return DAG.getBitcast(VT, Ex);
  };
27035 // Optimize shl/srl/sra with constant shift amount.
27036 APInt APIntShiftAmt;
  if (!X86::isConstantSplat(Amt, APIntShiftAmt))
    return SDValue();
27040 // If the shift amount is out of range, return undef.
27041 if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
27042 return DAG.getUNDEF(VT);
27044 uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
27046 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
27047 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
27049 // i64 SRA needs to be performed as partial shifts.
27050 if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
27051 (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
27052 Op.getOpcode() == ISD::SRA)
27053 return ArithmeticShiftRight64(ShiftAmt);
27055 if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
27056 (Subtarget.hasBWI() && VT == MVT::v64i8)) {
27057 unsigned NumElts = VT.getVectorNumElements();
27058 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27060 // Simple i8 add case
27061 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
27062 return DAG.getNode(ISD::ADD, dl, VT, R, R);
27064 // ashr(R, 7) === cmp_slt(R, 0)
27065 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
27066 SDValue Zeros = DAG.getConstant(0, dl, VT);
27067 if (VT.is512BitVector()) {
27068 assert(VT == MVT::v64i8 && "Unexpected element type!");
27069 SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
27070 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
27072 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
27075 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
    if (VT == MVT::v16i8 && Subtarget.hasXOP())
      return SDValue();
27079 if (Op.getOpcode() == ISD::SHL) {
27080 // Make a large shift.
      SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
                                               ShiftAmt, DAG);
27083 SHL = DAG.getBitcast(VT, SHL);
27084 // Zero out the rightmost bits.
27085 APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
      return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
    }
27088 if (Op.getOpcode() == ISD::SRL) {
27089 // Make a large shift.
      SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
                                               ShiftAmt, DAG);
27092 SRL = DAG.getBitcast(VT, SRL);
27093 // Zero out the leftmost bits.
27094 return DAG.getNode(ISD::AND, dl, VT, SRL,
                         DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
    }
27097 if (Op.getOpcode() == ISD::SRA) {
27098 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
27099 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
27101 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
27102 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
      return Res;
    }
    llvm_unreachable("Unknown shift opcode.");
  }

  return SDValue();
}
27112 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
27113 const X86Subtarget &Subtarget) {
27114 MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
27117 SDValue Amt = Op.getOperand(1);
27118 unsigned Opcode = Op.getOpcode();
27119 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
27120 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
27122 if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
27123 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
27124 MVT EltVT = VT.getVectorElementType();
27125 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
27126 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
27127 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
27128 else if (EltVT.bitsLT(MVT::i32))
27129 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
      return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
    }
27134 // vXi8 shifts - shift as v8i16 + mask result.
27135 if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
27136 (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
27137 VT == MVT::v64i8) &&
27138 !Subtarget.hasXOP()) {
27139 unsigned NumElts = VT.getVectorNumElements();
27140 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27141 if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
27142 unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
27143 unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
27144 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
27146 // Create the mask using vXi16 shifts. For shift-rights we need to move
27147 // the upper byte down before splatting the vXi8 mask.
27148 SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
27149 BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
27150 BaseShAmt, Subtarget, DAG);
27151 if (Opcode != ISD::SHL)
        BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
                                             8, DAG);
27154 BitMask = DAG.getBitcast(VT, BitMask);
27155 BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
27156 SmallVector<int, 64>(NumElts, 0));
27158 SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
                                        DAG.getBitcast(ExtVT, R), BaseShAmt,
                                        Subtarget, DAG);
27161 Res = DAG.getBitcast(VT, Res);
27162 Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
27164 if (Opcode == ISD::SRA) {
27165 // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
27166 // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
27167 SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
27168 SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
27169 BaseShAmt, Subtarget, DAG);
27170 SignMask = DAG.getBitcast(VT, SignMask);
27171 Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
        }
        return Res;
      }
    }
  }
27179 // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
27180 if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
27181 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
27182 Amt = Amt.getOperand(0);
27183 unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
27184 std::vector<SDValue> Vals(Ratio);
27185 for (unsigned i = 0; i != Ratio; ++i)
27186 Vals[i] = Amt.getOperand(i);
27187 for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
27188 for (unsigned j = 0; j != Ratio; ++j)
        if (Vals[j] != Amt.getOperand(i + j))
          return SDValue();
    }
27193 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
      return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
  }
  return SDValue();
}
27199 // Convert a shift/rotate left amount to a multiplication scale factor.
27200 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
27201 const X86Subtarget &Subtarget,
27202 SelectionDAG &DAG) {
27203 MVT VT = Amt.getSimpleValueType();
27204 if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
27205 (Subtarget.hasInt256() && VT == MVT::v16i16) ||
        (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
    return SDValue();
27209 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
27210 SmallVector<SDValue, 8> Elts;
27211 MVT SVT = VT.getVectorElementType();
27212 unsigned SVTBits = SVT.getSizeInBits();
27213 APInt One(SVTBits, 1);
27214 unsigned NumElems = VT.getVectorNumElements();
27216 for (unsigned i = 0; i != NumElems; ++i) {
27217 SDValue Op = Amt->getOperand(i);
27218 if (Op->isUndef()) {
        Elts.push_back(Op);
        continue;
      }

27223 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
27224 APInt C(SVTBits, ND->getZExtValue());
27225 uint64_t ShAmt = C.getZExtValue();
27226 if (ShAmt >= SVTBits) {
        Elts.push_back(DAG.getUNDEF(SVT));
        continue;
      }
      Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
    }
    return DAG.getBuildVector(VT, dl, Elts);
  }
27235 // If the target doesn't support variable shifts, use either FP conversion
27236 // or integer multiplication to avoid shifting each element individually.
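  // For the v4i32 case below, adding (Amt << 23) to the bit pattern of 1.0f
  // (0x3f800000, biased exponent 127) bumps the float's exponent by Amt, giving
  // exactly the value 2^Amt, which FP_TO_SINT turns back into an integer scale.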
27237 if (VT == MVT::v4i32) {
27238 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
27239 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
27240 DAG.getConstant(0x3f800000U, dl, VT));
27241 Amt = DAG.getBitcast(MVT::v4f32, Amt);
    return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
  }
27245 // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
27246 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
27247 SDValue Z = DAG.getConstant(0, dl, VT);
27248 SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
27249 SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
27250 Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
27251 Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
27252 if (Subtarget.hasSSE41())
27253 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
27255 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
27256 DAG.getBitcast(VT, Hi),
                                {0, 2, 4, 6, 8, 10, 12, 14});
  }

  return SDValue();
}
27263 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
27264 SelectionDAG &DAG) {
27265 MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
27268 SDValue Amt = Op.getOperand(1);
27269 unsigned EltSizeInBits = VT.getScalarSizeInBits();
27270 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27272 unsigned Opc = Op.getOpcode();
27273 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
27274 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
27276 assert(VT.isVector() && "Custom lowering only for vector shifts!");
27277 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
  if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
    return V;

  if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
    return V;

  if (SupportedVectorVarShift(VT, Subtarget, Opc))
    return Op;
27288 // XOP has 128-bit variable logical/arithmetic shifts.
27289 // +ve/-ve Amt = shift left/right.
27290 if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
27291 VT == MVT::v8i16 || VT == MVT::v16i8)) {
27292 if (Opc == ISD::SRL || Opc == ISD::SRA) {
27293 SDValue Zero = DAG.getConstant(0, dl, VT);
      Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
    }
27296 if (Opc == ISD::SHL || Opc == ISD::SRL)
27297 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
27298 if (Opc == ISD::SRA)
      return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
  }
27302 // 2i64 vector logical shifts can efficiently avoid scalarization - do the
27303 // shifts per-lane and then shuffle the partial results back together.
27304 if (VT == MVT::v2i64 && Opc != ISD::SRA) {
27305 // Splat the shift amounts so the scalar shifts above will catch it.
27306 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
27307 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
27308 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
27309 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
    return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
  }
27313 // i64 vector arithmetic shift can be emulated with the transform:
27314 // M = lshr(SIGN_MASK, Amt)
27315 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
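  // E.g. for i8, ashr(0xF0, 4): lshr gives 0x0F, M == 0x80 >> 4 == 0x08,
  // xor gives 0x07, and 0x07 - 0x08 == 0xFF, i.e. the sign-extended result -1.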
  if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
      Opc == ISD::SRA) {
27318 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
27319 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
27320 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
27321 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
    R = DAG.getNode(ISD::SUB, dl, VT, R, M);
    return R;
  }
27326 // If possible, lower this shift as a sequence of two shifts by
27327 // constant plus a BLENDing shuffle instead of scalarizing it.
27329 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
27331 // Could be rewritten as:
27332 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
27334 // The advantage is that the two shifts from the example would be
27335 // lowered as X86ISD::VSRLI nodes in parallel before blending.
27336 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
27337 (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
27338 SDValue Amt1, Amt2;
27339 unsigned NumElts = VT.getVectorNumElements();
27340 SmallVector<int, 8> ShuffleMask;
27341 for (unsigned i = 0; i != NumElts; ++i) {
      SDValue A = Amt->getOperand(i);
      if (A.isUndef()) {
        ShuffleMask.push_back(SM_SentinelUndef);
        continue;
      }
      if (!Amt1 || Amt1 == A) {
        ShuffleMask.push_back(i);
        Amt1 = A;
        continue;
      }
      if (!Amt2 || Amt2 == A) {
        ShuffleMask.push_back(i + NumElts);
        Amt2 = A;
        continue;
      }
      break;
    }
27360 // Only perform this blend if we can perform it without loading a mask.
27361 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
27362 (VT != MVT::v16i16 ||
27363 is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
27364 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
27365 canWidenShuffleElements(ShuffleMask))) {
27366 auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
27367 auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
27368 if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
27369 Cst2->getAPIntValue().ult(EltSizeInBits)) {
27370 SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
27371 Cst1->getZExtValue(), DAG);
27372 SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
27373 Cst2->getZExtValue(), DAG);
        return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
      }
    }
  }
27379 // If possible, lower this packed shift into a vector multiply instead of
27380 // expanding it into a sequence of scalar shifts.
27381 if (Opc == ISD::SHL)
27382 if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
27383 return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
27385 // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
27386 // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
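  // i.e. srl(x, Amt) == (x * 2^(16 - Amt)) >> 16 == mulhu(x, 2^(16 - Amt)) for
  // 1 <= Amt <= 15; lanes with Amt == 0 are handled by the select below.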
27387 if (Opc == ISD::SRL && ConstantAmt &&
27388 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
27389 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
27390 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
27391 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
27392 SDValue Zero = DAG.getConstant(0, dl, VT);
27393 SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
27394 SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
      return DAG.getSelect(dl, VT, ZAmt, R, Res);
    }
  }
27399 // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
27400 // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
27401 // TODO: Special case handling for shift by 0/1, really we can afford either
27402 // of these cases in pre-SSE41/XOP/AVX512 but not both.
27403 if (Opc == ISD::SRA && ConstantAmt &&
27404 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
27405 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
27406 !Subtarget.hasAVX512()) ||
27407 DAG.isKnownNeverZero(Amt))) {
27408 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
27409 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
27410 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
      SDValue Amt0 =
          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
      SDValue Amt1 =
          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
      SDValue Sra1 =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
27417 SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
27418 Res = DAG.getSelect(dl, VT, Amt0, R, Res);
      return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
    }
  }
27423 // v4i32 Non Uniform Shifts.
27424 // If the shift amount is constant we can shift each lane using the SSE2
27425 // immediate shifts, else we need to zero-extend each lane to the lower i64
27426 // and shift using the SSE2 variable shifts.
27427 // The separate results can then be blended together.
27428 if (VT == MVT::v4i32) {
27429 SDValue Amt0, Amt1, Amt2, Amt3;
    if (ConstantAmt) {
      Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
27432 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
27433 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
27434 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
    } else {
      // The SSE2 shifts use the lower i64 as the same shift amount for
27437 // all lanes and the upper i64 is ignored. On AVX we're better off
27438 // just zero-extending, but for SSE just duplicating the top 16-bits is
27439 // cheaper and has the same effect for out of range values.
27440 if (Subtarget.hasAVX()) {
27441 SDValue Z = DAG.getConstant(0, dl, VT);
27442 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
27443 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
27444 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
27445 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
      } else {
        SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
27448 SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
27449 {4, 5, 6, 7, -1, -1, -1, -1});
27450 Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
27451 {0, 1, 1, 1, -1, -1, -1, -1});
27452 Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
27453 {2, 3, 3, 3, -1, -1, -1, -1});
27454 Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
27455 {0, 1, 1, 1, -1, -1, -1, -1});
27456 Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
27457 {2, 3, 3, 3, -1, -1, -1, -1});
      }
    }

    unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
27462 SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
27463 SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
27464 SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
27465 SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
27467 // Merge the shifted lane results optimally with/without PBLENDW.
27468 // TODO - ideally shuffle combining would handle this.
27469 if (Subtarget.hasSSE41()) {
27470 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
27471 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
27472 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
    }
    SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
27475 SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
    return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
  }
27479 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
27480 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
27481 // make the existing SSE solution better.
  // NOTE: We honor the preferred vector width before promoting to 512-bits.
27483 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
27484 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
27485 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
27486 (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
27487 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
27488 assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
27489 "Unexpected vector type");
27490 MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
27491 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
27492 unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
27493 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
27494 Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
27495 return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Opc, dl, ExtVT, R, Amt));
  }
27499 // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
27500 // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
27501 if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
27502 (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
27503 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
27504 !Subtarget.hasXOP()) {
27505 int NumElts = VT.getVectorNumElements();
27506 SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
    // Extend constant shift amount to vXi16 (it doesn't matter if the type
    // isn't legal).
27510 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
27511 Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
27512 Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
27513 Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
27514 assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
27515 "Constant build vector expected");
27517 if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
27518 R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
27519 : DAG.getZExtOrTrunc(R, dl, ExVT);
27520 R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
27521 R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
      return DAG.getZExtOrTrunc(R, dl, VT);
    }
27525 SmallVector<SDValue, 16> LoAmt, HiAmt;
27526 for (int i = 0; i != NumElts; i += 16) {
27527 for (int j = 0; j != 8; ++j) {
27528 LoAmt.push_back(Amt.getOperand(i + j));
        HiAmt.push_back(Amt.getOperand(i + j + 8));
      }
    }
27533 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
27534 SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
27535 SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
27537 SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
27538 SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
27539 LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
27540 HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
27541 LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
27542 HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
27543 LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
27544 HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
    return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
  }
27548 if (VT == MVT::v16i8 ||
27549 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
27550 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
27551 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
27553 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
27554 if (VT.is512BitVector()) {
27555 // On AVX512BW targets we make use of the fact that VSELECT lowers
27556 // to a masked blend which selects bytes based just on the sign bit
27557 // extracted to a mask.
27558 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
27559 V0 = DAG.getBitcast(VT, V0);
27560 V1 = DAG.getBitcast(VT, V1);
27561 Sel = DAG.getBitcast(VT, Sel);
        Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
                           ISD::SETGT);
27564 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
27565 } else if (Subtarget.hasSSE41()) {
27566 // On SSE41 targets we can use PBLENDVB which selects bytes based just
27567 // on the sign bit.
27568 V0 = DAG.getBitcast(VT, V0);
27569 V1 = DAG.getBitcast(VT, V1);
27570 Sel = DAG.getBitcast(VT, Sel);
27571 return DAG.getBitcast(SelVT,
27572 DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
27574 // On pre-SSE41 targets we test for the sign bit by comparing to
27575 // zero - a negative value will set all bits of the lanes to true
27576 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
27577 SDValue Z = DAG.getConstant(0, dl, SelVT);
27578 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
27579 return DAG.getSelect(dl, SelVT, C, V0, V1);
27582 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
27583 // We can safely do this using i16 shifts as we're only interested in
27584 // the 3 lower bits of each byte.
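    // For example, a shift amount of 5 (0b101) becomes 0xA0 after the << 5:
    // its sign bit selects the "shift by 4" result, the doubled mask then has
    // a clear sign bit so the "shift by 2" step is skipped, and the final
    // doubling selects the "shift by 1" step, giving a total shift of 5.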
27585 Amt = DAG.getBitcast(ExtVT, Amt);
27586 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
27587 Amt = DAG.getBitcast(VT, Amt);
27589 if (Opc == ISD::SHL || Opc == ISD::SRL) {
27590 // r = VSELECT(r, shift(r, 4), a);
27591 SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
27592 R = SignBitSelect(VT, Amt, M, R);
27595 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27597 // r = VSELECT(r, shift(r, 2), a);
27598 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
27599 R = SignBitSelect(VT, Amt, M, R);
27602 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27604 // return VSELECT(r, shift(r, 1), a);
27605 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
27606 R = SignBitSelect(VT, Amt, M, R);
27610 if (Opc == ISD::SRA) {
    // For SRA we need to unpack each byte to the higher byte of an i16 vector
    // so we can correctly sign extend. We don't care what happens to the
    // lower byte.
27614 SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
27615 SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
27616 SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
27617 SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
27618 ALo = DAG.getBitcast(ExtVT, ALo);
27619 AHi = DAG.getBitcast(ExtVT, AHi);
27620 RLo = DAG.getBitcast(ExtVT, RLo);
27621 RHi = DAG.getBitcast(ExtVT, RHi);
27623 // r = VSELECT(r, shift(r, 4), a);
27624 SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
27625 SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
27626 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27627 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27630 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
27631 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
27633 // r = VSELECT(r, shift(r, 2), a);
27634 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
27635 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
27636 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27637 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27640 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
27641 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
27643 // r = VSELECT(r, shift(r, 1), a);
27644 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
27645 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
27646 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27647 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27649 // Logical shift the result back to the lower byte, leaving a zero upper
27650 // byte meaning that we can safely pack with PACKUSWB.
27651 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
27652 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
27653 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
27657 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
27658 MVT ExtVT = MVT::v8i32;
27659 SDValue Z = DAG.getConstant(0, dl, VT);
27660 SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
27661 SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
27662 SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
27663 SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
27664 ALo = DAG.getBitcast(ExtVT, ALo);
27665 AHi = DAG.getBitcast(ExtVT, AHi);
27666 RLo = DAG.getBitcast(ExtVT, RLo);
27667 RHi = DAG.getBitcast(ExtVT, RHi);
27668 SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
27669 SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
27670 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
27671 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
27672 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
27675 if (VT == MVT::v8i16) {
27676 // If we have a constant shift amount, the non-SSE41 path is best as
    // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
27678 bool UseSSE41 = Subtarget.hasSSE41() &&
27679 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27681 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
27682 // On SSE41 targets we can use PBLENDVB which selects bytes based just on
27685 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
27686 V0 = DAG.getBitcast(ExtVT, V0);
27687 V1 = DAG.getBitcast(ExtVT, V1);
27688 Sel = DAG.getBitcast(ExtVT, Sel);
27689 return DAG.getBitcast(
27690 VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
27692 // On pre-SSE41 targets we splat the sign bit - a negative value will
27693 // set all bits of the lanes to true and VSELECT uses that in
27694 // its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue C =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
27697 return DAG.getSelect(dl, VT, C, V0, V1);
    // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
    if (UseSSE41) {
      // On SSE41 targets we need to replicate the shift mask in both
      // bytes for PBLENDVB.
      Amt = DAG.getNode(
          ISD::OR, dl, VT,
          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
    } else {
      Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
    }
27712 // r = VSELECT(r, shift(r, 8), a);
27713 SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
27714 R = SignBitSelect(Amt, M, R);
27717 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27719 // r = VSELECT(r, shift(r, 4), a);
27720 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
27721 R = SignBitSelect(Amt, M, R);
27724 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27726 // r = VSELECT(r, shift(r, 2), a);
27727 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
27728 R = SignBitSelect(Amt, M, R);
27731 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27733 // return VSELECT(r, shift(r, 1), a);
27734 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
27735 R = SignBitSelect(Amt, M, R);
27739 // Decompose 256-bit shifts into 128-bit shifts.
27740 if (VT.is256BitVector())
27741 return splitVectorIntBinary(Op, DAG);
27743 if (VT == MVT::v32i16 || VT == MVT::v64i8)
27744 return splitVectorIntBinary(Op, DAG);
27749 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
27750 SelectionDAG &DAG) {
27751 MVT VT = Op.getSimpleValueType();
27752 assert(VT.isVector() && "Custom lowering only for vector rotates!");
27755 SDValue R = Op.getOperand(0);
27756 SDValue Amt = Op.getOperand(1);
27757 unsigned Opcode = Op.getOpcode();
27758 unsigned EltSizeInBits = VT.getScalarSizeInBits();
27759 int NumElts = VT.getVectorNumElements();
27761 // Check for constant splat rotation amount.
27762 APInt CstSplatValue;
27763 bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
27765 // Check for splat rotate by zero.
  if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
    return R;
27769 // AVX512 implicitly uses modulo rotation amounts.
27770 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
    // Attempt to rotate by immediate.
    if (IsCstSplat) {
      unsigned RotOpc = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
      uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
      return DAG.getNode(RotOpc, DL, VT, R,
                         DAG.getTargetConstant(RotAmt, DL, MVT::i8));
    }

    // Else, fall-back on VPROLV/VPRORV.
    return Op;
  }
27783 assert((Opcode == ISD::ROTL) && "Only ROTL supported");
27785 // XOP has 128-bit vector variable + immediate rotates.
27786 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
27787 // XOP implicitly uses modulo rotation amounts.
27788 if (Subtarget.hasXOP()) {
27789 if (VT.is256BitVector())
27790 return splitVectorIntBinary(Op, DAG);
27791 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
    // Attempt to rotate by immediate.
    if (IsCstSplat) {
      uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
      return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
                         DAG.getTargetConstant(RotAmt, DL, MVT::i8));
    }

    // Use general rotate by variable (per-element).
    return Op;
  }
27804 // Split 256-bit integers on pre-AVX2 targets.
27805 if (VT.is256BitVector() && !Subtarget.hasAVX2())
27806 return splitVectorIntBinary(Op, DAG);
27808 assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
27809 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
27810 Subtarget.hasAVX2())) &&
27811 "Only vXi32/vXi16/vXi8 vector rotates supported");
  // Rotate by a uniform constant - expand back to shifts.
27817 bool IsSplatAmt = DAG.isSplatValue(Amt);
27819 // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
27821 if (EltSizeInBits == 8 && !IsSplatAmt) {
    if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
      return SDValue();
27825 // We don't need ModuloAmt here as we just peek at individual bits.
27826 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27828 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
27829 if (Subtarget.hasSSE41()) {
27830 // On SSE41 targets we can use PBLENDVB which selects bytes based just
27831 // on the sign bit.
27832 V0 = DAG.getBitcast(VT, V0);
27833 V1 = DAG.getBitcast(VT, V1);
27834 Sel = DAG.getBitcast(VT, Sel);
27835 return DAG.getBitcast(SelVT,
27836 DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
27838 // On pre-SSE41 targets we test for the sign bit by comparing to
27839 // zero - a negative value will set all bits of the lanes to true
27840 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
27841 SDValue Z = DAG.getConstant(0, DL, SelVT);
27842 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
27843 return DAG.getSelect(DL, SelVT, C, V0, V1);
27846 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
27847 // We can safely do this using i16 shifts as we're only interested in
27848 // the 3 lower bits of each byte.
27849 Amt = DAG.getBitcast(ExtVT, Amt);
27850 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
27851 Amt = DAG.getBitcast(VT, Amt);
27853 // r = VSELECT(r, rot(r, 4), a);
27857 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
27858 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
27859 R = SignBitSelect(VT, Amt, M, R);
27862 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27864 // r = VSELECT(r, rot(r, 2), a);
27867 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
27868 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
27869 R = SignBitSelect(VT, Amt, M, R);
27872 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27874 // return VSELECT(r, rot(r, 1), a);
27877 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
27878 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
27879 return SignBitSelect(VT, Amt, M, R);
27882 // ISD::ROT* uses modulo rotate amounts.
27883 Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
27884 DAG.getConstant(EltSizeInBits - 1, DL, VT));
27886 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27887 bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
27888 SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
27890 // Fallback for splats + all supported variable shifts.
  // Fallback for non-constant AVX2 vXi16 as well.
27892 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
27893 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
27894 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
27895 SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
27896 SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
27897 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
27900 // As with shifts, convert the rotation amount to a multiplication factor.
27901 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
27902 assert(Scale && "Failed to convert ROTL amount to scale");
27904 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
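  // For example, rotating the v8i16 lane 0x8001 left by 1 uses Scale = 2:
  // MUL gives 0x0002, MULHU gives 0x0001, and OR'ing them yields 0x0003,
  // which is rotl16(0x8001, 1).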
27905 if (EltSizeInBits == 16) {
27906 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
27907 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
27908 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27911 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
27912 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
27913 // that can then be OR'd with the lower 32-bits.
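  // For example, rotating the lane 0x80000001 left by 4 uses Scale = 16: the
  // 64-bit product 0x800000010 has 0x00000010 in its lower half and the
  // wrapped bits 0x00000008 in its upper half, and OR'ing them yields
  // 0x00000018 == rotl32(0x80000001, 4).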
27914 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
27915 static const int OddMask[] = {1, -1, 3, -1};
27916 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
27917 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
27919 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27920 DAG.getBitcast(MVT::v2i64, R),
27921 DAG.getBitcast(MVT::v2i64, Scale));
27922 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27923 DAG.getBitcast(MVT::v2i64, R13),
27924 DAG.getBitcast(MVT::v2i64, Scale13));
27925 Res02 = DAG.getBitcast(VT, Res02);
27926 Res13 = DAG.getBitcast(VT, Res13);
27928 return DAG.getNode(ISD::OR, DL, VT,
27929 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
27930 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
27933 /// Returns true if the operand type is exactly twice the native width, and
27934 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
27935 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
27936 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
27937 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
  unsigned OpWidth = MemType->getPrimitiveSizeInBits();

  if (OpWidth == 64)
    return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
27942 if (OpWidth == 128)
27943 return Subtarget.hasCmpxchg16b();
27948 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
27949 Type *MemType = SI->getValueOperand()->getType();
27951 bool NoImplicitFloatOps =
27952 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27953 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27954 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
      (Subtarget.hasSSE1() || Subtarget.hasX87()))
    return false;
27958 return needsCmpXchgNb(MemType);
27961 // Note: this turns large loads into lock cmpxchg8b/16b.
27962 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
27963 TargetLowering::AtomicExpansionKind
27964 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
27965 Type *MemType = LI->getType();
  // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
27968 // can use movq to do the load. If we have X87 we can load into an 80-bit
27969 // X87 register and store it to a stack temporary.
27970 bool NoImplicitFloatOps =
27971 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27972 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27973 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
27974 (Subtarget.hasSSE1() || Subtarget.hasX87()))
27975 return AtomicExpansionKind::None;
27977 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27978 : AtomicExpansionKind::None;
27981 TargetLowering::AtomicExpansionKind
27982 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
27983 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
27984 Type *MemType = AI->getType();
27986 // If the operand is too big, we must see if cmpxchg8/16b is available
27987 // and default to library calls otherwise.
27988 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
27989 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27990 : AtomicExpansionKind::None;
  AtomicRMWInst::BinOp Op = AI->getOperation();
  switch (Op) {
  default:
    llvm_unreachable("Unknown atomic operation");
27997 case AtomicRMWInst::Xchg:
27998 case AtomicRMWInst::Add:
27999 case AtomicRMWInst::Sub:
28000 // It's better to use xadd, xsub or xchg for these in all cases.
28001 return AtomicExpansionKind::None;
28002 case AtomicRMWInst::Or:
28003 case AtomicRMWInst::And:
28004 case AtomicRMWInst::Xor:
28005 // If the atomicrmw's result isn't actually used, we can just add a "lock"
28006 // prefix to a normal instruction for these operations.
28007 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
28008 : AtomicExpansionKind::None;
28009 case AtomicRMWInst::Nand:
28010 case AtomicRMWInst::Max:
28011 case AtomicRMWInst::Min:
28012 case AtomicRMWInst::UMax:
28013 case AtomicRMWInst::UMin:
28014 case AtomicRMWInst::FAdd:
28015 case AtomicRMWInst::FSub:
28016 // These always require a non-trivial set of data operations on x86. We must
28017 // use a cmpxchg loop.
28018 return AtomicExpansionKind::CmpXChg;
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
28024 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
28025 Type *MemType = AI->getType();
28026 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
28027 // there is no benefit in turning such RMWs into loads, and it is actually
28028 // harmful as it introduces a mfence.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return nullptr;
28032 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
28033 // lowering available in lowerAtomicArith.
28034 // TODO: push more cases through this path.
28035 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
    if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
        AI->use_empty())
      return nullptr;
28040 IRBuilder<> Builder(AI);
28041 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
28042 auto SSID = AI->getSyncScopeID();
28043 // We must restrict the ordering to avoid generating loads with Release or
28044 // ReleaseAcquire orderings.
28045 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
  // Before the load we need a fence. Here is an example lifted from
  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
  // is required:
  // Thread 0:
  //  x.store(1, relaxed);
  //  r1 = y.fetch_add(0, release);
  // Thread 1:
  //  y.fetch_add(42, acquire);
  //  r2 = x.load(relaxed);
  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
  // lowered to just a load without a fence. A mfence flushes the store buffer,
  // making the optimization clearly correct.
28059 // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
28060 // otherwise, we might be able to be more aggressive on relaxed idempotent
28061 // rmw. In practice, they do not look useful, so we don't try to be
28062 // especially clever.
28063 if (SSID == SyncScope::SingleThread)
28064 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
    // the IR level, so we must wrap it in an intrinsic.
    return nullptr;
28068 if (!Subtarget.hasMFence())
28069 // FIXME: it might make sense to use a locked operation here but on a
28070 // different cache-line to prevent cache-line bouncing. In practice it
28071 // is probably a small win, and x86 processors without mfence are rare
    // enough that we do not bother.
    return nullptr;

  Function *MFence =
      llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
28077 Builder.CreateCall(MFence, {});
28079 // Finally we can emit the atomic load.
  LoadInst *Loaded =
      Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
28082 Align(AI->getType()->getPrimitiveSizeInBits()));
28083 Loaded->setAtomic(Order, SSID);
28084 AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return Loaded;
28089 bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
  if (!SI.isUnordered())
    return false;
28092 return ExperimentalUnorderedISEL;
28094 bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
  if (!LI.isUnordered())
    return false;
28097 return ExperimentalUnorderedISEL;
28101 /// Emit a locked operation on a stack location which does not change any
28102 /// memory location, but does involve a lock prefix. Location is chosen to be
28103 /// a) very likely accessed only by a single thread to minimize cache traffic,
28104 /// and b) definitely dereferenceable. Returns the new Chain result.
28105 static SDValue emitLockedStackOp(SelectionDAG &DAG,
28106 const X86Subtarget &Subtarget,
28107 SDValue Chain, SDLoc DL) {
28108 // Implementation notes:
28109 // 1) LOCK prefix creates a full read/write reordering barrier for memory
28110 // operations issued by the current processor. As such, the location
28111 // referenced is not relevant for the ordering properties of the instruction.
  // See: Intel® 64 and IA-32 Architectures Software Developer's Manual,
28113 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
28114 // 2) Using an immediate operand appears to be the best encoding choice
28115 // here since it doesn't require an extra register.
28116 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
28117 // is small enough it might just be measurement noise.)
28118 // 4) When choosing offsets, there are several contributing factors:
28119 // a) If there's no redzone, we default to TOS. (We could allocate a cache
28120 // line aligned stack object to improve this case.)
28121 // b) To minimize our chances of introducing a false dependence, we prefer
28122 // to offset the stack usage from TOS slightly.
28123 // c) To minimize concerns about cross thread stack usage - in particular,
28124 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
28125 // captures state in the TOS frame and accesses it from many threads -
28126 // we want to use an offset such that the offset is in a distinct cache
28127 // line from the TOS frame.
28129 // For a general discussion of the tradeoffs and benchmark results, see:
28130 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
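  // With a 128-byte red zone this typically assembles to something like
  //   lock orl $0x0, -0x40(%rsp)
  // and to an offset of zero from RSP/ESP when no red zone is available.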
28132 auto &MF = DAG.getMachineFunction();
28133 auto &TFL = *Subtarget.getFrameLowering();
28134 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
28136 if (Subtarget.is64Bit()) {
28137 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
28139 DAG.getRegister(X86::RSP, MVT::i64), // Base
28140 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
28141 DAG.getRegister(0, MVT::i64), // Index
28142 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
28143 DAG.getRegister(0, MVT::i16), // Segment.
28146 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
28148 return SDValue(Res, 1);
28151 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
28153 DAG.getRegister(X86::ESP, MVT::i32), // Base
28154 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
28155 DAG.getRegister(0, MVT::i32), // Index
28156 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
28157 DAG.getRegister(0, MVT::i16), // Segment.
28161 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
28163 return SDValue(Res, 1);
28166 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
28167 SelectionDAG &DAG) {
  SDLoc dl(Op);
  AtomicOrdering FenceOrdering =
28170 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
28171 SyncScope::ID FenceSSID =
28172 static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
28174 // The only fence that needs an instruction is a sequentially-consistent
28175 // cross-thread fence.
28176 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
28177 FenceSSID == SyncScope::System) {
28178 if (Subtarget.hasMFence())
28179 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
28181 SDValue Chain = Op.getOperand(0);
28182 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
28185 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
28186 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
28189 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
28190 SelectionDAG &DAG) {
  MVT T = Op.getSimpleValueType();
  SDLoc DL(Op);
  unsigned Reg = 0;
  unsigned size = 0;
28195 switch(T.SimpleTy) {
28196 default: llvm_unreachable("Invalid value type!");
28197 case MVT::i8: Reg = X86::AL; size = 1; break;
28198 case MVT::i16: Reg = X86::AX; size = 2; break;
28199 case MVT::i32: Reg = X86::EAX; size = 4; break;
28201 assert(Subtarget.is64Bit() && "Node not type legal!");
28202 Reg = X86::RAX; size = 8;
28205 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
28206 Op.getOperand(2), SDValue());
28207 SDValue Ops[] = { cpIn.getValue(0),
28210 DAG.getTargetConstant(size, DL, MVT::i8),
28211 cpIn.getValue(1) };
28212 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
28213 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, T, MMO);
  SDValue cpOut =
      DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
28219 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
28220 MVT::i32, cpOut.getValue(2));
28221 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
28223 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
28224 cpOut, Success, EFLAGS.getValue(1));
28227 // Create MOVMSKB, taking into account whether we need to split for AVX1.
28228 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
28229 const X86Subtarget &Subtarget) {
28230 MVT InVT = V.getSimpleValueType();
28232 if (InVT == MVT::v64i8) {
28234 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
28235 Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
28236 Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
28237 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
28238 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
28239 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
28240 DAG.getConstant(32, DL, MVT::i8));
28241 return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
28243 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
28245 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
28246 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
28247 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
28248 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
28249 DAG.getConstant(16, DL, MVT::i8));
28250 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
28253 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
28256 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
28257 SelectionDAG &DAG) {
28258 SDValue Src = Op.getOperand(0);
28259 MVT SrcVT = Src.getSimpleValueType();
28260 MVT DstVT = Op.getSimpleValueType();
28262 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
28263 // half to v32i1 and concatenating the result.
28264 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
28265 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
28266 assert(Subtarget.hasBWI() && "Expected BWI target");
28268 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
28269 DAG.getIntPtrConstant(0, dl));
28270 Lo = DAG.getBitcast(MVT::v32i1, Lo);
28271 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
28272 DAG.getIntPtrConstant(1, dl));
28273 Hi = DAG.getBitcast(MVT::v32i1, Hi);
28274 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
28277 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
28278 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
28279 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
28280 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
28282 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
28283 V = getPMOVMSKB(DL, V, DAG, Subtarget);
28284 return DAG.getZExtOrTrunc(V, DL, DstVT);
28287 assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
28288 SrcVT == MVT::i64) && "Unexpected VT!");
28290 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28291 if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
28292 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
    // This conversion needs to be expanded.
    return SDValue();
28297 if (SrcVT.isVector()) {
    // Widen the input vector in the case of MVT::v2i32.
28299 // Example: from MVT::v2i32 to MVT::v4i32.
28300 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
28301 SrcVT.getVectorNumElements() * 2);
28302 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
28303 DAG.getUNDEF(SrcVT));
28305 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
28306 "Unexpected source type in LowerBITCAST");
28307 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
28310 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
28311 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
28313 if (DstVT == MVT::x86mmx)
28314 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
28316 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
28317 DAG.getIntPtrConstant(0, dl));
28320 /// Compute the horizontal sum of bytes in V for the elements of VT.
28322 /// Requires V to be a byte vector and VT to be an integer vector type with
28323 /// wider elements than V's type. The width of the elements of VT determines
/// how many bytes of V are summed horizontally to produce each element of the
/// result.
28326 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
28327 const X86Subtarget &Subtarget,
28328 SelectionDAG &DAG) {
28330 MVT ByteVecVT = V.getSimpleValueType();
28331 MVT EltVT = VT.getVectorElementType();
28332 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
28333 "Expected value to have byte element type.");
28334 assert(EltVT != MVT::i8 &&
28335 "Horizontal byte sum only makes sense for wider elements!");
28336 unsigned VecSize = VT.getSizeInBits();
28337 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
  // The PSADBW instruction horizontally adds all bytes and leaves the result in i64
28340 // chunks, thus directly computes the pop count for v2i64 and v4i64.
28341 if (EltVT == MVT::i64) {
28342 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
28343 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
28344 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
28345 return DAG.getBitcast(VT, V);
28348 if (EltVT == MVT::i32) {
28349 // We unpack the low half and high half into i32s interleaved with zeros so
28350 // that we can use PSADBW to horizontally sum them. The most useful part of
28351 // this is that it lines up the results of two PSADBW instructions to be
28352 // two v2i64 vectors which concatenated are the 4 population counts. We can
28353 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
28354 SDValue Zeros = DAG.getConstant(0, DL, VT);
28355 SDValue V32 = DAG.getBitcast(VT, V);
28356 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
28357 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
28359 // Do the horizontal sums into two v2i64s.
28360 Zeros = DAG.getConstant(0, DL, ByteVecVT);
28361 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
28362 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
28363 DAG.getBitcast(ByteVecVT, Low), Zeros);
28364 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
28365 DAG.getBitcast(ByteVecVT, High), Zeros);
28367 // Merge them together.
28368 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
28369 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
28370 DAG.getBitcast(ShortVecVT, Low),
28371 DAG.getBitcast(ShortVecVT, High));
28373 return DAG.getBitcast(VT, V);
28376 // The only element type left is i16.
28377 assert(EltVT == MVT::i16 && "Unknown how to handle type");
28379 // To obtain pop count for each i16 element starting from the pop count for
28380 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
28381 // right by 8. It is important to shift as i16s as i8 vector shift isn't
28382 // directly supported.
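  // For example, for the i16 element 0xF00F the incoming byte counts are
  // 0x04 and 0x04, i.e. the lane holds 0x0404: the i16 shift left by 8 gives
  // 0x0400, the byte-wise add gives 0x0804, and the final i16 shift right by
  // 8 yields 0x0008, the pop count of 0xF00F.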
28383 SDValue ShifterV = DAG.getConstant(8, DL, VT);
28384 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
28385 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
28386 DAG.getBitcast(ByteVecVT, V));
28387 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
28390 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
28391 const X86Subtarget &Subtarget,
28392 SelectionDAG &DAG) {
28393 MVT VT = Op.getSimpleValueType();
28394 MVT EltVT = VT.getVectorElementType();
28395 int NumElts = VT.getVectorNumElements();
28397 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
28399 // Implement a lookup table in register by using an algorithm based on:
28400 // http://wm.ite.pl/articles/sse-popcount.html
28402 // The general idea is that every lower byte nibble in the input vector is an
  // index into an in-register pre-computed pop count table. We then split up the
28404 // input vector in two new ones: (1) a vector with only the shifted-right
28405 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
28406 // masked out higher ones) for each byte. PSHUFB is used separately with both
28407 // to index the in-register table. Next, both are added and the result is a
  // i8 vector where each element contains the pop count for the input byte.
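  // For example, for the input byte 0xB7 the high nibble 0xB and the low
  // nibble 0x7 both index LUT entries holding 3, and their sum, 6, is
  // popcount(0xB7).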
28409 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
28410 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
28411 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
28412 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
28414 SmallVector<SDValue, 64> LUTVec;
28415 for (int i = 0; i < NumElts; ++i)
28416 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
28417 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
28418 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
28421 SDValue FourV = DAG.getConstant(4, DL, VT);
28422 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
28425 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
  // The input vector is used as the shuffle mask that indexes elements into the
28428 // LUT. After counting low and high nibbles, add the vector to obtain the
28429 // final pop count per i8 element.
28430 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
28431 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
28432 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
28435 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
28436 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
28437 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
28438 SelectionDAG &DAG) {
28439 MVT VT = Op.getSimpleValueType();
28440 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
28441 "Unknown CTPOP type to handle");
28442 SDLoc DL(Op.getNode());
28443 SDValue Op0 = Op.getOperand(0);
28445 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
28446 if (Subtarget.hasVPOPCNTDQ()) {
28447 unsigned NumElems = VT.getVectorNumElements();
28448 assert((VT.getVectorElementType() == MVT::i8 ||
28449 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
28450 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
28451 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
28452 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
28453 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
28454 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
28458 // Decompose 256-bit ops into smaller 128-bit ops.
28459 if (VT.is256BitVector() && !Subtarget.hasInt256())
28460 return splitVectorIntUnary(Op, DAG);
28462 // Decompose 512-bit ops into smaller 256-bit ops.
28463 if (VT.is512BitVector() && !Subtarget.hasBWI())
28464 return splitVectorIntUnary(Op, DAG);
28466 // For element types greater than i8, do vXi8 pop counts and a bytesum.
28467 if (VT.getScalarType() != MVT::i8) {
28468 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
28469 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
28470 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
28471 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
28474 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
  if (!Subtarget.hasSSSE3())
    return SDValue();
28478 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
28481 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
28482 SelectionDAG &DAG) {
28483 assert(Op.getSimpleValueType().isVector() &&
28484 "We only do custom lowering for vector population count.");
28485 return LowerVectorCTPOP(Op, Subtarget, DAG);
28488 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
28489 MVT VT = Op.getSimpleValueType();
28490 SDValue In = Op.getOperand(0);
  // For scalars, it's still beneficial to transfer to/from the SIMD unit to
28494 // perform the BITREVERSE.
28495 if (!VT.isVector()) {
28496 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
28497 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
28498 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
28499 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
28500 DAG.getIntPtrConstant(0, DL));
28503 int NumElts = VT.getVectorNumElements();
28504 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
28506 // Decompose 256-bit ops into smaller 128-bit ops.
28507 if (VT.is256BitVector())
28508 return splitVectorIntUnary(Op, DAG);
28510 assert(VT.is128BitVector() &&
28511 "Only 128-bit vector bitreverse lowering supported.");
28513 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
28514 // perform the BSWAP in the shuffle.
  // It's best to shuffle using the second operand as this will implicitly allow
28516 // memory folding for multiple vectors.
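  // Each control byte is thus (op << 5) | source_index: op == 2 emits the
  // selected byte bit-reversed, and source indices 16..31 pick bytes from the
  // second source operand, which is why the input is passed as the second
  // operand below.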
28517 SmallVector<SDValue, 16> MaskElts;
28518 for (int i = 0; i != NumElts; ++i) {
28519 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
28520 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
28521 int PermuteByte = SourceByte | (2 << 5);
28522 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
28526 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
28527 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
28528 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
28530 return DAG.getBitcast(VT, Res);
28533 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
28534 SelectionDAG &DAG) {
28535 MVT VT = Op.getSimpleValueType();
28537 if (Subtarget.hasXOP() && !VT.is512BitVector())
28538 return LowerBITREVERSE_XOP(Op, DAG);
28540 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
28542 SDValue In = Op.getOperand(0);
28545 // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
28546 if (VT == MVT::v64i8 && !Subtarget.hasBWI())
28547 return splitVectorIntUnary(Op, DAG);
28549 unsigned NumElts = VT.getVectorNumElements();
28550 assert(VT.getScalarType() == MVT::i8 &&
28551 "Only byte vector BITREVERSE supported");
28553 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
28554 if (VT.is256BitVector() && !Subtarget.hasInt256())
28555 return splitVectorIntUnary(Op, DAG);
28557 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
28558 // two nibbles and a PSHUFB lookup to find the bitreverse of each
28559 // 0-15 value (moved to the other nibble).
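  // For example, for the input byte 0xD1 (0b11010001) the low nibble 0x1
  // maps to LoLUT[1] == 0x80 (its reverse placed in the high nibble), the
  // high nibble 0xD maps to HiLUT[0xD] == 0x0B, and OR'ing them gives 0x8B
  // (0b10001011), the bit reversal of 0xD1.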
28560 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
28561 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
28562 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
28564 const int LoLUT[16] = {
28565 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
28566 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
28567 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
28568 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
28569 const int HiLUT[16] = {
28570 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
28571 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
28572 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
28573 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
28575 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
28576 for (unsigned i = 0; i < NumElts; ++i) {
28577 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
28578 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
28581 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
28582 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
28583 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
28584 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
28585 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
28588 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
28589 const X86Subtarget &Subtarget) {
28590 unsigned NewOpc = 0;
28591 switch (N->getOpcode()) {
28592 case ISD::ATOMIC_LOAD_ADD:
28593 NewOpc = X86ISD::LADD;
28595 case ISD::ATOMIC_LOAD_SUB:
28596 NewOpc = X86ISD::LSUB;
28598 case ISD::ATOMIC_LOAD_OR:
28599 NewOpc = X86ISD::LOR;
28601 case ISD::ATOMIC_LOAD_XOR:
28602 NewOpc = X86ISD::LXOR;
28604 case ISD::ATOMIC_LOAD_AND:
28605 NewOpc = X86ISD::LAND;
28608 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
28611 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
28613 return DAG.getMemIntrinsicNode(
28614 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
28615 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
28616 /*MemVT=*/N->getSimpleValueType(0), MMO);
28619 /// Lower atomic_load_ops into LOCK-prefixed operations.
28620 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
28621 const X86Subtarget &Subtarget) {
28622 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
28623 SDValue Chain = N->getOperand(0);
28624 SDValue LHS = N->getOperand(1);
28625 SDValue RHS = N->getOperand(2);
28626 unsigned Opc = N->getOpcode();
28627 MVT VT = N->getSimpleValueType(0);
28630 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
28631 // can only be lowered when the result is unused. They should have already
28632 // been transformed into a cmpxchg loop in AtomicExpand.
28633 if (N->hasAnyUseOfValue(0)) {
28634 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
28635 // select LXADD if LOCK_SUB can't be selected.
28636 if (Opc == ISD::ATOMIC_LOAD_SUB) {
28637 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
28638 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
28639 RHS, AN->getMemOperand());
28641 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
28642 "Used AtomicRMW ops other than Add should have been expanded!");
  // Specialized lowering for the canonical form of an idempotent atomicrmw.
28647 // The core idea here is that since the memory location isn't actually
28648 // changing, all we need is a lowering for the *ordering* impacts of the
  // atomicrmw. As such, we can choose a different operation and memory
28650 // location to minimize impact on other code.
28651 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
28652 // On X86, the only ordering which actually requires an instruction is
28653 // seq_cst which isn't SingleThread, everything just needs to be preserved
28654 // during codegen and then dropped. Note that we expect (but don't assume),
28655 // that orderings other than seq_cst and acq_rel have been canonicalized to
28656 // a store or load.
28657 if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
28658 AN->getSyncScopeID() == SyncScope::System) {
28659 // Prefer a locked operation against a stack location to minimize cache
28660 // traffic. This assumes that stack locations are very likely to be
28661 // accessed only by the owning thread.
28662 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
28663 assert(!N->hasAnyUseOfValue(0));
28664 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28665 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28666 DAG.getUNDEF(VT), NewChain);
28668 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
28669 SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
28670 assert(!N->hasAnyUseOfValue(0));
28671 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28672 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28673 DAG.getUNDEF(VT), NewChain);
28676 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
28677 // RAUW the chain, but don't worry about the result, as it's unused.
28678 assert(!N->hasAnyUseOfValue(0));
28679 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28680 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28681 DAG.getUNDEF(VT), LockOp.getValue(1));
28684 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
28685 const X86Subtarget &Subtarget) {
28686 auto *Node = cast<AtomicSDNode>(Op.getNode());
28688 EVT VT = Node->getMemoryVT();
28690 bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
28691 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
28693 // If this store is not sequentially consistent and the type is legal
28694 // we can just keep it.
28695 if (!IsSeqCst && IsTypeLegal)
28698 if (VT == MVT::i64 && !IsTypeLegal) {
28699 // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
28701 bool NoImplicitFloatOps =
28702 DAG.getMachineFunction().getFunction().hasFnAttribute(
28703 Attribute::NoImplicitFloat);
28704 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
28706 if (Subtarget.hasSSE1()) {
28707 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
28708 Node->getOperand(2));
28709 MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
28710 SclToVec = DAG.getBitcast(StVT, SclToVec);
28711 SDVTList Tys = DAG.getVTList(MVT::Other);
28712 SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
28713 Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
28714 MVT::i64, Node->getMemOperand());
28715 } else if (Subtarget.hasX87()) {
28716 // First load this into an 80-bit X87 register using a stack temporary.
28717 // This will put the whole integer into the significand.
28718 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
28719 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28720 MachinePointerInfo MPI =
28721 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28723 DAG.getStore(Node->getChain(), dl, Node->getOperand(2), StackPtr,
28724 MPI, /*Align*/ 0, MachineMemOperand::MOStore);
28725 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
28726 SDValue LdOps[] = {Chain, StackPtr};
28728 DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
28729 /*Align*/ None, MachineMemOperand::MOLoad);
28730 Chain = Value.getValue(1);
28732 // Now use an FIST to do the atomic store.
28733 SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
28735 DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
28736 StoreOps, MVT::i64, Node->getMemOperand());
28740 // If this is a sequentially consistent store, also emit an appropriate
28743 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
28750 // Convert seq_cst store -> xchg
28751 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
28752 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
28753 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
28754 Node->getMemoryVT(),
28755 Node->getOperand(0),
28756 Node->getOperand(1), Node->getOperand(2),
28757 Node->getMemOperand());
28758 return Swap.getValue(1);
28761 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
28762 SDNode *N = Op.getNode();
28763 MVT VT = N->getSimpleValueType(0);
28765 // Let legalize expand this if it isn't a legal type yet.
28766 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
28769 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
28772 // Set the carry flag.
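  // Adding all-ones (-1) to the incoming carry value sets the hardware carry
  // flag exactly when that value is non-zero; the ADC/SBB below then consumes
  // that flag via Carry.getValue(1).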
28773 SDValue Carry = Op.getOperand(2);
28774 EVT CarryVT = Carry.getValueType();
28775 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
28776 Carry, DAG.getAllOnesConstant(DL, CarryVT));
28778 unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
28779 SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
28780 Op.getOperand(1), Carry.getValue(1));
28782 SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
28783 if (N->getValueType(1) == MVT::i1)
28784 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
28786 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
28789 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
28790 SelectionDAG &DAG) {
28791 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
28793 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
28794 // which returns the values as { float, float } (in XMM0) or
28795 // { double, double } (which is returned in XMM0, XMM1).
28797 SDValue Arg = Op.getOperand(0);
28798 EVT ArgVT = Arg.getValueType();
28799 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
28801 TargetLowering::ArgListTy Args;
28802 TargetLowering::ArgListEntry Entry;
28806 Entry.IsSExt = false;
28807 Entry.IsZExt = false;
28808 Args.push_back(Entry);
28810 bool isF64 = ArgVT == MVT::f64;
28811 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
28812 // the small struct {f32, f32} is returned in (eax, edx). For f64,
28813 // the results are returned via SRet in memory.
28814 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28815 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
28816 const char *LibcallName = TLI.getLibcallName(LC);
  SDValue Callee =
      DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
28820 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
28821 : (Type *)FixedVectorType::get(ArgTy, 4);
28823 TargetLowering::CallLoweringInfo CLI(DAG);
28824 CLI.setDebugLoc(dl)
28825 .setChain(DAG.getEntryNode())
28826 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
28828 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
  if (isF64)
    // Returned in xmm0 and xmm1.
28832 return CallResult.first;
  // Returned in bits 0:31 and 32:63 of xmm0.
28835 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28836 CallResult.first, DAG.getIntPtrConstant(0, dl));
28837 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28838 CallResult.first, DAG.getIntPtrConstant(1, dl));
28839 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
28840 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
28843 /// Widen a vector input to a vector of NVT. The
28844 /// input vector must have the same element type as NVT.
28845 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
28846 bool FillWithZeroes = false) {
28847 // Check if InOp already has the right width.
  MVT InVT = InOp.getSimpleValueType();
  if (InVT == NVT)
    return InOp;
28852 if (InOp.isUndef())
28853 return DAG.getUNDEF(NVT);
28855 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
28856 "input and widen element type must match");
28858 unsigned InNumElts = InVT.getVectorNumElements();
28859 unsigned WidenNumElts = NVT.getVectorNumElements();
28860 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
28861 "Unexpected request for vector widening");
28864 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
28865 InOp.getNumOperands() == 2) {
28866 SDValue N1 = InOp.getOperand(1);
28867 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
28869 InOp = InOp.getOperand(0);
28870 InVT = InOp.getSimpleValueType();
28871 InNumElts = InVT.getVectorNumElements();
28874 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
28875 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
28876 SmallVector<SDValue, 16> Ops;
28877 for (unsigned i = 0; i < InNumElts; ++i)
28878 Ops.push_back(InOp.getOperand(i));
28880 EVT EltVT = InOp.getOperand(0).getValueType();
28882 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
28883 DAG.getUNDEF(EltVT);
28884 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
28885 Ops.push_back(FillVal);
28886 return DAG.getBuildVector(NVT, dl, Ops);
28888 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
28890 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
28891 InOp, DAG.getIntPtrConstant(0, dl));
28894 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
28895 SelectionDAG &DAG) {
28896 assert(Subtarget.hasAVX512() &&
28897 "MGATHER/MSCATTER are supported on AVX-512 arch only");
28899 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
28900 SDValue Src = N->getValue();
28901 MVT VT = Src.getSimpleValueType();
28902 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
28905 SDValue Scale = N->getScale();
28906 SDValue Index = N->getIndex();
28907 SDValue Mask = N->getMask();
28908 SDValue Chain = N->getChain();
28909 SDValue BasePtr = N->getBasePtr();
28911 if (VT == MVT::v2f32 || VT == MVT::v2i32) {
28912 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28913 // If the index is v2i64 and we have VLX we can use xmm for data and index.
28914 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
28915 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28916 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
28917 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
28918 SDVTList VTs = DAG.getVTList(MVT::Other);
28919 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28920 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
28921 N->getMemoryVT(), N->getMemOperand());
28926 MVT IndexVT = Index.getSimpleValueType();
28928 // If the index is v2i32, we're being called by type legalization and we
28929 // should just let the default handling take care of it.
  if (IndexVT == MVT::v2i32)
    return SDValue();

  // If we don't have VLX and neither the data nor the index is 512 bits, we
28934 // need to widen until one is.
28935 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
28936 !Index.getSimpleValueType().is512BitVector()) {
28937 // Determine how much we need to widen by to get a 512-bit type.
28938 unsigned Factor = std::min(512/VT.getSizeInBits(),
28939 512/IndexVT.getSizeInBits());
28940 unsigned NumElts = VT.getVectorNumElements() * Factor;
28942 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
28943 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
28944 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
28946 Src = ExtendToType(Src, VT, DAG);
28947 Index = ExtendToType(Index, IndexVT, DAG);
28948 Mask = ExtendToType(Mask, MaskVT, DAG, true);
28951 SDVTList VTs = DAG.getVTList(MVT::Other);
28952 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28953 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
28954 N->getMemoryVT(), N->getMemOperand());
28957 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
28958 SelectionDAG &DAG) {
28960 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
28961 MVT VT = Op.getSimpleValueType();
28962 MVT ScalarVT = VT.getScalarType();
28963 SDValue Mask = N->getMask();
28964 MVT MaskVT = Mask.getSimpleValueType();
28965 SDValue PassThru = N->getPassThru();
28968 // Handle AVX masked loads which don't support passthru other than 0.
28969 if (MaskVT.getVectorElementType() != MVT::i1) {
28970 // We also allow undef in the isel pattern.
    if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
      return Op;
28974 SDValue NewLoad = DAG.getMaskedLoad(
28975 VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
28976 getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
28977 N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
28978 N->isExpandingLoad());
28980 SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
28981 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
28984 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
28985 "Expanding masked load is supported on AVX-512 target only!");
28987 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
28988 "Expanding masked load is supported for 32 and 64-bit types only!");
28990 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28991 "Cannot lower masked load op.");
28993 assert((ScalarVT.getSizeInBits() >= 32 ||
28994 (Subtarget.hasBWI() &&
28995 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
28996 "Unsupported masked load op.");
28998 // This operation is legal for targets with VLX, but without
28999 // VLX the vector should be widened to 512 bits.
29000 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
29001 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
29002 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
29004 // Mask element has to be i1.
29005 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
29006 "Unexpected mask type");
29008 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
29010 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
29011 SDValue NewLoad = DAG.getMaskedLoad(
29012 WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
29013 PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
29014 N->getExtensionType(), N->isExpandingLoad());
29017 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
29018 DAG.getIntPtrConstant(0, dl));
29019 SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
29020 return DAG.getMergeValues(RetOps, dl);
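// Lower a masked store on AVX-512 targets without VLX by widening the data
// and the i1 mask to a 512-bit vector; only the originally requested elements
// are written because the extra mask lanes are filled with zeros.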
29023 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
29024 SelectionDAG &DAG) {
29025 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
29026 SDValue DataToStore = N->getValue();
29027 MVT VT = DataToStore.getSimpleValueType();
29028 MVT ScalarVT = VT.getScalarType();
29029 SDValue Mask = N->getMask();
29032 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
29033 "Expanding masked load is supported on AVX-512 target only!");
29035 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
29036 "Expanding masked load is supported for 32 and 64-bit types only!");
29038 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
29039 "Cannot lower masked store op.");
29041 assert((ScalarVT.getSizeInBits() >= 32 ||
29042 (Subtarget.hasBWI() &&
29043 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
29044 "Unsupported masked store op.");
29046 // This operation is legal for targets with VLX, but without
29047 // VLX the vector should be widened to 512 bits.
29048 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
29049 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
29051 // Mask element has to be i1.
29052 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
29053 "Unexpected mask type");
29055 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
29057 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
29058 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
29059 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
29060 N->getOffset(), Mask, N->getMemoryVT(),
29061 N->getMemOperand(), N->getAddressingMode(),
29062 N->isTruncatingStore(), N->isCompressingStore());
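// Lower a masked gather. A v2i32 index defers to the default handling; on
// AVX-512 targets without VLX the passthru and the index are widened until
// one of them is 512 bits, mirroring the scatter lowering above, and the
// result is extracted back down to the original type.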
29065 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
29066 SelectionDAG &DAG) {
29067 assert(Subtarget.hasAVX2() &&
29068 "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
29070 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
29072 MVT VT = Op.getSimpleValueType();
29073 SDValue Index = N->getIndex();
29074 SDValue Mask = N->getMask();
29075 SDValue PassThru = N->getPassThru();
29076 MVT IndexVT = Index.getSimpleValueType();
29078 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
29080 // If the index is v2i32, we're being called by type legalization.
29081 if (IndexVT == MVT::v2i32)
29084 // If we don't have VLX and neither the passthru nor the index is 512 bits, we
29085 // need to widen until one is.
29087 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
29088 !IndexVT.is512BitVector()) {
29089 // Determine how much we need to widen by to get a 512-bit type.
29090 unsigned Factor = std::min(512/VT.getSizeInBits(),
29091 512/IndexVT.getSizeInBits());
29093 unsigned NumElts = VT.getVectorNumElements() * Factor;
29095 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
29096 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
29097 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
29099 PassThru = ExtendToType(PassThru, VT, DAG);
29100 Index = ExtendToType(Index, IndexVT, DAG);
29101 Mask = ExtendToType(Mask, MaskVT, DAG, true);
29104 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
29106 SDValue NewGather = DAG.getMemIntrinsicNode(
29107 X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
29108 N->getMemOperand());
29109 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
29110 NewGather, DAG.getIntPtrConstant(0, dl));
29111 return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
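// Lower an addrspacecast between pointer sizes: unsigned ptr32 pointers are
// zero-extended to 64 bits, other 32-bit pointers are sign-extended, and
// casts down to a 32-bit pointer are simple truncations.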
29114 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
29116 SDValue Src = Op.getOperand(0);
29117 MVT DstVT = Op.getSimpleValueType();
29119 AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
29120 unsigned SrcAS = N->getSrcAddressSpace();
29122 assert(SrcAS != N->getDestAddressSpace() &&
29123 "addrspacecast must be between different address spaces");
29125 if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
29126 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
29127 } else if (DstVT == MVT::i64) {
29128 Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
29129 } else if (DstVT == MVT::i32) {
29130 Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
29132 report_fatal_error("Bad address space in addrspacecast");
29137 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
29138 SelectionDAG &DAG) const {
29139 // TODO: Eventually, the lowering of these nodes should be informed by or
29140 // deferred to the GC strategy for the function in which they appear. For
29141 // now, however, they must be lowered to something. Since they are logically
29142 // no-ops in the case of a null GC strategy (or a GC strategy which does not
29143 // require special handling for these nodes), lower them as literal NOOPs for
29144 // the time being.
29145 SmallVector<SDValue, 2> Ops;
29147 Ops.push_back(Op.getOperand(0));
29148 if (Op->getGluedNode())
29149 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
29152 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
29153 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
29158 SDValue X86TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
29159 RTLIB::Libcall Call) const {
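// Forward the operands (dropping the chain operand of strict nodes) to the
// f128 runtime library call; for strict ops the incoming chain is passed to
// makeLibCall and the updated chain is merged into the returned results.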
29161 bool IsStrict = Op->isStrictFPOpcode();
29162 unsigned Offset = IsStrict ? 1 : 0;
29163 SmallVector<SDValue, 2> Ops(Op->op_begin() + Offset, Op->op_end());
29166 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
29167 MakeLibCallOptions CallOptions;
29168 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, Call, MVT::f128, Ops,
29169 CallOptions, dl, Chain);
29172 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
29177 // Custom split CVTPS2PH with wide types.
29178 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
29180 EVT VT = Op.getValueType();
29182 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
29184 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
29185 SDValue RC = Op.getOperand(1);
29186 Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
29187 Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
29188 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29191 /// Provide custom lowering hooks for some operations.
29192 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
29193 switch (Op.getOpcode()) {
29194 default: llvm_unreachable("Should not custom lower this!");
29195 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
29196 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
29197 return LowerCMP_SWAP(Op, Subtarget, DAG);
29198 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
29199 case ISD::ATOMIC_LOAD_ADD:
29200 case ISD::ATOMIC_LOAD_SUB:
29201 case ISD::ATOMIC_LOAD_OR:
29202 case ISD::ATOMIC_LOAD_XOR:
29203 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
29204 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
29205 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
29206 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
29207 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
29208 case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
29209 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
29210 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
29211 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
29212 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
29213 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
29214 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
29215 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
29216 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
29217 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
29218 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
29219 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
29220 case ISD::SHL_PARTS:
29221 case ISD::SRA_PARTS:
29222 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
29224 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
29225 case ISD::STRICT_SINT_TO_FP:
29226 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
29227 case ISD::STRICT_UINT_TO_FP:
29228 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
29229 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
29230 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
29231 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
29232 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
29233 case ISD::ZERO_EXTEND_VECTOR_INREG:
29234 case ISD::SIGN_EXTEND_VECTOR_INREG:
29235 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
29236 case ISD::FP_TO_SINT:
29237 case ISD::STRICT_FP_TO_SINT:
29238 case ISD::FP_TO_UINT:
29239 case ISD::STRICT_FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
29240 case ISD::FP_EXTEND:
29241 case ISD::STRICT_FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
29242 case ISD::FP_ROUND:
29243 case ISD::STRICT_FP_ROUND: return LowerFP_ROUND(Op, DAG);
29244 case ISD::FP16_TO_FP:
29245 case ISD::STRICT_FP16_TO_FP: return LowerFP16_TO_FP(Op, DAG);
29246 case ISD::FP_TO_FP16:
29247 case ISD::STRICT_FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
29248 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
29249 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
29251 case ISD::FSUB: return lowerFaddFsub(Op, DAG);
29252 case ISD::FROUND: return LowerFROUND(Op, DAG);
29254 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
29255 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
29256 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
29258 case ISD::LLRINT: return LowerLRINT_LLRINT(Op, DAG);
29260 case ISD::STRICT_FSETCC:
29261 case ISD::STRICT_FSETCCS: return LowerSETCC(Op, DAG);
29262 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
29263 case ISD::SELECT: return LowerSELECT(Op, DAG);
29264 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
29265 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
29266 case ISD::VASTART: return LowerVASTART(Op, DAG);
29267 case ISD::VAARG: return LowerVAARG(Op, DAG);
29268 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
29269 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
29270 case ISD::INTRINSIC_VOID:
29271 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
29272 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
29273 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
29274 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
29275 case ISD::FRAME_TO_ARGS_OFFSET:
29276 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
29277 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
29278 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
29279 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
29280 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
29281 case ISD::EH_SJLJ_SETUP_DISPATCH:
29282 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
29283 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
29284 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
29285 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
29287 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
29289 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
29290 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
29292 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
29294 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
29297 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
29303 case ISD::UMULO: return LowerXALUO(Op, DAG);
29304 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
29305 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
29306 case ISD::ADDCARRY:
29307 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
29309 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
29313 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
29317 case ISD::UMIN: return LowerMINMAX(Op, DAG);
29318 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
29319 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
29320 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
29321 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
29322 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
29323 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
29324 case ISD::GC_TRANSITION_START:
29325 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION(Op, DAG);
29326 case ISD::ADDRSPACECAST: return LowerADDRSPACECAST(Op, DAG);
29327 case X86ISD::CVTPS2PH: return LowerCVTPS2PH(Op, DAG);
29331 /// Places new result values for the node in Results (their number
29332 /// and types must exactly match those of the original return values of
29333 /// the node), or leaves Results empty, which indicates that the node is not
29334 /// to be custom lowered after all.
29335 void X86TargetLowering::LowerOperationWrapper(SDNode *N,
29336 SmallVectorImpl<SDValue> &Results,
29337 SelectionDAG &DAG) const {
29338 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
29340 if (!Res.getNode())
29343 // If the original node has one result, take the return value from
29344 // LowerOperation as is. It might not be result number 0.
29345 if (N->getNumValues() == 1) {
29346 Results.push_back(Res);
29350 // If the original node has multiple results, then the return node should
29351 // have the same number of results.
29352 assert((N->getNumValues() == Res->getNumValues()) &&
29353 "Lowering returned the wrong number of results!");
29355 // Place the new result values based on the result numbers of N.
29356 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
29357 Results.push_back(Res.getValue(I));
29360 /// Replace a node with an illegal result type with a new node built out of
29361 /// custom code.
29362 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
29363 SmallVectorImpl<SDValue>&Results,
29364 SelectionDAG &DAG) const {
29366 switch (N->getOpcode()) {
29369 dbgs() << "ReplaceNodeResults: ";
29372 llvm_unreachable("Do not know how to custom type legalize this operation!");
29373 case X86ISD::CVTPH2PS: {
29374 EVT VT = N->getValueType(0);
29376 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
29378 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
29379 Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
29380 Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
29381 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29382 Results.push_back(Res);
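// The strict variant splits the same way, but threads the incoming chain
// through both halves and rejoins the output chains with a TokenFactor.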
29385 case X86ISD::STRICT_CVTPH2PS: {
29386 EVT VT = N->getValueType(0);
29388 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
29390 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
29391 Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
29392 {N->getOperand(0), Lo});
29393 Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
29394 {N->getOperand(0), Hi});
29395 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
29396 Lo.getValue(1), Hi.getValue(1));
29397 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29398 Results.push_back(Res);
29399 Results.push_back(Chain);
29403 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
29404 // Use a v2i64 if possible.
29405 bool NoImplicitFloatOps =
29406 DAG.getMachineFunction().getFunction().hasFnAttribute(
29407 Attribute::NoImplicitFloat);
29408 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
29410 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
29411 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
29412 // Bit count should fit in 32 bits, extract it as that and then zero
29413 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
29414 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
29415 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
29416 DAG.getIntPtrConstant(0, dl));
29417 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
29418 Results.push_back(Wide);
29423 EVT VT = N->getValueType(0);
29424 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29425 VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
29426 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
29427 // elements are needed.
29428 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
29429 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
29430 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
29431 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
29432 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
29433 unsigned NumConcats = 16 / VT.getVectorNumElements();
29434 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
29435 ConcatOps[0] = Res;
29436 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
29437 Results.push_back(Res);
29440 case X86ISD::VPMADDWD:
29441 case X86ISD::AVG: {
29442 // Legalize types for X86ISD::AVG/VPMADDWD by widening.
29443 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29445 EVT VT = N->getValueType(0);
29446 EVT InVT = N->getOperand(0).getValueType();
29447 assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
29448 "Expected a VT that divides into 128 bits.");
29449 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29450 "Unexpected type action!");
29451 unsigned NumConcat = 128 / InVT.getSizeInBits();
29453 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
29454 InVT.getVectorElementType(),
29455 NumConcat * InVT.getVectorNumElements());
29456 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
29457 VT.getVectorElementType(),
29458 NumConcat * VT.getVectorNumElements());
29460 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
29461 Ops[0] = N->getOperand(0);
29462 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
29463 Ops[0] = N->getOperand(1);
29464 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
29466 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
29467 Results.push_back(Res);
29471 assert(N->getValueType(0) == MVT::i64 &&
29472 "Unexpected type (!= i64) on ABS.");
29473 MVT HalfT = MVT::i32;
29474 SDValue Lo, Hi, Tmp;
29475 SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
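// Expand i64 ABS for 32-bit targets as (x + sign) ^ sign, where sign is x
// arithmetically shifted right by 63 (all bits equal to the sign bit); the
// 64-bit add is performed as a UADDO/ADDCARRY pair on the 32-bit halves.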
29477 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
29478 DAG.getConstant(0, dl, HalfT));
29479 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
29480 DAG.getConstant(1, dl, HalfT));
29482 ISD::SRA, dl, HalfT, Hi,
29483 DAG.getShiftAmountConstant(HalfT.getSizeInBits() - 1, HalfT, dl));
29484 Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
29485 Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
29486 SDValue(Lo.getNode(), 1));
29487 Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
29488 Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
29489 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi));
29492 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
29493 case X86ISD::FMINC:
29495 case X86ISD::FMAXC:
29496 case X86ISD::FMAX: {
29497 EVT VT = N->getValueType(0);
29498 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
29499 SDValue UNDEF = DAG.getUNDEF(VT);
29500 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
29501 N->getOperand(0), UNDEF);
29502 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
29503 N->getOperand(1), UNDEF);
29504 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
29511 EVT VT = N->getValueType(0);
29512 if (VT.isVector()) {
29513 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29514 "Unexpected type action!");
29515 // If the RHS is a constant splat vector we can widen this and let the
29516 // division/remainder-by-constant optimization handle it.
29517 // TODO: Can we do something for non-splat?
29519 if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
29520 unsigned NumConcats = 128 / VT.getSizeInBits();
29521 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
29522 Ops0[0] = N->getOperand(0);
29523 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
29524 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
29525 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
29526 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
29527 Results.push_back(Res);
29535 case ISD::UDIVREM: {
29536 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
29537 Results.push_back(V);
29540 case ISD::TRUNCATE: {
29541 MVT VT = N->getSimpleValueType(0);
29542 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
29545 // The generic legalizer will try to widen the input type to the same
29546 // number of elements as the widened result type. But this isn't always
29547 // the best thing so do some custom legalization to avoid some cases.
29548 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
29549 SDValue In = N->getOperand(0);
29550 EVT InVT = In.getValueType();
29552 unsigned InBits = InVT.getSizeInBits();
29553 if (128 % InBits == 0) {
29554 // 128 bit and smaller inputs should avoid the truncate altogether and
29555 // just use a build_vector that will become a shuffle.
29556 // TODO: Widen and use a shuffle directly?
29557 MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
29558 EVT EltVT = VT.getVectorElementType();
29559 unsigned WidenNumElts = WidenVT.getVectorNumElements();
29560 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
29561 // Use the original element count so we don't do more scalar opts than
29562 // necessary.
29563 unsigned MinElts = VT.getVectorNumElements();
29564 for (unsigned i=0; i < MinElts; ++i) {
29565 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
29566 DAG.getIntPtrConstant(i, dl));
29567 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
29569 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
29572 // With AVX512 there are some cases that can use a target specific
29573 // truncate node to go from 256/512 to less than 128 with zeros in the
29574 // upper elements of the 128 bit result.
29575 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
29576 // We can use VTRUNC directly for a 256-bit input with VLX, or for any 512-bit input.
29577 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
29578 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
29581 // There's one case we can widen to 512 bits and use VTRUNC.
29582 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
29583 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
29584 DAG.getUNDEF(MVT::v4i64));
29585 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
29589 if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
29590 getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
29591 isTypeLegal(MVT::v4i64)) {
29592 // The input needs to be split and the output needs to be widened. Let's use two
29593 // VTRUNCs, and shuffle their results together into the wider type.
29595 std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
29597 Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
29598 Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
29599 SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
29600 { 0, 1, 2, 3, 16, 17, 18, 19,
29601 -1, -1, -1, -1, -1, -1, -1, -1 });
29602 Results.push_back(Res);
29608 case ISD::ANY_EXTEND:
29609 // Right now, only MVT::v8i8 has Custom action for an illegal type.
29610 // It's intended to custom handle the input type.
29611 assert(N->getValueType(0) == MVT::v8i8 &&
29612 "Do not know how to legalize this Node");
29614 case ISD::SIGN_EXTEND:
29615 case ISD::ZERO_EXTEND: {
29616 EVT VT = N->getValueType(0);
29617 SDValue In = N->getOperand(0);
29618 EVT InVT = In.getValueType();
29619 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
29620 (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
29621 assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
29622 "Unexpected type action!");
29623 assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
29624 // Custom split this so we can extend i8/i16->i32 invec. This is better
29625 // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
29626 // sra, followed by an extend from i32 to i64 using pcmpgt. By custom
29627 // splitting we allow the sra from the extend to i32 to be shared by the split.
29628 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
29630 // Fill a vector with sign bits for each element.
29631 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
29632 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
29634 // Create an unpackl and unpackh to interleave the sign bits then bitcast
29635 // to v2i64.
29636 SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
29638 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
29639 SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
29641 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
29643 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29644 Results.push_back(Res);
29648 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
29649 if (!InVT.is128BitVector()) {
29650 // Not a 128 bit vector, but maybe type legalization will promote
29651 // it to 128 bits.
29652 if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
29654 InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
29655 if (!InVT.is128BitVector())
29658 // Promote the input to 128 bits. Type legalization will turn this into
29659 // zext_inreg/sext_inreg.
29660 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
29663 // Perform custom splitting instead of the two stage extend we would get
29664 // by default.
29666 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
29667 assert(isTypeLegal(LoVT) && "Split VT not legal?");
29669 SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);
29671 // We need to shift the input over by half the number of elements.
29672 unsigned NumElts = InVT.getVectorNumElements();
29673 unsigned HalfNumElts = NumElts / 2;
29674 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
29675 for (unsigned i = 0; i != HalfNumElts; ++i)
29676 ShufMask[i] = i + HalfNumElts;
29678 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
29679 Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);
29681 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29682 Results.push_back(Res);
29686 case ISD::FP_TO_SINT:
29687 case ISD::STRICT_FP_TO_SINT:
29688 case ISD::FP_TO_UINT:
29689 case ISD::STRICT_FP_TO_UINT: {
29690 bool IsStrict = N->isStrictFPOpcode();
29691 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
29692 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
29693 EVT VT = N->getValueType(0);
29694 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29695 EVT SrcVT = Src.getValueType();
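// Vector results with sub-32-bit elements are converted via a wider integer
// element type first, then truncated back to the original element type and
// widened out to a legal 128-bit vector.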
29697 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
29698 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29699 "Unexpected type action!");
29701 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
29702 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
29703 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
29704 VT.getVectorNumElements());
29708 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
29709 {N->getOperand(0), Src});
29710 Chain = Res.getValue(1);
29712 Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
29714 // Preserve what we know about the size of the original result. Except
29715 // when the result is v2i32 since we can't widen the assert.
29716 if (PromoteVT != MVT::v2i32)
29717 Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext,
29718 dl, PromoteVT, Res,
29719 DAG.getValueType(VT.getVectorElementType()));
29721 // Truncate back to the original width.
29722 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
29724 // Now widen to 128 bits.
29725 unsigned NumConcats = 128 / VT.getSizeInBits();
29726 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
29727 VT.getVectorNumElements() * NumConcats);
29728 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
29729 ConcatOps[0] = Res;
29730 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
29731 Results.push_back(Res);
29733 Results.push_back(Chain);
29738 if (VT == MVT::v2i32) {
29739 assert((IsSigned || Subtarget.hasAVX512()) &&
29740 "Can only handle signed conversion without AVX512");
29741 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29742 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29743 "Unexpected type action!");
29744 if (Src.getValueType() == MVT::v2f64) {
29747 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29749 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29751 // If we have VLX we can emit a target specific FP_TO_UINT node,
29752 if (!IsSigned && !Subtarget.hasVLX()) {
29753 // Otherwise we can defer to the generic legalizer which will widen
29754 // the input as well. This will be further widened during op
29755 // legalization to v8i32<-v8f64.
29756 // For strict nodes we'll need to widen ourselves.
29757 // FIXME: Fix the type legalizer to safely widen strict nodes?
29760 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
29761 DAG.getConstantFP(0.0, dl, MVT::v2f64));
29762 Opc = N->getOpcode();
29767 Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
29768 {N->getOperand(0), Src});
29769 Chain = Res.getValue(1);
29771 Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
29773 Results.push_back(Res);
29775 Results.push_back(Chain);
29779 // Custom widen strict v2f32->v2i32 by padding with zeros.
29780 // FIXME: Should generic type legalizer do this?
29781 if (Src.getValueType() == MVT::v2f32 && IsStrict) {
29782 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
29783 DAG.getConstantFP(0.0, dl, MVT::v2f32));
29784 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
29785 {N->getOperand(0), Src});
29786 Results.push_back(Res);
29787 Results.push_back(Res.getValue(1));
29791 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
29792 // so early out here.
29796 assert(!VT.isVector() && "Vectors should have been handled above!");
29798 if (Subtarget.hasDQI() && VT == MVT::i64 &&
29799 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
29800 assert(!Subtarget.is64Bit() && "i64 should be legal");
29801 unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
29802 // If we use a 128-bit result we might need to use a target specific node.
29804 std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
29805 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
29806 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
29807 unsigned Opc = N->getOpcode();
29808 if (NumElts != SrcElts) {
29810 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29812 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29815 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
29816 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
29817 DAG.getConstantFP(0.0, dl, VecInVT), Src,
29821 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
29822 Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
29823 Chain = Res.getValue(1);
29825 Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
29826 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
29827 Results.push_back(Res);
29829 Results.push_back(Chain);
29834 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
29835 Results.push_back(V);
29837 Results.push_back(Chain);
29842 case ISD::LLRINT: {
29843 if (SDValue V = LRINT_LLRINTHelper(N, DAG))
29844 Results.push_back(V);
29848 case ISD::SINT_TO_FP:
29849 case ISD::STRICT_SINT_TO_FP:
29850 case ISD::UINT_TO_FP:
29851 case ISD::STRICT_UINT_TO_FP: {
29852 bool IsStrict = N->isStrictFPOpcode();
29853 bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
29854 N->getOpcode() == ISD::STRICT_SINT_TO_FP;
29855 EVT VT = N->getValueType(0);
29856 if (VT != MVT::v2f32)
29858 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29859 EVT SrcVT = Src.getValueType();
29860 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
29862 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
29863 : X86ISD::STRICT_CVTUI2P;
29864 SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
29865 {N->getOperand(0), Src});
29866 Results.push_back(Res);
29867 Results.push_back(Res.getValue(1));
29869 unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
29870 Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
29874 if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
29875 Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
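// Unsigned v2i64 -> v2f32 without AVX-512: values with the sign bit set are
// halved with round-to-odd ((x >> 1) | (x & 1)), converted with the signed
// scalar conversion, and then doubled; non-negative values are converted
// directly, and a select on the sign picks between the two results.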
29876 SDValue Zero = DAG.getConstant(0, dl, SrcVT);
29877 SDValue One = DAG.getConstant(1, dl, SrcVT);
29878 SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
29879 DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
29880 DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
29881 SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
29882 SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
29883 SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
29884 for (int i = 0; i != 2; ++i) {
29885 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
29886 SignSrc, DAG.getIntPtrConstant(i, dl));
29889 DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
29890 {N->getOperand(0), Elt});
29892 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
29894 SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
29895 SDValue Slow, Chain;
29897 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
29898 SignCvts[0].getValue(1), SignCvts[1].getValue(1));
29899 Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
29900 {Chain, SignCvt, SignCvt});
29901 Chain = Slow.getValue(1);
29903 Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
29905 IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
29907 DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
29908 SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
29909 Results.push_back(Cvt);
29911 Results.push_back(Chain);
29915 if (SrcVT != MVT::v2i32)
29918 if (IsSigned || Subtarget.hasAVX512()) {
29922 // Custom widen strict v2i32->v2f32 to avoid scalarization.
29923 // FIXME: Should generic type legalizer do this?
29924 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
29925 DAG.getConstant(0, dl, MVT::v2i32));
29926 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
29927 {N->getOperand(0), Src});
29928 Results.push_back(Res);
29929 Results.push_back(Res.getValue(1));
29933 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
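// Convert unsigned v2i32 to v2f32 by ORing the zero-extended value into the
// low mantissa bits of the double constant 2^52 (0x4330000000000000),
// subtracting that constant to recover the exact integer value as a double,
// and rounding the result down to f32.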
29934 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
29936 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
29937 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
29938 DAG.getBitcast(MVT::v2i64, VBias));
29939 Or = DAG.getBitcast(MVT::v2f64, Or);
29941 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
29942 {N->getOperand(0), Or, VBias});
29943 SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
29944 {MVT::v4f32, MVT::Other},
29945 {Sub.getValue(1), Sub});
29946 Results.push_back(Res);
29947 Results.push_back(Res.getValue(1));
29949 // TODO: Are there any fast-math-flags to propagate here?
29950 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
29951 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
29955 case ISD::STRICT_FP_ROUND:
29956 case ISD::FP_ROUND: {
29957 bool IsStrict = N->isStrictFPOpcode();
29958 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29959 if (!isTypeLegal(Src.getValueType()))
29963 V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {MVT::v4f32, MVT::Other},
29964 {N->getOperand(0), N->getOperand(1)});
29966 V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
29967 Results.push_back(V);
29969 Results.push_back(V.getValue(1));
29972 case ISD::FP_EXTEND:
29973 case ISD::STRICT_FP_EXTEND: {
29974 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
29975 // No other ValueType for FP_EXTEND should reach this point.
29976 assert(N->getValueType(0) == MVT::v2f32 &&
29977 "Do not know how to legalize this Node");
29980 case ISD::INTRINSIC_W_CHAIN: {
29981 unsigned IntNo = N->getConstantOperandVal(1);
29983 default : llvm_unreachable("Do not know how to custom type "
29984 "legalize this intrinsic operation!");
29985 case Intrinsic::x86_rdtsc:
29986 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
29988 case Intrinsic::x86_rdtscp:
29989 return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
29991 case Intrinsic::x86_rdpmc:
29992 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
29995 case Intrinsic::x86_xgetbv:
29996 expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
30001 case ISD::READCYCLECOUNTER: {
30002 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
30004 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
30005 EVT T = N->getValueType(0);
30006 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
30007 bool Regs64bit = T == MVT::i128;
30008 assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
30009 "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
30010 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
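// CMPXCHG8B/CMPXCHG16B take the expected value in EDX:EAX (RDX:RAX) and the
// replacement value in ECX:EBX (RCX:RBX), so split both operands into halves
// and copy them into those fixed registers explicitly.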
30011 SDValue cpInL, cpInH;
30012 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
30013 DAG.getConstant(0, dl, HalfT));
30014 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
30015 DAG.getConstant(1, dl, HalfT));
30016 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
30017 Regs64bit ? X86::RAX : X86::EAX,
30019 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
30020 Regs64bit ? X86::RDX : X86::EDX,
30021 cpInH, cpInL.getValue(1));
30022 SDValue swapInL, swapInH;
30023 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
30024 DAG.getConstant(0, dl, HalfT));
30025 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
30026 DAG.getConstant(1, dl, HalfT));
30028 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
30029 swapInH, cpInH.getValue(1));
30030 // If the current function needs the base pointer, RBX,
30031 // we shouldn't use cmpxchg directly.
30032 // Indeed, the lowering of that instruction would clobber
30033 // that register, and since RBX will then be a reserved register,
30034 // the register allocator will not make sure its value is
30035 // properly saved and restored around this live range.
30036 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
30038 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
30039 Register BasePtr = TRI->getBaseRegister();
30040 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
30041 if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
30042 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
30043 // ISel prefers the LCMPXCHG64 variant.
30044 // If that assert breaks, that means it is not the case anymore,
30045 // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
30046 // not just EBX. This is a matter of accepting i64 input for that
30047 // pseudo, and restoring into the register of the right width
30048 // in the expand pseudo. Everything else should just work.
30049 assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
30050 "Saving only half of the RBX");
30051 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
30052 : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
30053 SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
30054 Regs64bit ? X86::RBX : X86::EBX,
30055 HalfT, swapInH.getValue(1));
30056 SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
30058 /*Glue*/ RBXSave.getValue(2)};
30059 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
30062 Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
30063 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
30064 Regs64bit ? X86::RBX : X86::EBX, swapInL,
30065 swapInH.getValue(1));
30066 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
30067 swapInL.getValue(1)};
30068 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
30070 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
30071 Regs64bit ? X86::RAX : X86::EAX,
30072 HalfT, Result.getValue(1));
30073 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
30074 Regs64bit ? X86::RDX : X86::EDX,
30075 HalfT, cpOutL.getValue(2));
30076 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
30078 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
30079 MVT::i32, cpOutH.getValue(2));
30080 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
30081 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
30083 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
30084 Results.push_back(Success);
30085 Results.push_back(EFLAGS.getValue(1));
30088 case ISD::ATOMIC_LOAD: {
30089 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
30090 bool NoImplicitFloatOps =
30091 DAG.getMachineFunction().getFunction().hasFnAttribute(
30092 Attribute::NoImplicitFloat);
30093 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
30094 auto *Node = cast<AtomicSDNode>(N);
30095 if (Subtarget.hasSSE1()) {
30096 // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
30097 // Then extract the lower 64-bits.
30098 MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
30099 SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
30100 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
30101 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
30102 MVT::i64, Node->getMemOperand());
30103 if (Subtarget.hasSSE2()) {
30104 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
30105 DAG.getIntPtrConstant(0, dl));
30106 Results.push_back(Res);
30107 Results.push_back(Ld.getValue(1));
30110 // We use an alternative sequence for SSE1 that extracts as v2f32 and
30111 // then casts to i64. This avoids a 128-bit stack temporary being
30112 // created by type legalization if we were to cast v4f32->v2i64.
30113 SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
30114 DAG.getIntPtrConstant(0, dl));
30115 Res = DAG.getBitcast(MVT::i64, Res);
30116 Results.push_back(Res);
30117 Results.push_back(Ld.getValue(1));
30120 if (Subtarget.hasX87()) {
30121 // First load this into an 80-bit X87 register. This will put the whole
30122 // integer into the significand.
30123 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
30124 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
30125 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
30126 dl, Tys, Ops, MVT::i64,
30127 Node->getMemOperand());
30128 SDValue Chain = Result.getValue(1);
30130 // Now store the X87 register to a stack temporary and convert to i64.
30131 // This store is not atomic and doesn't need to be.
30132 // FIXME: We don't need a stack temporary if the result of the load
30133 // is already being stored. We could just directly store there.
30134 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
30135 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
30136 MachinePointerInfo MPI =
30137 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
30138 SDValue StoreOps[] = { Chain, Result, StackPtr };
30139 Chain = DAG.getMemIntrinsicNode(
30140 X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
30141 MPI, None /*Align*/, MachineMemOperand::MOStore);
30143 // Finally load the value back from the stack temporary and return it.
30144 // This load is not atomic and doesn't need to be.
30145 // This load will be further type legalized.
30146 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
30147 Results.push_back(Result);
30148 Results.push_back(Result.getValue(1));
30152 // TODO: Use MOVLPS when SSE1 is available?
30153 // Delegate to generic TypeLegalization. Situations we can really handle
30154 // should have already been dealt with by AtomicExpandPass.cpp.
30157 case ISD::ATOMIC_SWAP:
30158 case ISD::ATOMIC_LOAD_ADD:
30159 case ISD::ATOMIC_LOAD_SUB:
30160 case ISD::ATOMIC_LOAD_AND:
30161 case ISD::ATOMIC_LOAD_OR:
30162 case ISD::ATOMIC_LOAD_XOR:
30163 case ISD::ATOMIC_LOAD_NAND:
30164 case ISD::ATOMIC_LOAD_MIN:
30165 case ISD::ATOMIC_LOAD_MAX:
30166 case ISD::ATOMIC_LOAD_UMIN:
30167 case ISD::ATOMIC_LOAD_UMAX:
30168 // Delegate to generic TypeLegalization. Situations we can really handle
30169 // should have already been dealt with by AtomicExpandPass.cpp.
30172 case ISD::BITCAST: {
30173 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
30174 EVT DstVT = N->getValueType(0);
30175 EVT SrcVT = N->getOperand(0).getValueType();
30177 // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit target
30178 // we can split using the k-register rather than memory.
30179 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
30180 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
30182 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
30183 Lo = DAG.getBitcast(MVT::i32, Lo);
30184 Hi = DAG.getBitcast(MVT::i32, Hi);
30185 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
30186 Results.push_back(Res);
30190 if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
30191 // FIXME: Use v4f32 for SSE1?
30192 assert(Subtarget.hasSSE2() && "Requires SSE2");
30193 assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
30194 "Unexpected type action!");
30195 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
30196 SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
30198 Res = DAG.getBitcast(WideVT, Res);
30199 Results.push_back(Res);
30205 case ISD::MGATHER: {
30206 EVT VT = N->getValueType(0);
30207 if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
30208 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
30209 auto *Gather = cast<MaskedGatherSDNode>(N);
30210 SDValue Index = Gather->getIndex();
30211 if (Index.getValueType() != MVT::v2i64)
30213 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
30214 "Unexpected type action!");
30215 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
30216 SDValue Mask = Gather->getMask();
30217 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
30218 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
30219 Gather->getPassThru(),
30221 if (!Subtarget.hasVLX()) {
30222 // We need to widen the mask, but the instruction will only use 2
30223 // of its elements. So we can use undef.
30224 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
30225 DAG.getUNDEF(MVT::v2i1));
30226 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
30228 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
30229 Gather->getBasePtr(), Index, Gather->getScale() };
30230 SDValue Res = DAG.getMemIntrinsicNode(
30231 X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
30232 Gather->getMemoryVT(), Gather->getMemOperand());
30233 Results.push_back(Res);
30234 Results.push_back(Res.getValue(1));
30240 // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
30241 // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
30242 // cast since type legalization will try to use an i64 load.
30243 MVT VT = N->getSimpleValueType(0);
30244 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
30245 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
30246 "Unexpected type action!");
30247 if (!ISD::isNON_EXTLoad(N))
30249 auto *Ld = cast<LoadSDNode>(N);
30250 if (Subtarget.hasSSE2()) {
30251 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
30252 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
30253 Ld->getPointerInfo(), Ld->getOriginalAlign(),
30254 Ld->getMemOperand()->getFlags());
30255 SDValue Chain = Res.getValue(1);
30256 MVT VecVT = MVT::getVectorVT(LdVT, 2);
30257 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
30258 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
30259 Res = DAG.getBitcast(WideVT, Res);
30260 Results.push_back(Res);
30261 Results.push_back(Chain);
30264 assert(Subtarget.hasSSE1() && "Expected SSE");
30265 SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
30266 SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
30267 SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
30268 MVT::i64, Ld->getMemOperand());
30269 Results.push_back(Res);
30270 Results.push_back(Res.getValue(1));
30273 case ISD::ADDRSPACECAST: {
30274 SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
30275 Results.push_back(V);
30281 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
30282 switch ((X86ISD::NodeType)Opcode) {
30283 case X86ISD::FIRST_NUMBER: break;
30284 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
30285 NODE_NAME_CASE(BSF)
30286 NODE_NAME_CASE(BSR)
30287 NODE_NAME_CASE(FSHL)
30288 NODE_NAME_CASE(FSHR)
30289 NODE_NAME_CASE(FAND)
30290 NODE_NAME_CASE(FANDN)
30291 NODE_NAME_CASE(FOR)
30292 NODE_NAME_CASE(FXOR)
30293 NODE_NAME_CASE(FILD)
30294 NODE_NAME_CASE(FIST)
30295 NODE_NAME_CASE(FP_TO_INT_IN_MEM)
30296 NODE_NAME_CASE(FLD)
30297 NODE_NAME_CASE(FST)
30298 NODE_NAME_CASE(CALL)
30300 NODE_NAME_CASE(CMP)
30301 NODE_NAME_CASE(FCMP)
30302 NODE_NAME_CASE(STRICT_FCMP)
30303 NODE_NAME_CASE(STRICT_FCMPS)
30304 NODE_NAME_CASE(COMI)
30305 NODE_NAME_CASE(UCOMI)
30306 NODE_NAME_CASE(CMPM)
30307 NODE_NAME_CASE(STRICT_CMPM)
30308 NODE_NAME_CASE(CMPM_SAE)
30309 NODE_NAME_CASE(SETCC)
30310 NODE_NAME_CASE(SETCC_CARRY)
30311 NODE_NAME_CASE(FSETCC)
30312 NODE_NAME_CASE(FSETCCM)
30313 NODE_NAME_CASE(FSETCCM_SAE)
30314 NODE_NAME_CASE(CMOV)
30315 NODE_NAME_CASE(BRCOND)
30316 NODE_NAME_CASE(RET_FLAG)
30317 NODE_NAME_CASE(IRET)
30318 NODE_NAME_CASE(REP_STOS)
30319 NODE_NAME_CASE(REP_MOVS)
30320 NODE_NAME_CASE(GlobalBaseReg)
30321 NODE_NAME_CASE(Wrapper)
30322 NODE_NAME_CASE(WrapperRIP)
30323 NODE_NAME_CASE(MOVQ2DQ)
30324 NODE_NAME_CASE(MOVDQ2Q)
30325 NODE_NAME_CASE(MMX_MOVD2W)
30326 NODE_NAME_CASE(MMX_MOVW2D)
30327 NODE_NAME_CASE(PEXTRB)
30328 NODE_NAME_CASE(PEXTRW)
30329 NODE_NAME_CASE(INSERTPS)
30330 NODE_NAME_CASE(PINSRB)
30331 NODE_NAME_CASE(PINSRW)
30332 NODE_NAME_CASE(PSHUFB)
30333 NODE_NAME_CASE(ANDNP)
30334 NODE_NAME_CASE(BLENDI)
30335 NODE_NAME_CASE(BLENDV)
30336 NODE_NAME_CASE(HADD)
30337 NODE_NAME_CASE(HSUB)
30338 NODE_NAME_CASE(FHADD)
30339 NODE_NAME_CASE(FHSUB)
30340 NODE_NAME_CASE(CONFLICT)
30341 NODE_NAME_CASE(FMAX)
30342 NODE_NAME_CASE(FMAXS)
30343 NODE_NAME_CASE(FMAX_SAE)
30344 NODE_NAME_CASE(FMAXS_SAE)
30345 NODE_NAME_CASE(FMIN)
30346 NODE_NAME_CASE(FMINS)
30347 NODE_NAME_CASE(FMIN_SAE)
30348 NODE_NAME_CASE(FMINS_SAE)
30349 NODE_NAME_CASE(FMAXC)
30350 NODE_NAME_CASE(FMINC)
30351 NODE_NAME_CASE(FRSQRT)
30352 NODE_NAME_CASE(FRCP)
30353 NODE_NAME_CASE(EXTRQI)
30354 NODE_NAME_CASE(INSERTQI)
30355 NODE_NAME_CASE(TLSADDR)
30356 NODE_NAME_CASE(TLSBASEADDR)
30357 NODE_NAME_CASE(TLSCALL)
30358 NODE_NAME_CASE(EH_SJLJ_SETJMP)
30359 NODE_NAME_CASE(EH_SJLJ_LONGJMP)
30360 NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
30361 NODE_NAME_CASE(EH_RETURN)
30362 NODE_NAME_CASE(TC_RETURN)
30363 NODE_NAME_CASE(FNSTCW16m)
30364 NODE_NAME_CASE(LCMPXCHG_DAG)
30365 NODE_NAME_CASE(LCMPXCHG8_DAG)
30366 NODE_NAME_CASE(LCMPXCHG16_DAG)
30367 NODE_NAME_CASE(LCMPXCHG8_SAVE_EBX_DAG)
30368 NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
30369 NODE_NAME_CASE(LADD)
30370 NODE_NAME_CASE(LSUB)
30371 NODE_NAME_CASE(LOR)
30372 NODE_NAME_CASE(LXOR)
30373 NODE_NAME_CASE(LAND)
30374 NODE_NAME_CASE(VZEXT_MOVL)
30375 NODE_NAME_CASE(VZEXT_LOAD)
30376 NODE_NAME_CASE(VEXTRACT_STORE)
30377 NODE_NAME_CASE(VTRUNC)
30378 NODE_NAME_CASE(VTRUNCS)
30379 NODE_NAME_CASE(VTRUNCUS)
30380 NODE_NAME_CASE(VMTRUNC)
30381 NODE_NAME_CASE(VMTRUNCS)
30382 NODE_NAME_CASE(VMTRUNCUS)
30383 NODE_NAME_CASE(VTRUNCSTORES)
30384 NODE_NAME_CASE(VTRUNCSTOREUS)
30385 NODE_NAME_CASE(VMTRUNCSTORES)
30386 NODE_NAME_CASE(VMTRUNCSTOREUS)
30387 NODE_NAME_CASE(VFPEXT)
30388 NODE_NAME_CASE(STRICT_VFPEXT)
30389 NODE_NAME_CASE(VFPEXT_SAE)
30390 NODE_NAME_CASE(VFPEXTS)
30391 NODE_NAME_CASE(VFPEXTS_SAE)
30392 NODE_NAME_CASE(VFPROUND)
30393 NODE_NAME_CASE(STRICT_VFPROUND)
30394 NODE_NAME_CASE(VMFPROUND)
30395 NODE_NAME_CASE(VFPROUND_RND)
30396 NODE_NAME_CASE(VFPROUNDS)
30397 NODE_NAME_CASE(VFPROUNDS_RND)
30398 NODE_NAME_CASE(VSHLDQ)
30399 NODE_NAME_CASE(VSRLDQ)
30400 NODE_NAME_CASE(VSHL)
30401 NODE_NAME_CASE(VSRL)
30402 NODE_NAME_CASE(VSRA)
30403 NODE_NAME_CASE(VSHLI)
30404 NODE_NAME_CASE(VSRLI)
30405 NODE_NAME_CASE(VSRAI)
30406 NODE_NAME_CASE(VSHLV)
30407 NODE_NAME_CASE(VSRLV)
30408 NODE_NAME_CASE(VSRAV)
30409 NODE_NAME_CASE(VROTLI)
30410 NODE_NAME_CASE(VROTRI)
30411 NODE_NAME_CASE(VPPERM)
30412 NODE_NAME_CASE(CMPP)
30413 NODE_NAME_CASE(STRICT_CMPP)
30414 NODE_NAME_CASE(PCMPEQ)
30415 NODE_NAME_CASE(PCMPGT)
30416 NODE_NAME_CASE(PHMINPOS)
30417 NODE_NAME_CASE(ADD)
30418 NODE_NAME_CASE(SUB)
30419 NODE_NAME_CASE(ADC)
30420 NODE_NAME_CASE(SBB)
30421 NODE_NAME_CASE(SMUL)
30422 NODE_NAME_CASE(UMUL)
30424 NODE_NAME_CASE(XOR)
30425 NODE_NAME_CASE(AND)
30426 NODE_NAME_CASE(BEXTR)
30427 NODE_NAME_CASE(BZHI)
30428 NODE_NAME_CASE(PDEP)
30429 NODE_NAME_CASE(PEXT)
30430 NODE_NAME_CASE(MUL_IMM)
30431 NODE_NAME_CASE(MOVMSK)
30432 NODE_NAME_CASE(PTEST)
30433 NODE_NAME_CASE(TESTP)
30434 NODE_NAME_CASE(KORTEST)
30435 NODE_NAME_CASE(KTEST)
30436 NODE_NAME_CASE(KADD)
30437 NODE_NAME_CASE(KSHIFTL)
30438 NODE_NAME_CASE(KSHIFTR)
30439 NODE_NAME_CASE(PACKSS)
30440 NODE_NAME_CASE(PACKUS)
30441 NODE_NAME_CASE(PALIGNR)
30442 NODE_NAME_CASE(VALIGN)
30443 NODE_NAME_CASE(VSHLD)
30444 NODE_NAME_CASE(VSHRD)
30445 NODE_NAME_CASE(VSHLDV)
30446 NODE_NAME_CASE(VSHRDV)
30447 NODE_NAME_CASE(PSHUFD)
30448 NODE_NAME_CASE(PSHUFHW)
30449 NODE_NAME_CASE(PSHUFLW)
30450 NODE_NAME_CASE(SHUFP)
30451 NODE_NAME_CASE(SHUF128)
30452 NODE_NAME_CASE(MOVLHPS)
30453 NODE_NAME_CASE(MOVHLPS)
30454 NODE_NAME_CASE(MOVDDUP)
30455 NODE_NAME_CASE(MOVSHDUP)
30456 NODE_NAME_CASE(MOVSLDUP)
30457 NODE_NAME_CASE(MOVSD)
30458 NODE_NAME_CASE(MOVSS)
30459 NODE_NAME_CASE(UNPCKL)
30460 NODE_NAME_CASE(UNPCKH)
30461 NODE_NAME_CASE(VBROADCAST)
30462 NODE_NAME_CASE(VBROADCAST_LOAD)
30463 NODE_NAME_CASE(VBROADCASTM)
30464 NODE_NAME_CASE(SUBV_BROADCAST)
30465 NODE_NAME_CASE(VPERMILPV)
30466 NODE_NAME_CASE(VPERMILPI)
30467 NODE_NAME_CASE(VPERM2X128)
30468 NODE_NAME_CASE(VPERMV)
30469 NODE_NAME_CASE(VPERMV3)
30470 NODE_NAME_CASE(VPERMI)
30471 NODE_NAME_CASE(VPTERNLOG)
30472 NODE_NAME_CASE(VFIXUPIMM)
30473 NODE_NAME_CASE(VFIXUPIMM_SAE)
30474 NODE_NAME_CASE(VFIXUPIMMS)
30475 NODE_NAME_CASE(VFIXUPIMMS_SAE)
30476 NODE_NAME_CASE(VRANGE)
30477 NODE_NAME_CASE(VRANGE_SAE)
30478 NODE_NAME_CASE(VRANGES)
30479 NODE_NAME_CASE(VRANGES_SAE)
30480 NODE_NAME_CASE(PMULUDQ)
30481 NODE_NAME_CASE(PMULDQ)
30482 NODE_NAME_CASE(PSADBW)
30483 NODE_NAME_CASE(DBPSADBW)
30484 NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
30485 NODE_NAME_CASE(VAARG_64)
30486 NODE_NAME_CASE(WIN_ALLOCA)
30487 NODE_NAME_CASE(MEMBARRIER)
30488 NODE_NAME_CASE(MFENCE)
30489 NODE_NAME_CASE(SEG_ALLOCA)
30490 NODE_NAME_CASE(PROBED_ALLOCA)
30491 NODE_NAME_CASE(RDRAND)
30492 NODE_NAME_CASE(RDSEED)
30493 NODE_NAME_CASE(RDPKRU)
30494 NODE_NAME_CASE(WRPKRU)
30495 NODE_NAME_CASE(VPMADDUBSW)
30496 NODE_NAME_CASE(VPMADDWD)
30497 NODE_NAME_CASE(VPSHA)
30498 NODE_NAME_CASE(VPSHL)
30499 NODE_NAME_CASE(VPCOM)
30500 NODE_NAME_CASE(VPCOMU)
30501 NODE_NAME_CASE(VPERMIL2)
30502 NODE_NAME_CASE(FMSUB)
30503 NODE_NAME_CASE(STRICT_FMSUB)
30504 NODE_NAME_CASE(FNMADD)
30505 NODE_NAME_CASE(STRICT_FNMADD)
30506 NODE_NAME_CASE(FNMSUB)
30507 NODE_NAME_CASE(STRICT_FNMSUB)
30508 NODE_NAME_CASE(FMADDSUB)
30509 NODE_NAME_CASE(FMSUBADD)
30510 NODE_NAME_CASE(FMADD_RND)
30511 NODE_NAME_CASE(FNMADD_RND)
30512 NODE_NAME_CASE(FMSUB_RND)
30513 NODE_NAME_CASE(FNMSUB_RND)
30514 NODE_NAME_CASE(FMADDSUB_RND)
30515 NODE_NAME_CASE(FMSUBADD_RND)
30516 NODE_NAME_CASE(VPMADD52H)
30517 NODE_NAME_CASE(VPMADD52L)
30518 NODE_NAME_CASE(VRNDSCALE)
30519 NODE_NAME_CASE(STRICT_VRNDSCALE)
30520 NODE_NAME_CASE(VRNDSCALE_SAE)
30521 NODE_NAME_CASE(VRNDSCALES)
30522 NODE_NAME_CASE(VRNDSCALES_SAE)
30523 NODE_NAME_CASE(VREDUCE)
30524 NODE_NAME_CASE(VREDUCE_SAE)
30525 NODE_NAME_CASE(VREDUCES)
30526 NODE_NAME_CASE(VREDUCES_SAE)
30527 NODE_NAME_CASE(VGETMANT)
30528 NODE_NAME_CASE(VGETMANT_SAE)
30529 NODE_NAME_CASE(VGETMANTS)
30530 NODE_NAME_CASE(VGETMANTS_SAE)
30531 NODE_NAME_CASE(PCMPESTR)
30532 NODE_NAME_CASE(PCMPISTR)
30533 NODE_NAME_CASE(XTEST)
30534 NODE_NAME_CASE(COMPRESS)
30535 NODE_NAME_CASE(EXPAND)
30536 NODE_NAME_CASE(SELECTS)
30537 NODE_NAME_CASE(ADDSUB)
30538 NODE_NAME_CASE(RCP14)
30539 NODE_NAME_CASE(RCP14S)
30540 NODE_NAME_CASE(RCP28)
30541 NODE_NAME_CASE(RCP28_SAE)
30542 NODE_NAME_CASE(RCP28S)
30543 NODE_NAME_CASE(RCP28S_SAE)
30544 NODE_NAME_CASE(EXP2)
30545 NODE_NAME_CASE(EXP2_SAE)
30546 NODE_NAME_CASE(RSQRT14)
30547 NODE_NAME_CASE(RSQRT14S)
30548 NODE_NAME_CASE(RSQRT28)
30549 NODE_NAME_CASE(RSQRT28_SAE)
30550 NODE_NAME_CASE(RSQRT28S)
30551 NODE_NAME_CASE(RSQRT28S_SAE)
30552 NODE_NAME_CASE(FADD_RND)
30553 NODE_NAME_CASE(FADDS)
30554 NODE_NAME_CASE(FADDS_RND)
30555 NODE_NAME_CASE(FSUB_RND)
30556 NODE_NAME_CASE(FSUBS)
30557 NODE_NAME_CASE(FSUBS_RND)
30558 NODE_NAME_CASE(FMUL_RND)
30559 NODE_NAME_CASE(FMULS)
30560 NODE_NAME_CASE(FMULS_RND)
30561 NODE_NAME_CASE(FDIV_RND)
30562 NODE_NAME_CASE(FDIVS)
30563 NODE_NAME_CASE(FDIVS_RND)
30564 NODE_NAME_CASE(FSQRT_RND)
30565 NODE_NAME_CASE(FSQRTS)
30566 NODE_NAME_CASE(FSQRTS_RND)
30567 NODE_NAME_CASE(FGETEXP)
30568 NODE_NAME_CASE(FGETEXP_SAE)
30569 NODE_NAME_CASE(FGETEXPS)
30570 NODE_NAME_CASE(FGETEXPS_SAE)
30571 NODE_NAME_CASE(SCALEF)
30572 NODE_NAME_CASE(SCALEF_RND)
30573 NODE_NAME_CASE(SCALEFS)
30574 NODE_NAME_CASE(SCALEFS_RND)
30575 NODE_NAME_CASE(AVG)
30576 NODE_NAME_CASE(MULHRS)
30577 NODE_NAME_CASE(SINT_TO_FP_RND)
30578 NODE_NAME_CASE(UINT_TO_FP_RND)
30579 NODE_NAME_CASE(CVTTP2SI)
30580 NODE_NAME_CASE(CVTTP2UI)
30581 NODE_NAME_CASE(STRICT_CVTTP2SI)
30582 NODE_NAME_CASE(STRICT_CVTTP2UI)
30583 NODE_NAME_CASE(MCVTTP2SI)
30584 NODE_NAME_CASE(MCVTTP2UI)
30585 NODE_NAME_CASE(CVTTP2SI_SAE)
30586 NODE_NAME_CASE(CVTTP2UI_SAE)
30587 NODE_NAME_CASE(CVTTS2SI)
30588 NODE_NAME_CASE(CVTTS2UI)
30589 NODE_NAME_CASE(CVTTS2SI_SAE)
30590 NODE_NAME_CASE(CVTTS2UI_SAE)
30591 NODE_NAME_CASE(CVTSI2P)
30592 NODE_NAME_CASE(CVTUI2P)
30593 NODE_NAME_CASE(STRICT_CVTSI2P)
30594 NODE_NAME_CASE(STRICT_CVTUI2P)
30595 NODE_NAME_CASE(MCVTSI2P)
30596 NODE_NAME_CASE(MCVTUI2P)
30597 NODE_NAME_CASE(VFPCLASS)
30598 NODE_NAME_CASE(VFPCLASSS)
30599 NODE_NAME_CASE(MULTISHIFT)
30600 NODE_NAME_CASE(SCALAR_SINT_TO_FP)
30601 NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
30602 NODE_NAME_CASE(SCALAR_UINT_TO_FP)
30603 NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
30604 NODE_NAME_CASE(CVTPS2PH)
30605 NODE_NAME_CASE(STRICT_CVTPS2PH)
30606 NODE_NAME_CASE(MCVTPS2PH)
30607 NODE_NAME_CASE(CVTPH2PS)
30608 NODE_NAME_CASE(STRICT_CVTPH2PS)
30609 NODE_NAME_CASE(CVTPH2PS_SAE)
30610 NODE_NAME_CASE(CVTP2SI)
30611 NODE_NAME_CASE(CVTP2UI)
30612 NODE_NAME_CASE(MCVTP2SI)
30613 NODE_NAME_CASE(MCVTP2UI)
30614 NODE_NAME_CASE(CVTP2SI_RND)
30615 NODE_NAME_CASE(CVTP2UI_RND)
30616 NODE_NAME_CASE(CVTS2SI)
30617 NODE_NAME_CASE(CVTS2UI)
30618 NODE_NAME_CASE(CVTS2SI_RND)
30619 NODE_NAME_CASE(CVTS2UI_RND)
30620 NODE_NAME_CASE(CVTNE2PS2BF16)
30621 NODE_NAME_CASE(CVTNEPS2BF16)
30622 NODE_NAME_CASE(MCVTNEPS2BF16)
30623 NODE_NAME_CASE(DPBF16PS)
30624 NODE_NAME_CASE(LWPINS)
30625 NODE_NAME_CASE(MGATHER)
30626 NODE_NAME_CASE(MSCATTER)
30627 NODE_NAME_CASE(VPDPBUSD)
30628 NODE_NAME_CASE(VPDPBUSDS)
30629 NODE_NAME_CASE(VPDPWSSD)
30630 NODE_NAME_CASE(VPDPWSSDS)
30631 NODE_NAME_CASE(VPSHUFBITQMB)
30632 NODE_NAME_CASE(GF2P8MULB)
30633 NODE_NAME_CASE(GF2P8AFFINEQB)
30634 NODE_NAME_CASE(GF2P8AFFINEINVQB)
30635 NODE_NAME_CASE(NT_CALL)
30636 NODE_NAME_CASE(NT_BRIND)
30637 NODE_NAME_CASE(UMWAIT)
30638 NODE_NAME_CASE(TPAUSE)
30639 NODE_NAME_CASE(ENQCMD)
30640 NODE_NAME_CASE(ENQCMDS)
30641 NODE_NAME_CASE(VP2INTERSECT)
30644 #undef NODE_NAME_CASE
30647 /// Return true if the addressing mode represented by AM is legal for this
30648 /// target, for a load/store of the specified type.
30649 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
30650 const AddrMode &AM, Type *Ty,
30652 Instruction *I) const {
30653 // X86 supports extremely general addressing modes.
30654 CodeModel::Model M = getTargetMachine().getCodeModel();
30656 // X86 allows a sign-extended 32-bit immediate field as a displacement.
30657 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
30661 unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
30663 // If a reference to this global requires an extra load, we can't fold it.
30664 if (isGlobalStubReference(GVFlags))
30667 // If BaseGV requires a register for the PIC base, we cannot also have a
30668 // BaseReg specified.
30669 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
30672 // If lower 4G is not available, then we must use rip-relative addressing.
30673 if ((M != CodeModel::Small || isPositionIndependent()) &&
30674 Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
30678 switch (AM.Scale) {
30684 // These scales always work.
30689 // These scales are formed with basereg+scalereg. Only accept if there is no basereg yet.
30694 default: // Other stuff never works.
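// (Illustrative note, not from the original source.) The mode being checked
// above is the general x86 form [BaseGV + BaseReg + Scale*IndexReg + Disp],
// e.g. in AT&T syntax:
//   movl 16(%rdi,%rcx,4), %eax   ; BaseReg=%rdi, IndexReg=%rcx, Scale=4, Disp=16
// Scales of 1, 2, 4 and 8 map directly onto SIB encodings; 3, 5 and 9 are
// only usable when no separate base register is present, since they are
// realized as Reg + {2,4,8}*Reg.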
30701 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
30702 unsigned Bits = Ty->getScalarSizeInBits();
30704 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
30705 // particularly cheaper than those without.
30709 // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
30710 // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
30711 if (Subtarget.hasXOP() &&
30712 (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
30715 // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
30716 // shifts just as cheap as scalar ones.
30717 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
30720 // AVX512BW has shifts such as vpsllvw.
30721 if (Subtarget.hasBWI() && Bits == 16)
30724 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
30725 // fully general vector.
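// (Illustrative example, not from the original source.) On SSE2 a splatted
// shift amount can use the shift-by-scalar form, e.g.
//   psrld %xmm1, %xmm0   ; all lanes of xmm0 shifted by the scalar count in xmm1
// whereas a genuinely per-element shift needs AVX2's vpsrlvd/vpsrlvq (or
// scalarization), which is the distinction the checks above encode.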
30729 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
30731 // These are non-commutative binops.
30732 // TODO: Add more X86ISD opcodes once we have test coverage.
30733 case X86ISD::ANDNP:
30734 case X86ISD::PCMPGT:
30737 case X86ISD::FANDN:
30741 return TargetLoweringBase::isBinOp(Opcode);
30744 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
30746 // TODO: Add more X86ISD opcodes once we have test coverage.
30747 case X86ISD::PCMPEQ:
30748 case X86ISD::PMULDQ:
30749 case X86ISD::PMULUDQ:
30750 case X86ISD::FMAXC:
30751 case X86ISD::FMINC:
30758 return TargetLoweringBase::isCommutativeBinOp(Opcode);
30761 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
30762 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
30764 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
30765 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
30766 return NumBits1 > NumBits2;
30769 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
30770 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
30773 if (!isTypeLegal(EVT::getEVT(Ty1)))
30776 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
30778 // Assuming the caller doesn't have a zeroext or signext return parameter,
30779 // truncation all the way down to i1 is valid.
30783 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
30784 return isInt<32>(Imm);
30787 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
30788 // Can also use sub to handle negated immediates.
30789 return isInt<32>(Imm);
30792 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
30793 return isInt<32>(Imm);
30796 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
30797 if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
30799 unsigned NumBits1 = VT1.getSizeInBits();
30800 unsigned NumBits2 = VT2.getSizeInBits();
30801 return NumBits1 > NumBits2;
30804 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
30805 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
30806 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
30809 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
30810 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
30811 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
30814 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
30815 EVT VT1 = Val.getValueType();
30816 if (isZExtFree(VT1, VT2))
30819 if (Val.getOpcode() != ISD::LOAD)
30822 if (!VT1.isSimple() || !VT1.isInteger() ||
30823 !VT2.isSimple() || !VT2.isInteger())
30826 switch (VT1.getSimpleVT().SimpleTy) {
30831 // X86 has 8, 16, and 32-bit zero-extending loads.
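// (Illustrative, not from the original source.) E.g. movzbl (%rdi), %eax
// zero-extends an i8 load directly into a 32-bit register (and implicitly
// into the full 64-bit register), so the extension costs nothing extra.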
30838 bool X86TargetLowering::shouldSinkOperands(Instruction *I,
30839 SmallVectorImpl<Use *> &Ops) const {
30840 // A uniform shift amount in a vector shift or funnel shift may be much
30841 // cheaper than a generic variable vector shift, so make that pattern visible
30842 // to SDAG by sinking the shuffle instruction next to the shift.
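// (Hypothetical IR sketch, not from the original source.) The pattern made
// visible by the sinking described above looks like:
//   %amt.ins   = insertelement <4 x i32> undef, i32 %amt, i32 0
//   %amt.splat = shufflevector <4 x i32> %amt.ins, <4 x i32> undef,
//                              <4 x i32> zeroinitializer
//   %r         = shl <4 x i32> %x, %amt.splat
// With the shufflevector adjacent to the shl, instruction selection can emit
// a shift-by-scalar instead of a general variable vector shift.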
30843 int ShiftAmountOpNum = -1;
30845 ShiftAmountOpNum = 1;
30846 else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
30847 if (II->getIntrinsicID() == Intrinsic::fshl ||
30848 II->getIntrinsicID() == Intrinsic::fshr)
30849 ShiftAmountOpNum = 2;
30852 if (ShiftAmountOpNum == -1)
30855 auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
30856 if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
30857 isVectorShiftByScalarCheap(I->getType())) {
30858 Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
30865 bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
30866 if (!Subtarget.is64Bit())
30868 return TargetLowering::shouldConvertPhiType(From, To);
30871 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
30872 if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
30875 EVT SrcVT = ExtVal.getOperand(0).getValueType();
30877 // There is no extending load for vXi1.
30878 if (SrcVT.getScalarType() == MVT::i1)
30884 bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
30886 if (!Subtarget.hasAnyFMA())
30889 VT = VT.getScalarType();
30891 if (!VT.isSimple())
30894 switch (VT.getSimpleVT().SimpleTy) {
30905 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
30906 // i16 instructions are longer (0x66 prefix) and potentially slower.
30907 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
30910 /// Targets can use this to indicate that they only support *some*
30911 /// VECTOR_SHUFFLE operations, those with specific masks.
30912 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
30913 /// are assumed to be legal.
30914 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
30915 if (!VT.isSimple())
30918 // Not for i1 vectors
30919 if (VT.getSimpleVT().getScalarType() == MVT::i1)
30922 // Very little shuffling can be done for 64-bit vectors right now.
30923 if (VT.getSimpleVT().getSizeInBits() == 64)
30926 // We only care that the types being shuffled are legal. The lowering can
30927 // handle any possible shuffle mask that results.
30928 return isTypeLegal(VT.getSimpleVT());
30931 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
30933 // Don't convert an 'and' into a shuffle that we don't directly support.
30934 // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
30935 if (!Subtarget.hasAVX2())
30936 if (VT == MVT::v32i8 || VT == MVT::v16i16)
30939 // Just delegate to the generic legality; clear masks aren't special.
30940 return isShuffleMaskLegal(Mask, VT);
30943 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
30944 // If the subtarget is using thunks, we must not generate jump tables.
30945 if (Subtarget.useIndirectThunkBranches())
30948 // Otherwise, fall back on the generic logic.
30949 return TargetLowering::areJTsAllowed(Fn);
30952 //===----------------------------------------------------------------------===//
30953 // X86 Scheduler Hooks
30954 //===----------------------------------------------------------------------===//
30956 // Returns true if EFLAGS is consumed after this iterator in the rest of the
30957 // basic block or any successors of the basic block.
30958 static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
30959 MachineBasicBlock *BB) {
30960 // Scan forward through BB for a use/def of EFLAGS.
30961 for (MachineBasicBlock::iterator miI = std::next(Itr), miE = BB->end();
30962 miI != miE; ++miI) {
30963 const MachineInstr& mi = *miI;
30964 if (mi.readsRegister(X86::EFLAGS))
30966 // If we found a def, we can stop searching.
30967 if (mi.definesRegister(X86::EFLAGS))
30971 // If we hit the end of the block, check whether EFLAGS is live into a successor.
30973 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
30974 sEnd = BB->succ_end();
30975 sItr != sEnd; ++sItr) {
30976 MachineBasicBlock* succ = *sItr;
30977 if (succ->isLiveIn(X86::EFLAGS))
30984 /// Utility function to emit xbegin specifying the start of an RTM region.
30985 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
30986 const TargetInstrInfo *TII) {
30987 DebugLoc DL = MI.getDebugLoc();
30989 const BasicBlock *BB = MBB->getBasicBlock();
30990 MachineFunction::iterator I = ++MBB->getIterator();
30992 // For the v = xbegin(), we generate
31001 // eax = # XABORT_DEF
31005 // v = phi(s0/mainBB, s1/fallBB)
31007 MachineBasicBlock *thisMBB = MBB;
31008 MachineFunction *MF = MBB->getParent();
31009 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
31010 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
31011 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
31012 MF->insert(I, mainMBB);
31013 MF->insert(I, fallMBB);
31014 MF->insert(I, sinkMBB);
31016 if (isEFLAGSLiveAfter(MI, MBB)) {
31017 mainMBB->addLiveIn(X86::EFLAGS);
31018 fallMBB->addLiveIn(X86::EFLAGS);
31019 sinkMBB->addLiveIn(X86::EFLAGS);
31022 // Transfer the remainder of BB and its successor edges to sinkMBB.
31023 sinkMBB->splice(sinkMBB->begin(), MBB,
31024 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
31025 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
31027 MachineRegisterInfo &MRI = MF->getRegInfo();
31028 Register DstReg = MI.getOperand(0).getReg();
31029 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
31030 Register mainDstReg = MRI.createVirtualRegister(RC);
31031 Register fallDstReg = MRI.createVirtualRegister(RC);
31035 // # fallthrough to mainMBB
31036 // # abort path to fallMBB
31037 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
31038 thisMBB->addSuccessor(mainMBB);
31039 thisMBB->addSuccessor(fallMBB);
31042 // mainDstReg := -1
31043 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
31044 BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
31045 mainMBB->addSuccessor(sinkMBB);
31048 // ; pseudo instruction to model hardware's definition from XABORT
31049 // EAX := XABORT_DEF
31050 // fallDstReg := EAX
31051 BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
31052 BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
31054 fallMBB->addSuccessor(sinkMBB);
31057 // DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
31058 BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
31059 .addReg(mainDstReg).addMBB(mainMBB)
31060 .addReg(fallDstReg).addMBB(fallMBB);
31062 MI.eraseFromParent();
31068 MachineBasicBlock *
31069 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
31070 MachineBasicBlock *MBB) const {
31071 // Emit va_arg instruction on X86-64.
31073 // Operands to this pseudo-instruction:
31074 // 0 ) Output : destination address (reg)
31075 // 1-5) Input : va_list address (addr, i64mem)
31076 // 6 ) ArgSize : Size (in bytes) of vararg type
31077 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
31078 // 8 ) Align : Alignment of type
31079 // 9 ) EFLAGS (implicit-def)
31081 assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
31082 static_assert(X86::AddrNumOperands == 5,
31083 "VAARG_64 assumes 5 address operands");
31085 Register DestReg = MI.getOperand(0).getReg();
31086 MachineOperand &Base = MI.getOperand(1);
31087 MachineOperand &Scale = MI.getOperand(2);
31088 MachineOperand &Index = MI.getOperand(3);
31089 MachineOperand &Disp = MI.getOperand(4);
31090 MachineOperand &Segment = MI.getOperand(5);
31091 unsigned ArgSize = MI.getOperand(6).getImm();
31092 unsigned ArgMode = MI.getOperand(7).getImm();
31093 Align Alignment = Align(MI.getOperand(8).getImm());
31095 MachineFunction *MF = MBB->getParent();
31097 // Memory Reference
31098 assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
31100 MachineMemOperand *OldMMO = MI.memoperands().front();
31102 // Clone the MMO into two separate MMOs for loading and storing
31103 MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
31104 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
31105 MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
31106 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
31108 // Machine Information
31109 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31110 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
31111 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
31112 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
31113 DebugLoc DL = MI.getDebugLoc();
31115 // struct va_list {
31118 // i64 overflow_area (address)
31119 // i64 reg_save_area (address)
31121 // sizeof(va_list) = 24
31122 // alignment(va_list) = 8
31124 unsigned TotalNumIntRegs = 6;
31125 unsigned TotalNumXMMRegs = 8;
31126 bool UseGPOffset = (ArgMode == 1);
31127 bool UseFPOffset = (ArgMode == 2);
31128 unsigned MaxOffset = TotalNumIntRegs * 8 +
31129 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
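// (Derived from the constants above; not in the original comments.)
// MaxOffset is 6*8 = 48 when pulling a GP argument from the save area and
// 48 + 8*16 = 176 when pulling an FP argument from the XMM portion.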
31131 /* Align ArgSize to a multiple of 8 */
31132 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
31133 bool NeedsAlign = (Alignment > 8);
31135 MachineBasicBlock *thisMBB = MBB;
31136 MachineBasicBlock *overflowMBB;
31137 MachineBasicBlock *offsetMBB;
31138 MachineBasicBlock *endMBB;
31140 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
31141 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
31142 unsigned OffsetReg = 0;
31144 if (!UseGPOffset && !UseFPOffset) {
31145 // If we only pull from the overflow region, we don't create a branch.
31146 // We don't need to alter control flow.
31147 OffsetDestReg = 0; // unused
31148 OverflowDestReg = DestReg;
31150 offsetMBB = nullptr;
31151 overflowMBB = thisMBB;
31154 // First emit code to check if gp_offset (or fp_offset) is below the bound.
31155 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
31156 // If not, pull from overflow_area. (branch to overflowMBB)
31161 // offsetMBB overflowMBB
31166 // Registers for the PHI in endMBB
31167 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
31168 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
31170 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
31171 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31172 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31173 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31175 MachineFunction::iterator MBBIter = ++MBB->getIterator();
31177 // Insert the new basic blocks
31178 MF->insert(MBBIter, offsetMBB);
31179 MF->insert(MBBIter, overflowMBB);
31180 MF->insert(MBBIter, endMBB);
31182 // Transfer the remainder of MBB and its successor edges to endMBB.
31183 endMBB->splice(endMBB->begin(), thisMBB,
31184 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
31185 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
31187 // Make offsetMBB and overflowMBB successors of thisMBB
31188 thisMBB->addSuccessor(offsetMBB);
31189 thisMBB->addSuccessor(overflowMBB);
31191 // endMBB is a successor of both offsetMBB and overflowMBB
31192 offsetMBB->addSuccessor(endMBB);
31193 overflowMBB->addSuccessor(endMBB);
31195 // Load the offset value into a register
31196 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
31197 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
31201 .addDisp(Disp, UseFPOffset ? 4 : 0)
31203 .setMemRefs(LoadOnlyMMO);
31205 // Check if there is enough room left to pull this argument.
31206 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
31208 .addImm(MaxOffset + 8 - ArgSizeA8);
31210 // Branch to "overflowMBB" if offset >= max
31211 // Fall through to "offsetMBB" otherwise
31212 BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
31213 .addMBB(overflowMBB).addImm(X86::COND_AE);
31216 // In offsetMBB, emit code to use the reg_save_area.
31218 assert(OffsetReg != 0);
31220 // Read the reg_save_area address.
31221 Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
31222 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
31228 .setMemRefs(LoadOnlyMMO);
31230 // Zero-extend the offset
31231 Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
31232 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
31235 .addImm(X86::sub_32bit);
31237 // Add the offset to the reg_save_area to get the final address.
31238 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
31239 .addReg(OffsetReg64)
31240 .addReg(RegSaveReg);
31242 // Compute the offset for the next argument
31243 Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
31244 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
31246 .addImm(UseFPOffset ? 16 : 8);
31248 // Store it back into the va_list.
31249 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
31253 .addDisp(Disp, UseFPOffset ? 4 : 0)
31255 .addReg(NextOffsetReg)
31256 .setMemRefs(StoreOnlyMMO);
31259 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
31264 // Emit code to use overflow area
31267 // Load the overflow_area address into a register.
31268 Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
31269 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
31275 .setMemRefs(LoadOnlyMMO);
31277 // If we need to align it, do so. Otherwise, just copy the address
31278 // to OverflowDestReg.
31280 // Align the overflow address
31281 Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
31283 // aligned_addr = (addr + (align-1)) & ~(align-1)
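// e.g. (hypothetical values) addr = 0x1003, align = 16:
//   (0x1003 + 15) & ~15 = 0x1010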
31284 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
31285 .addReg(OverflowAddrReg)
31286 .addImm(Alignment.value() - 1);
31288 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
31290 .addImm(~(uint64_t)(Alignment.value() - 1));
31292 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
31293 .addReg(OverflowAddrReg);
31296 // Compute the next overflow address after this argument.
31297 // (the overflow address should be kept 8-byte aligned)
31298 Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
31299 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
31300 .addReg(OverflowDestReg)
31301 .addImm(ArgSizeA8);
31303 // Store the new overflow address.
31304 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
31310 .addReg(NextAddrReg)
31311 .setMemRefs(StoreOnlyMMO);
31313 // If we branched, emit the PHI to the front of endMBB.
31315 BuildMI(*endMBB, endMBB->begin(), DL,
31316 TII->get(X86::PHI), DestReg)
31317 .addReg(OffsetDestReg).addMBB(offsetMBB)
31318 .addReg(OverflowDestReg).addMBB(overflowMBB);
31321 // Erase the pseudo instruction
31322 MI.eraseFromParent();
31327 MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
31328 MachineInstr &MI, MachineBasicBlock *MBB) const {
31329 // Emit code to save XMM registers to the stack. The ABI says that the
31330 // number of registers to save is given in %al, so it's theoretically
31331 // possible to do an indirect jump trick to avoid saving all of them;
31332 // however, this code takes a simpler approach and just executes all
31333 // of the stores if %al is non-zero. It's less code, and it's probably
31334 // easier on the hardware branch predictor, and stores aren't all that
31335 // expensive anyway.
31337 // Create the new basic blocks. One block contains all the XMM stores,
31338 // and one block is the final destination regardless of whether any
31339 // stores were performed.
31340 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
31341 MachineFunction *F = MBB->getParent();
31342 MachineFunction::iterator MBBIter = ++MBB->getIterator();
31343 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
31344 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
31345 F->insert(MBBIter, XMMSaveMBB);
31346 F->insert(MBBIter, EndMBB);
31348 // Transfer the remainder of MBB and its successor edges to EndMBB.
31349 EndMBB->splice(EndMBB->begin(), MBB,
31350 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
31351 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
31353 // The original block will now fall through to the XMM save block.
31354 MBB->addSuccessor(XMMSaveMBB);
31355 // The XMMSaveMBB will fall through to the end block.
31356 XMMSaveMBB->addSuccessor(EndMBB);
31358 // Now add the instructions.
31359 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31360 DebugLoc DL = MI.getDebugLoc();
31362 Register CountReg = MI.getOperand(0).getReg();
31363 int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
31364 int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
31366 if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
31367 // If %al is 0, branch around the XMM save block.
31368 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
31369 BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
31370 MBB->addSuccessor(EndMBB);
31373 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
31374 // that was just emitted, but clearly shouldn't be "saved".
31375 assert((MI.getNumOperands() <= 3 ||
31376 !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
31377 MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
31378 "Expected last argument to be EFLAGS");
31379 unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
31380 // In the XMM save block, save all the XMM argument registers.
31381 for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
31382 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
31383 MachineMemOperand *MMO = F->getMachineMemOperand(
31384 MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
31385 MachineMemOperand::MOStore,
31386 /*Size=*/16, Align(16));
31387 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
31388 .addFrameIndex(RegSaveFrameIndex)
31389 .addImm(/*Scale=*/1)
31390 .addReg(/*IndexReg=*/0)
31391 .addImm(/*Disp=*/Offset)
31392 .addReg(/*Segment=*/0)
31393 .addReg(MI.getOperand(i).getReg())
31394 .addMemOperand(MMO);
31397 MI.eraseFromParent(); // The pseudo instruction is gone now.
31402 // The EFLAGS operand of SelectItr might be missing a kill marker
31403 // because there were multiple uses of EFLAGS, and ISel didn't know
31404 // which to mark. Figure out whether SelectItr should have had a
31405 // kill marker, and set it if it should. Returns the correct kill marker value.
31407 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
31408 MachineBasicBlock* BB,
31409 const TargetRegisterInfo* TRI) {
31410 if (isEFLAGSLiveAfter(SelectItr, BB))
31413 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
31414 // out. SelectMI should have a kill flag on EFLAGS.
31415 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
31419 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
31420 // together with other CMOV pseudo-opcodes into a single basic-block with
31421 // a conditional jump around it.
31422 static bool isCMOVPseudo(MachineInstr &MI) {
31423 switch (MI.getOpcode()) {
31424 case X86::CMOV_FR32:
31425 case X86::CMOV_FR32X:
31426 case X86::CMOV_FR64:
31427 case X86::CMOV_FR64X:
31428 case X86::CMOV_GR8:
31429 case X86::CMOV_GR16:
31430 case X86::CMOV_GR32:
31431 case X86::CMOV_RFP32:
31432 case X86::CMOV_RFP64:
31433 case X86::CMOV_RFP80:
31434 case X86::CMOV_VR64:
31435 case X86::CMOV_VR128:
31436 case X86::CMOV_VR128X:
31437 case X86::CMOV_VR256:
31438 case X86::CMOV_VR256X:
31439 case X86::CMOV_VR512:
31440 case X86::CMOV_VK1:
31441 case X86::CMOV_VK2:
31442 case X86::CMOV_VK4:
31443 case X86::CMOV_VK8:
31444 case X86::CMOV_VK16:
31445 case X86::CMOV_VK32:
31446 case X86::CMOV_VK64:
31454 // Helper function, which inserts PHI functions into SinkMBB:
31455 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
31456 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
31457 // in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for the
31458 // last PHI function inserted.
31459 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
31460 MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
31461 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
31462 MachineBasicBlock *SinkMBB) {
31463 MachineFunction *MF = TrueMBB->getParent();
31464 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
31465 DebugLoc DL = MIItBegin->getDebugLoc();
31467 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
31468 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
31470 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
31472 // As we are creating the PHIs, we have to be careful if there is more than
31473 // one. Later CMOVs may reference the results of earlier CMOVs, but later
31474 // PHIs have to reference the individual true/false inputs from earlier PHIs.
31475 // That also means that PHI construction must work forward from earlier to
31476 // later, and that the code must maintain a mapping from each earlier PHI's
31477 // destination register to the registers that went into that PHI.
31478 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
31479 MachineInstrBuilder MIB;
31481 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
31482 Register DestReg = MIIt->getOperand(0).getReg();
31483 Register Op1Reg = MIIt->getOperand(1).getReg();
31484 Register Op2Reg = MIIt->getOperand(2).getReg();
31486 // If the CMOV we are generating has the opposite condition from
31487 // the jump we generated, then we have to swap the operands for the
31488 // PHI that is going to be generated.
31489 if (MIIt->getOperand(3).getImm() == OppCC)
31490 std::swap(Op1Reg, Op2Reg);
31492 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
31493 Op1Reg = RegRewriteTable[Op1Reg].first;
31495 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
31496 Op2Reg = RegRewriteTable[Op2Reg].second;
31498 MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
31504 // Add this PHI to the rewrite table.
31505 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
31511 // Lower cascaded selects of the form (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
31512 MachineBasicBlock *
31513 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
31514 MachineInstr &SecondCascadedCMOV,
31515 MachineBasicBlock *ThisMBB) const {
31516 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31517 DebugLoc DL = FirstCMOV.getDebugLoc();
31519 // We lower cascaded CMOVs such as
31521 // (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
31523 // to two successive branches.
31525 // Without this, we would add a PHI between the two jumps, which ends up
31526 // creating a few copies all around. For instance, for
31528 // (sitofp (zext (fcmp une)))
31530 // we would generate:
31532 // ucomiss %xmm1, %xmm0
31533 // movss <1.0f>, %xmm0
31534 // movaps %xmm0, %xmm1
31536 // xorps %xmm1, %xmm1
31539 // movaps %xmm1, %xmm0
31543 // because this custom-inserter would have generated:
31555 // A: X = ...; Y = ...
31557 // C: Z = PHI [X, A], [Y, B]
31559 // E: PHI [X, C], [Z, D]
31561 // If we lower both CMOVs in a single step, we can instead generate:
31573 // A: X = ...; Y = ...
31575 // E: PHI [X, A], [X, C], [Y, D]
31577 // Which, in our sitofp/fcmp example, gives us something like:
31579 // ucomiss %xmm1, %xmm0
31580 // movss <1.0f>, %xmm0
31583 // xorps %xmm0, %xmm0
31588 // We lower cascaded CMOV into two successive branches to the same block.
31589 // EFLAGS is used by both, so mark it as live in the second.
31590 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
31591 MachineFunction *F = ThisMBB->getParent();
31592 MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
31593 MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
31594 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
31596 MachineFunction::iterator It = ++ThisMBB->getIterator();
31597 F->insert(It, FirstInsertedMBB);
31598 F->insert(It, SecondInsertedMBB);
31599 F->insert(It, SinkMBB);
31601 // For a cascaded CMOV, we lower it to two successive branches to
31602 // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
31603 // the FirstInsertedMBB.
31604 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
31606 // If the EFLAGS register isn't dead in the terminator, then claim that it's
31607 // live into the sink and copy blocks.
31608 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
31609 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
31610 !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
31611 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
31612 SinkMBB->addLiveIn(X86::EFLAGS);
31615 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
31616 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
31617 std::next(MachineBasicBlock::iterator(FirstCMOV)),
31619 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
31621 // Fallthrough block for ThisMBB.
31622 ThisMBB->addSuccessor(FirstInsertedMBB);
31623 // The true block target of the first branch is always SinkMBB.
31624 ThisMBB->addSuccessor(SinkMBB);
31625 // Fallthrough block for FirstInsertedMBB.
31626 FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
31627 // The true block for the branch of FirstInsertedMBB.
31628 FirstInsertedMBB->addSuccessor(SinkMBB);
31629 // This is fallthrough.
31630 SecondInsertedMBB->addSuccessor(SinkMBB);
31632 // Create the conditional branch instructions.
31633 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
31634 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
31636 X86::CondCode SecondCC =
31637 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
31638 BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
31641 // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
31642 Register DestReg = FirstCMOV.getOperand(0).getReg();
31643 Register Op1Reg = FirstCMOV.getOperand(1).getReg();
31644 Register Op2Reg = FirstCMOV.getOperand(2).getReg();
31645 MachineInstrBuilder MIB =
31646 BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
31648 .addMBB(SecondInsertedMBB)
31652 // SecondInsertedMBB provides the same incoming value as the
31653 // FirstInsertedMBB (the True operand of the SELECT_CC/CMOV nodes).
31654 MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
31655 // Copy the PHI result to the register defined by the second CMOV.
31656 BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
31657 TII->get(TargetOpcode::COPY),
31658 SecondCascadedCMOV.getOperand(0).getReg())
31659 .addReg(FirstCMOV.getOperand(0).getReg());
31661 // Now remove the CMOVs.
31662 FirstCMOV.eraseFromParent();
31663 SecondCascadedCMOV.eraseFromParent();
31668 MachineBasicBlock *
31669 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
31670 MachineBasicBlock *ThisMBB) const {
31671 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31672 DebugLoc DL = MI.getDebugLoc();
31674 // To "insert" a SELECT_CC instruction, we actually have to insert the
31675 // diamond control-flow pattern. The incoming instruction knows the
31676 // destination vreg to set, the condition code register to branch on, the
31677 // true/false values to select between and a branch opcode to use.
31682 // cmpTY ccX, r1, r2
31684 // fallthrough --> FalseMBB
31686 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
31687 // as described above, by inserting a BB, and then making a PHI at the join
31688 // point to select the true and false operands of the CMOV in the PHI.
31690 // The code also handles two different cases of multiple CMOV opcodes in a row.
31694 // In this case, there are multiple CMOVs in a row, all of which are based on
31695 // the same condition setting (or the exact opposite condition setting).
31696 // In this case we can lower all the CMOVs using a single inserted BB, and
31697 // then make a number of PHIs at the join point to model the CMOVs. The only
31698 // trickiness here is that in a case like:
31700 // t2 = CMOV cond1 t1, f1
31701 // t3 = CMOV cond1 t2, f2
31703 // when rewriting this into PHIs, we have to perform some renaming on the
31704 // temps since you cannot have a PHI operand refer to a PHI result earlier
31705 // in the same block. The "simple" but wrong lowering would be:
31707 // t2 = PHI t1(BB1), f1(BB2)
31708 // t3 = PHI t2(BB1), f2(BB2)
31710 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
31711 // renaming is to note that on the path through BB1, t2 is really just a
31712 // copy of t1, and do that renaming, properly generating:
31714 // t2 = PHI t1(BB1), f1(BB2)
31715 // t3 = PHI t1(BB1), f2(BB2)
31718 // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
31719 // function - EmitLoweredCascadedSelect.
31721 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
31722 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
31723 MachineInstr *LastCMOV = &MI;
31724 MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
31726 // Check first for case 1, where there are multiple CMOVs with the same
31727 // condition. Of the two cases of multiple CMOV lowerings, case 1 reduces the
31728 // number of jumps the most.
31730 if (isCMOVPseudo(MI)) {
31731 // See if we have a string of CMOVS with the same condition. Skip over
31732 // intervening debug insts.
31733 while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
31734 (NextMIIt->getOperand(3).getImm() == CC ||
31735 NextMIIt->getOperand(3).getImm() == OppCC)) {
31736 LastCMOV = &*NextMIIt;
31737 NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
31741 // This checks for case 2, but only if we didn't already find
31742 // case 1, as indicated by LastCMOV == MI.
31743 if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
31744 NextMIIt->getOpcode() == MI.getOpcode() &&
31745 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
31746 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
31747 NextMIIt->getOperand(1).isKill()) {
31748 return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
31751 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
31752 MachineFunction *F = ThisMBB->getParent();
31753 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
31754 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
31756 MachineFunction::iterator It = ++ThisMBB->getIterator();
31757 F->insert(It, FalseMBB);
31758 F->insert(It, SinkMBB);
31760 // If the EFLAGS register isn't dead in the terminator, then claim that it's
31761 // live into the sink and copy blocks.
31762 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
31763 if (!LastCMOV->killsRegister(X86::EFLAGS) &&
31764 !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
31765 FalseMBB->addLiveIn(X86::EFLAGS);
31766 SinkMBB->addLiveIn(X86::EFLAGS);
31769 // Transfer any debug instructions inside the CMOV sequence to the sunk block.
31770 auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
31771 auto DbgIt = MachineBasicBlock::iterator(MI);
31772 while (DbgIt != DbgEnd) {
31773 auto Next = std::next(DbgIt);
31774 if (DbgIt->isDebugInstr())
31775 SinkMBB->push_back(DbgIt->removeFromParent());
31779 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
31780 SinkMBB->splice(SinkMBB->end(), ThisMBB,
31781 std::next(MachineBasicBlock::iterator(LastCMOV)),
31783 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
31785 // Fallthrough block for ThisMBB.
31786 ThisMBB->addSuccessor(FalseMBB);
31788 // The true block target of the first (or only) branch is always SinkMBB.
31788 ThisMBB->addSuccessor(SinkMBB);
31789 // Fallthrough block for FalseMBB.
31790 FalseMBB->addSuccessor(SinkMBB);
31792 // Create the conditional branch instruction.
31793 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
31796 // %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
31798 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
31799 MachineBasicBlock::iterator MIItEnd =
31800 std::next(MachineBasicBlock::iterator(LastCMOV));
31801 createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
31803 // Now remove the CMOV(s).
31804 ThisMBB->erase(MIItBegin, MIItEnd);
31809 static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
31812 return X86::SUB64ri8;
31813 return X86::SUB64ri32;
31816 return X86::SUB32ri8;
31817 return X86::SUB32ri;
31821 MachineBasicBlock *
31822 X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
31823 MachineBasicBlock *MBB) const {
31824 MachineFunction *MF = MBB->getParent();
31825 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31826 const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
31827 DebugLoc DL = MI.getDebugLoc();
31828 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
31830 const unsigned ProbeSize = getStackProbeSize(*MF);
31832 MachineRegisterInfo &MRI = MF->getRegInfo();
31833 MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31834 MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31835 MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31837 MachineFunction::iterator MBBIter = ++MBB->getIterator();
31838 MF->insert(MBBIter, testMBB);
31839 MF->insert(MBBIter, blockMBB);
31840 MF->insert(MBBIter, tailMBB);
31842 Register sizeVReg = MI.getOperand(1).getReg();
31844 Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
31846 Register TmpStackPtr = MRI.createVirtualRegister(
31847 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
31848 Register FinalStackPtr = MRI.createVirtualRegister(
31849 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
31851 BuildMI(*MBB, {MI}, DL, TII->get(TargetOpcode::COPY), TmpStackPtr)
31852 .addReg(physSPReg);
31854 const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
31855 BuildMI(*MBB, {MI}, DL, TII->get(Opc), FinalStackPtr)
31856 .addReg(TmpStackPtr)
31862 BuildMI(testMBB, DL,
31863 TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
31864 .addReg(FinalStackPtr)
31865 .addReg(physSPReg);
31867 BuildMI(testMBB, DL, TII->get(X86::JCC_1))
31869 .addImm(X86::COND_L);
31870 testMBB->addSuccessor(blockMBB);
31871 testMBB->addSuccessor(tailMBB);
31873 // Touch the block then extend it. This is done in the opposite order from the
31874 // static probe, where we allocate then touch, to avoid the need to probe the
31875 // tail of the static alloca. Possible scenarios are:
31877 // + ---- <- ------------ <- ------------- <- ------------ +
31879 // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
31881 // + <- ----------- <- ------------ <- ----------- <- ------------ +
31883 // The property we want to enforce is to never have more than [page alloc] between two probes.
31885 const unsigned MovMIOpc =
31886 TFI.Uses64BitFramePtr ? X86::MOV64mi32 : X86::MOV32mi;
31887 addRegOffset(BuildMI(blockMBB, DL, TII->get(MovMIOpc)), physSPReg, false, 0)
31890 BuildMI(blockMBB, DL,
31891 TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr, ProbeSize)), physSPReg)
31893 .addImm(ProbeSize);
31896 BuildMI(blockMBB, DL, TII->get(X86::JMP_1)).addMBB(testMBB);
31897 blockMBB->addSuccessor(testMBB);
31899 // Replace the original instruction with the expected stack pointer.
31900 BuildMI(tailMBB, DL, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
31901 .addReg(FinalStackPtr);
31903 tailMBB->splice(tailMBB->end(), MBB,
31904 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
31905 tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
31906 MBB->addSuccessor(testMBB);
31908 // Delete the original pseudo instruction.
31909 MI.eraseFromParent();
31915 MachineBasicBlock *
31916 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
31917 MachineBasicBlock *BB) const {
31918 MachineFunction *MF = BB->getParent();
31919 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31920 DebugLoc DL = MI.getDebugLoc();
31921 const BasicBlock *LLVM_BB = BB->getBasicBlock();
31923 assert(MF->shouldSplitStack());
31925 const bool Is64Bit = Subtarget.is64Bit();
31926 const bool IsLP64 = Subtarget.isTarget64BitLP64();
31928 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
31929 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
31932 // ... [Till the alloca]
31933 // If stacklet is not large enough, jump to mallocMBB
31936 // Allocate by subtracting from RSP
31937 // Jump to continueMBB
31940 // Allocate by call to runtime
31944 // [rest of original BB]
31947 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31948 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31949 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31951 MachineRegisterInfo &MRI = MF->getRegInfo();
31952 const TargetRegisterClass *AddrRegClass =
31953 getRegClassFor(getPointerTy(MF->getDataLayout()));
31955 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
31956 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
31957 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
31958 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
31959 sizeVReg = MI.getOperand(1).getReg(),
31961 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
31963 MachineFunction::iterator MBBIter = ++BB->getIterator();
31965 MF->insert(MBBIter, bumpMBB);
31966 MF->insert(MBBIter, mallocMBB);
31967 MF->insert(MBBIter, continueMBB);
31969 continueMBB->splice(continueMBB->begin(), BB,
31970 std::next(MachineBasicBlock::iterator(MI)), BB->end());
31971 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
31973 // Add code to the main basic block to check if the stack limit has been hit,
31974 // and if so, jump to mallocMBB, otherwise to bumpMBB.
31975 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
31976 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
31977 .addReg(tmpSPVReg).addReg(sizeVReg);
31978 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
31979 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
31980 .addReg(SPLimitVReg);
31981 BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
31983 // bumpMBB simply decreases the stack pointer, since we know the current
31984 // stacklet has enough space.
31985 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
31986 .addReg(SPLimitVReg);
31987 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
31988 .addReg(SPLimitVReg);
31989 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
31991 // Calls into a routine in libgcc to allocate more space from the heap.
31992 const uint32_t *RegMask =
31993 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
31995 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
31997 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
31998 .addExternalSymbol("__morestack_allocate_stack_space")
31999 .addRegMask(RegMask)
32000 .addReg(X86::RDI, RegState::Implicit)
32001 .addReg(X86::RAX, RegState::ImplicitDefine);
32002 } else if (Is64Bit) {
32003 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
32005 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
32006 .addExternalSymbol("__morestack_allocate_stack_space")
32007 .addRegMask(RegMask)
32008 .addReg(X86::EDI, RegState::Implicit)
32009 .addReg(X86::EAX, RegState::ImplicitDefine);
32011 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
32013 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
32014 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
32015 .addExternalSymbol("__morestack_allocate_stack_space")
32016 .addRegMask(RegMask)
32017 .addReg(X86::EAX, RegState::ImplicitDefine);
32021 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
32024 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
32025 .addReg(IsLP64 ? X86::RAX : X86::EAX);
32026 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
32028 // Set up the CFG correctly.
32029 BB->addSuccessor(bumpMBB);
32030 BB->addSuccessor(mallocMBB);
32031 mallocMBB->addSuccessor(continueMBB);
32032 bumpMBB->addSuccessor(continueMBB);
32034 // Take care of the PHI nodes.
32035 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
32036 MI.getOperand(0).getReg())
32037 .addReg(mallocPtrVReg)
32039 .addReg(bumpSPPtrVReg)
32042 // Delete the original pseudo instruction.
32043 MI.eraseFromParent();
32046 return continueMBB;
32049 MachineBasicBlock *
32050 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
32051 MachineBasicBlock *BB) const {
32052 MachineFunction *MF = BB->getParent();
32053 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
32054 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
32055 DebugLoc DL = MI.getDebugLoc();
32057 assert(!isAsynchronousEHPersonality(
32058 classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
32059 "SEH does not use catchret!");
32061 // Only 32-bit EH needs to worry about manually restoring stack pointers.
32062 if (!Subtarget.is32Bit())
32065 // C++ EH creates a new target block to hold the restore code, and wires up
32066 // the new block to the return destination with a normal JMP_4.
32067 MachineBasicBlock *RestoreMBB =
32068 MF->CreateMachineBasicBlock(BB->getBasicBlock());
32069 assert(BB->succ_size() == 1);
32070 MF->insert(std::next(BB->getIterator()), RestoreMBB);
32071 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
32072 BB->addSuccessor(RestoreMBB);
32073 MI.getOperand(0).setMBB(RestoreMBB);
32075 // Marking this as an EH pad but not a funclet entry block causes PEI to
32076 // restore stack pointers in the block.
32077 RestoreMBB->setIsEHPad(true);
32079 auto RestoreMBBI = RestoreMBB->begin();
32080 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
32084 MachineBasicBlock *
32085 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
32086 MachineBasicBlock *BB) const {
32087 // So, here we replace TLSADDR with the sequence:
32088 // adjust_stackdown -> TLSADDR -> adjust_stackup.
32089 // We need this because TLSADDR is lowered into calls
32090 // inside MC; without the two markers, shrink-wrapping
32091 // may push the prologue/epilogue past them.
32092 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
32093 DebugLoc DL = MI.getDebugLoc();
32094 MachineFunction &MF = *BB->getParent();
32096 // Emit CALLSEQ_START right before the instruction.
32097 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
32098 MachineInstrBuilder CallseqStart =
32099 BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
32100 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
32102 // Emit CALLSEQ_END right after the instruction.
32103 // We don't call erase from parent because we want to keep the
32104 // original instruction around.
32105 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
32106 MachineInstrBuilder CallseqEnd =
32107 BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
32108 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
32113 MachineBasicBlock *
32114 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
32115 MachineBasicBlock *BB) const {
32116 // This is pretty easy. We're taking the value that we received from
32117 // our load from the relocation, sticking it in either RDI (x86-64)
32118 // or EAX and doing an indirect call. The return value will then
32119 // be in the normal return register.
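// (Illustrative 64-bit Darwin sequence, not from the original comments.)
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)
// leaves the address of the thread-local variable in %rax.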
32120 MachineFunction *F = BB->getParent();
32121 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32122 DebugLoc DL = MI.getDebugLoc();
32124 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
32125 assert(MI.getOperand(3).isGlobal() && "This should be a global");
32127 // Get a register mask for the lowered call.
32128 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
32129 // proper register mask.
32130 const uint32_t *RegMask =
32131 Subtarget.is64Bit() ?
32132 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
32133 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
32134 if (Subtarget.is64Bit()) {
32135 MachineInstrBuilder MIB =
32136 BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
32140 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
32141 MI.getOperand(3).getTargetFlags())
32143 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
32144 addDirectMem(MIB, X86::RDI);
32145 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
32146 } else if (!isPositionIndependent()) {
32147 MachineInstrBuilder MIB =
32148 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
32152 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
32153 MI.getOperand(3).getTargetFlags())
32155 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
32156 addDirectMem(MIB, X86::EAX);
32157 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
32159 MachineInstrBuilder MIB =
32160 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
32161 .addReg(TII->getGlobalBaseReg(F))
32164 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
32165 MI.getOperand(3).getTargetFlags())
32167 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
32168 addDirectMem(MIB, X86::EAX);
32169 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
32172 MI.eraseFromParent(); // The pseudo instruction is gone now.
32176 static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
32178 case X86::INDIRECT_THUNK_CALL32:
32179 return X86::CALLpcrel32;
32180 case X86::INDIRECT_THUNK_CALL64:
32181 return X86::CALL64pcrel32;
32182 case X86::INDIRECT_THUNK_TCRETURN32:
32183 return X86::TCRETURNdi;
32184 case X86::INDIRECT_THUNK_TCRETURN64:
32185 return X86::TCRETURNdi64;
32187 llvm_unreachable("not indirect thunk opcode");
32190 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
32192 if (Subtarget.useRetpolineExternalThunk()) {
32193 // When using an external thunk for retpolines, we pick names that match the
32194 // names GCC happens to use as well. This helps simplify the implementation
32195 // of the thunks for kernels where they have no easy ability to create
32196 // aliases and are doing non-trivial configuration of the thunk's body. For
32197 // example, the Linux kernel will do boot-time hot patching of the thunk
32198 // bodies and cannot easily export aliases of these to loaded modules.
32200 // Note that at any point in the future, we may need to change the semantics
32201 // of how we implement retpolines and at that time will likely change the
32202 // name of the called thunk. Essentially, there is no hard guarantee that
32203 // LLVM will generate calls to specific thunks; we merely make a best-effort
32204 // attempt to help out kernels and other systems where duplicating the
32205 // thunks is costly.
32206 switch (Reg) {
32207 case X86::EAX:
32208 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32209 return "__x86_indirect_thunk_eax";
32210 case X86::ECX:
32211 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32212 return "__x86_indirect_thunk_ecx";
32213 case X86::EDX:
32214 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32215 return "__x86_indirect_thunk_edx";
32216 case X86::EDI:
32217 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32218 return "__x86_indirect_thunk_edi";
32219 case X86::R11:
32220 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
32221 return "__x86_indirect_thunk_r11";
32222 }
32223 llvm_unreachable("unexpected reg for external indirect thunk");
32224 }
32226 if (Subtarget.useRetpolineIndirectCalls() ||
32227 Subtarget.useRetpolineIndirectBranches()) {
32228 // When targeting an internal COMDAT thunk use an LLVM-specific name.
32229 switch (Reg) {
32230 case X86::EAX:
32231 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32232 return "__llvm_retpoline_eax";
32233 case X86::ECX:
32234 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32235 return "__llvm_retpoline_ecx";
32236 case X86::EDX:
32237 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32238 return "__llvm_retpoline_edx";
32239 case X86::EDI:
32240 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32241 return "__llvm_retpoline_edi";
32242 case X86::R11:
32243 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
32244 return "__llvm_retpoline_r11";
32245 }
32246 llvm_unreachable("unexpected reg for retpoline");
32247 }
32249 if (Subtarget.useLVIControlFlowIntegrity()) {
32250 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
32251 return "__llvm_lvi_thunk_r11";
32253 llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
32256 MachineBasicBlock *
32257 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
32258 MachineBasicBlock *BB) const {
32259 // Copy the virtual register into the R11 physical register and
32260 // call the retpoline thunk.
32261 DebugLoc DL = MI.getDebugLoc();
32262 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32263 Register CalleeVReg = MI.getOperand(0).getReg();
32264 unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
32266 // Find an available scratch register to hold the callee. On 64-bit, we can
32267 // just use R11, but we scan for uses anyway to ensure we don't generate
32268 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
32269 // already a register use operand to the call to hold the callee. If none
32270 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
32271 // register and ESI is the base pointer to realigned stack frames with VLAs.
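// For example, a 32-bit indirect call whose register operands already use EAX
// and ECX would pick EDX here, and the call below is then rewritten to target
// __llvm_retpoline_edx (or the matching GCC-compatible external thunk name).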
32272 SmallVector<unsigned, 3> AvailableRegs;
32273 if (Subtarget.is64Bit())
32274 AvailableRegs.push_back(X86::R11);
32275 else
32276 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
32278 // Zero out any registers that are already used.
32279 for (const auto &MO : MI.operands()) {
32280 if (MO.isReg() && MO.isUse())
32281 for (unsigned &Reg : AvailableRegs)
32282 if (Reg == MO.getReg())
32283 Reg = 0;
32284 }
32286 // Choose the first remaining non-zero available register.
32287 unsigned AvailableReg = 0;
32288 for (unsigned MaybeReg : AvailableRegs) {
32289 if (MaybeReg) {
32290 AvailableReg = MaybeReg;
32291 break;
32292 }
32293 }
32294 if (!AvailableReg)
32295 report_fatal_error("calling convention incompatible with retpoline, no "
32296 "available registers");
32298 const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
32300 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
32301 .addReg(CalleeVReg);
32302 MI.getOperand(0).ChangeToES(Symbol);
32303 MI.setDesc(TII->get(Opc));
32304 MachineInstrBuilder(*BB->getParent(), &MI)
32305 .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
32309 /// SetJmp implies future control flow change upon calling the corresponding
32310 /// function.
32311 /// Instead of using the 'return' instruction, the long jump fixes the stack and
32312 /// performs an indirect branch. To do so it uses the registers that were stored
32313 /// in the jump buffer (when calling SetJmp).
32314 /// In case the shadow stack is enabled we need to fix it as well, because some
32315 /// return addresses will be skipped.
32316 /// The function will save the SSP for future fixing in the function
32317 /// emitLongJmpShadowStackFix.
32318 /// \sa emitLongJmpShadowStackFix
32319 /// \param [in] MI The temporary Machine Instruction for the builtin.
32320 /// \param [in] MBB The Machine Basic Block that will be modified.
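/// The jump buffer layout assumed by this lowering is, in pointer-sized slots:
/// buf[0] = frame pointer, buf[1] = resume address (LabelOffset),
/// buf[2] = stack pointer (SPOffset), buf[3] = shadow stack pointer
/// (the SSPOffset written below).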
32321 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
32322 MachineBasicBlock *MBB) const {
32323 DebugLoc DL = MI.getDebugLoc();
32324 MachineFunction *MF = MBB->getParent();
32325 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32326 MachineRegisterInfo &MRI = MF->getRegInfo();
32327 MachineInstrBuilder MIB;
32329 // Memory Reference.
32330 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
32331 MI.memoperands_end());
32333 // Initialize a register with zero.
32334 MVT PVT = getPointerTy(MF->getDataLayout());
32335 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
32336 Register ZReg = MRI.createVirtualRegister(PtrRC);
32337 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
32338 BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
32340 .addReg(ZReg, RegState::Undef)
32341 .addReg(ZReg, RegState::Undef);
32343 // Read the current SSP Register value to the zeroed register.
32344 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
32345 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
32346 BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
32348 // Write the SSP register value to offset 3 in input memory buffer.
32349 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
32350 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
32351 const int64_t SSPOffset = 3 * PVT.getStoreSize();
32352 const unsigned MemOpndSlot = 1;
32353 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32354 if (i == X86::AddrDisp)
32355 MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
32357 MIB.add(MI.getOperand(MemOpndSlot + i));
32359 MIB.addReg(SSPCopyReg);
32360 MIB.setMemRefs(MMOs);
32363 MachineBasicBlock *
32364 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
32365 MachineBasicBlock *MBB) const {
32366 DebugLoc DL = MI.getDebugLoc();
32367 MachineFunction *MF = MBB->getParent();
32368 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32369 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
32370 MachineRegisterInfo &MRI = MF->getRegInfo();
32372 const BasicBlock *BB = MBB->getBasicBlock();
32373 MachineFunction::iterator I = ++MBB->getIterator();
32375 // Memory Reference
32376 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
32377 MI.memoperands_end());
32379 unsigned DstReg;
32380 unsigned MemOpndSlot = 0;
32382 unsigned CurOp = 0;
32384 DstReg = MI.getOperand(CurOp++).getReg();
32385 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
32386 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
32388 Register mainDstReg = MRI.createVirtualRegister(RC);
32389 Register restoreDstReg = MRI.createVirtualRegister(RC);
32391 MemOpndSlot = CurOp;
32393 MVT PVT = getPointerTy(MF->getDataLayout());
32394 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
32395 "Invalid Pointer Size!");
32397 // For v = setjmp(buf), we generate
32398 //
32399 // thisMBB:
32400 // buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
32401 // SjLjSetup restoreMBB
32402 //
32403 // mainMBB:
32404 // v_main = 0
32405 //
32406 // sinkMBB:
32407 // v = phi(main, restore)
32408 //
32409 // restoreMBB:
32410 // if base pointer being used, load it from frame
32411 // v_restore = 1
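// I.e. the fall-through path out of thisMBB produces v == 0 (the direct return
// from setjmp), while a longjmp resumes at restoreMBB and produces v == 1.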
32413 MachineBasicBlock *thisMBB = MBB;
32414 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
32415 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
32416 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
32417 MF->insert(I, mainMBB);
32418 MF->insert(I, sinkMBB);
32419 MF->push_back(restoreMBB);
32420 restoreMBB->setHasAddressTaken();
32422 MachineInstrBuilder MIB;
32424 // Transfer the remainder of BB and its successor edges to sinkMBB.
32425 sinkMBB->splice(sinkMBB->begin(), MBB,
32426 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
32427 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
32430 unsigned PtrStoreOpc = 0;
32431 unsigned LabelReg = 0;
32432 const int64_t LabelOffset = 1 * PVT.getStoreSize();
32433 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
32434 !isPositionIndependent();
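// I.e. the address of restoreMBB can only be encoded as an absolute 32-bit
// immediate for small, non-PIC code models; otherwise it is materialized into
// a register with LEA below and stored with a register-store opcode.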
32436 // Prepare IP either in reg or imm.
32437 if (!UseImmLabel) {
32438 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
32439 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
32440 LabelReg = MRI.createVirtualRegister(PtrRC);
32441 if (Subtarget.is64Bit()) {
32442 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
32446 .addMBB(restoreMBB)
32449 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
32450 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
32451 .addReg(XII->getGlobalBaseReg(MF))
32454 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
32458 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
32460 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
32461 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32462 if (i == X86::AddrDisp)
32463 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
32465 MIB.add(MI.getOperand(MemOpndSlot + i));
32468 MIB.addReg(LabelReg);
32470 MIB.addMBB(restoreMBB);
32471 MIB.setMemRefs(MMOs);
32473 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
32474 emitSetJmpShadowStackFix(MI, thisMBB);
32478 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
32479 .addMBB(restoreMBB);
32481 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
32482 MIB.addRegMask(RegInfo->getNoPreservedMask());
32483 thisMBB->addSuccessor(mainMBB);
32484 thisMBB->addSuccessor(restoreMBB);
32488 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
32489 mainMBB->addSuccessor(sinkMBB);
32492 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
32493 TII->get(X86::PHI), DstReg)
32494 .addReg(mainDstReg).addMBB(mainMBB)
32495 .addReg(restoreDstReg).addMBB(restoreMBB);
32498 if (RegInfo->hasBasePointer(*MF)) {
32499 const bool Uses64BitFramePtr =
32500 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
32501 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
32502 X86FI->setRestoreBasePointer(MF);
32503 Register FramePtr = RegInfo->getFrameRegister(*MF);
32504 Register BasePtr = RegInfo->getBaseRegister();
32505 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
32506 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
32507 FramePtr, true, X86FI->getRestoreBasePointerOffset())
32508 .setMIFlag(MachineInstr::FrameSetup);
32510 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
32511 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
32512 restoreMBB->addSuccessor(sinkMBB);
32514 MI.eraseFromParent();
32518 /// Fix the shadow stack using the previously saved SSP pointer.
32519 /// \sa emitSetJmpShadowStackFix
32520 /// \param [in] MI The temporary Machine Instruction for the builtin.
32521 /// \param [in] MBB The Machine Basic Block that will be modified.
32522 /// \return The sink MBB that will perform the future indirect branch.
32523 MachineBasicBlock *
32524 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
32525 MachineBasicBlock *MBB) const {
32526 DebugLoc DL = MI.getDebugLoc();
32527 MachineFunction *MF = MBB->getParent();
32528 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32529 MachineRegisterInfo &MRI = MF->getRegInfo();
32531 // Memory Reference
32532 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
32533 MI.memoperands_end());
32535 MVT PVT = getPointerTy(MF->getDataLayout());
32536 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
32538 // checkSspMBB:
32539 // xor vreg1, vreg1
32540 // rdssp vreg1
32541 // test vreg1, vreg1
32542 // je sinkMBB # Jump if Shadow Stack is not supported
32543 // fallMBB:
32544 // mov buf+24/12(%rip), vreg2
32545 // sub vreg1, vreg2
32546 // jbe sinkMBB # No need to fix the Shadow Stack
32547 // fixShadowMBB:
32548 // shr 3/2, vreg2
32549 // incssp vreg2 # fix the SSP according to the lower 8 bits
32550 // shr 8, vreg2
32551 // je sinkMBB
32552 // fixShadowLoopPrepareMBB:
32553 // shl vreg2
32554 // mov 128, vreg3
32555 // fixShadowLoopMBB:
32556 // incssp vreg3
32557 // dec vreg2
32558 // jne fixShadowLoopMBB # Iterate until you finish fixing
32559 // # the Shadow Stack
32560 // sinkMBB:
32562 MachineFunction::iterator I = ++MBB->getIterator();
32563 const BasicBlock *BB = MBB->getBasicBlock();
32565 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
32566 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
32567 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
32568 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
32569 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
32570 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
32571 MF->insert(I, checkSspMBB);
32572 MF->insert(I, fallMBB);
32573 MF->insert(I, fixShadowMBB);
32574 MF->insert(I, fixShadowLoopPrepareMBB);
32575 MF->insert(I, fixShadowLoopMBB);
32576 MF->insert(I, sinkMBB);
32578 // Transfer the remainder of BB and its successor edges to sinkMBB.
32579 sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
32581 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
32583 MBB->addSuccessor(checkSspMBB);
32585 // Initialize a register with zero.
32586 Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
32587 BuildMI(checkSspMBB, DL, TII->get(X86::MOV32r0), ZReg);
32589 if (PVT == MVT::i64) {
32590 Register TmpZReg = MRI.createVirtualRegister(PtrRC);
32591 BuildMI(checkSspMBB, DL, TII->get(X86::SUBREG_TO_REG), TmpZReg)
32594 .addImm(X86::sub_32bit);
32598 // Read the current SSP Register value to the zeroed register.
32599 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
32600 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
32601 BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
32603 // Check whether the result of the SSP register is zero and jump directly
32605 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
32606 BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
32607 .addReg(SSPCopyReg)
32608 .addReg(SSPCopyReg);
32609 BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
32610 checkSspMBB->addSuccessor(sinkMBB);
32611 checkSspMBB->addSuccessor(fallMBB);
32613 // Reload the previously saved SSP register value.
32614 Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
32615 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
32616 const int64_t SPPOffset = 3 * PVT.getStoreSize();
32617 MachineInstrBuilder MIB =
32618 BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
32619 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32620 const MachineOperand &MO = MI.getOperand(i);
32621 if (i == X86::AddrDisp)
32622 MIB.addDisp(MO, SPPOffset);
32623 else if (MO.isReg()) // Don't add the whole operand, we don't want to
32624 // preserve kill flags.
32625 MIB.addReg(MO.getReg());
32629 MIB.setMemRefs(MMOs);
32631 // Subtract the current SSP from the previous SSP.
32632 Register SspSubReg = MRI.createVirtualRegister(PtrRC);
32633 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
32634 BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
32635 .addReg(PrevSSPReg)
32636 .addReg(SSPCopyReg);
32638 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
32639 BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
32640 fallMBB->addSuccessor(sinkMBB);
32641 fallMBB->addSuccessor(fixShadowMBB);
32643 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
32644 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
32645 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
32646 Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
32647 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
32651 // Increase SSP when looking only on the lower 8 bits of the delta.
32652 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
32653 BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
32655 // Reset the lower 8 bits.
32656 Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
32657 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
32658 .addReg(SspFirstShrReg)
32661 // Jump if the result of the shift is zero.
32662 BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
32663 fixShadowMBB->addSuccessor(sinkMBB);
32664 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
32666 // Do a single shift left.
32667 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
32668 Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
32669 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
32670 .addReg(SspSecondShrReg);
32672 // Save the value 128 to a register (will be used next with incssp).
32673 Register Value128InReg = MRI.createVirtualRegister(PtrRC);
32674 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
32675 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
32677 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
32679 // Since incssp only looks at the lower 8 bits, we might need to do several
32680 // iterations of incssp until we finish fixing the shadow stack.
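// Rough worked example: for a remaining delta of 0x350 slots, the first incssp
// above consumes only the low 8 bits (0x50 slots), and the loop below then runs
// (0x3 << 1) = 6 iterations of incssp(128), since 6 * 128 = 0x300 slots.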
32681 Register DecReg = MRI.createVirtualRegister(PtrRC);
32682 Register CounterReg = MRI.createVirtualRegister(PtrRC);
32683 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
32684 .addReg(SspAfterShlReg)
32685 .addMBB(fixShadowLoopPrepareMBB)
32687 .addMBB(fixShadowLoopMBB);
32689 // Every iteration we increase the SSP by 128.
32690 BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
32692 // Every iteration we decrement the counter by 1.
32693 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
32694 BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
32696 // Jump if the counter is not zero yet.
32697 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
32698 fixShadowLoopMBB->addSuccessor(sinkMBB);
32699 fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
32704 MachineBasicBlock *
32705 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
32706 MachineBasicBlock *MBB) const {
32707 DebugLoc DL = MI.getDebugLoc();
32708 MachineFunction *MF = MBB->getParent();
32709 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32710 MachineRegisterInfo &MRI = MF->getRegInfo();
32712 // Memory Reference
32713 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
32714 MI.memoperands_end());
32716 MVT PVT = getPointerTy(MF->getDataLayout());
32717 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
32718 "Invalid Pointer Size!");
32720 const TargetRegisterClass *RC =
32721 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
32722 Register Tmp = MRI.createVirtualRegister(RC);
32723 // Since FP is only updated here but NOT referenced, it's treated as GPR.
32724 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
32725 Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
32726 Register SP = RegInfo->getStackRegister();
32728 MachineInstrBuilder MIB;
32730 const int64_t LabelOffset = 1 * PVT.getStoreSize();
32731 const int64_t SPOffset = 2 * PVT.getStoreSize();
32733 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
32734 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
32736 MachineBasicBlock *thisMBB = MBB;
32738 // When CET and shadow stack is enabled, we need to fix the Shadow Stack.
32739 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
32740 thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
32744 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
32745 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32746 const MachineOperand &MO = MI.getOperand(i);
32747 if (MO.isReg()) // Don't add the whole operand, we don't want to
32748 // preserve kill flags.
32749 MIB.addReg(MO.getReg());
32753 MIB.setMemRefs(MMOs);
32756 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
32757 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32758 const MachineOperand &MO = MI.getOperand(i);
32759 if (i == X86::AddrDisp)
32760 MIB.addDisp(MO, LabelOffset);
32761 else if (MO.isReg()) // Don't add the whole operand, we don't want to
32762 // preserve kill flags.
32763 MIB.addReg(MO.getReg());
32767 MIB.setMemRefs(MMOs);
32770 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
32771 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32772 if (i == X86::AddrDisp)
32773 MIB.addDisp(MI.getOperand(i), SPOffset);
32775 MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
32776 // the last instruction of the expansion.
32778 MIB.setMemRefs(MMOs);
32781 BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
32783 MI.eraseFromParent();
32787 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
32788 MachineBasicBlock *MBB,
32789 MachineBasicBlock *DispatchBB,
32791 DebugLoc DL = MI.getDebugLoc();
32792 MachineFunction *MF = MBB->getParent();
32793 MachineRegisterInfo *MRI = &MF->getRegInfo();
32794 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32796 MVT PVT = getPointerTy(MF->getDataLayout());
32797 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
32802 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
32803 !isPositionIndependent();
32806 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
32808 const TargetRegisterClass *TRC =
32809 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
32810 VR = MRI->createVirtualRegister(TRC);
32811 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
32813 if (Subtarget.is64Bit())
32814 BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
32818 .addMBB(DispatchBB)
32821 BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
32822 .addReg(0) /* TII->getGlobalBaseReg(MF) */
32825 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
32829 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
32830 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
32832 MIB.addMBB(DispatchBB);
32837 MachineBasicBlock *
32838 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
32839 MachineBasicBlock *BB) const {
32840 DebugLoc DL = MI.getDebugLoc();
32841 MachineFunction *MF = BB->getParent();
32842 MachineRegisterInfo *MRI = &MF->getRegInfo();
32843 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32844 int FI = MF->getFrameInfo().getFunctionContextIndex();
32846 // Get a mapping of the call site numbers to all of the landing pads they're
32847 // associated with.
32848 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
32849 unsigned MaxCSNum = 0;
32850 for (auto &MBB : *MF) {
32851 if (!MBB.isEHPad())
32854 MCSymbol *Sym = nullptr;
32855 for (const auto &MI : MBB) {
32856 if (MI.isDebugInstr())
32859 assert(MI.isEHLabel() && "expected EH_LABEL");
32860 Sym = MI.getOperand(0).getMCSymbol();
32864 if (!MF->hasCallSiteLandingPad(Sym))
32867 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
32868 CallSiteNumToLPad[CSI].push_back(&MBB);
32869 MaxCSNum = std::max(MaxCSNum, CSI);
32873 // Get an ordered list of the machine basic blocks for the jump table.
32874 std::vector<MachineBasicBlock *> LPadList;
32875 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
32876 LPadList.reserve(CallSiteNumToLPad.size());
32878 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
32879 for (auto &LP : CallSiteNumToLPad[CSI]) {
32880 LPadList.push_back(LP);
32881 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
32885 assert(!LPadList.empty() &&
32886 "No landing pad destinations for the dispatch jump table!");
32888 // Create the MBBs for the dispatch code.
32890 // Shove the dispatch's address into the return slot in the function context.
32891 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
32892 DispatchBB->setIsEHPad(true);
32894 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
32895 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
32896 DispatchBB->addSuccessor(TrapBB);
32898 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
32899 DispatchBB->addSuccessor(DispContBB);
32902 MF->push_back(DispatchBB);
32903 MF->push_back(DispContBB);
32904 MF->push_back(TrapBB);
32906 // Insert code into the entry block that creates and registers the function
32908 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
32910 // Create the jump table and associated information
32911 unsigned JTE = getJumpTableEncoding();
32912 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
32913 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
32915 const X86RegisterInfo &RI = TII->getRegisterInfo();
32916 // Add a register mask with no preserved registers. This results in all
32917 // registers being marked as clobbered.
32918 if (RI.hasBasePointer(*MF)) {
32919 const bool FPIs64Bit =
32920 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
32921 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
32922 MFI->setRestoreBasePointer(MF);
32924 Register FP = RI.getFrameRegister(*MF);
32925 Register BP = RI.getBaseRegister();
32926 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
32927 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
32928 MFI->getRestoreBasePointerOffset())
32929 .addRegMask(RI.getNoPreservedMask());
32931 BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
32932 .addRegMask(RI.getNoPreservedMask());
32935 // IReg is used as an index in a memory operand and therefore can't be SP
32936 Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
32937 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
32938 Subtarget.is64Bit() ? 8 : 4);
32939 BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
32941 .addImm(LPadList.size());
32942 BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
32944 if (Subtarget.is64Bit()) {
32945 Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32946 Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
32948 // leaq .LJTI0_0(%rip), BReg
32949 BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
32953 .addJumpTableIndex(MJTI)
32955 // movzx IReg64, IReg
32956 BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
32959 .addImm(X86::sub_32bit);
32962 case MachineJumpTableInfo::EK_BlockAddress:
32963 // jmpq *(BReg,IReg64,8)
32964 BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
32971 case MachineJumpTableInfo::EK_LabelDifference32: {
32972 Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
32973 Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
32974 Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32976 // movl (BReg,IReg64,4), OReg
32977 BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
32983 // movsx OReg64, OReg
32984 BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
32985 // addq BReg, OReg64, TReg
32986 BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
32990 BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
32994 llvm_unreachable("Unexpected jump table encoding");
32997 // jmpl *.LJTI0_0(,IReg,4)
32998 BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
33002 .addJumpTableIndex(MJTI)
33006 // Add the jump table entries as successors to the MBB.
33007 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
33008 for (auto &LP : LPadList)
33009 if (SeenMBBs.insert(LP).second)
33010 DispContBB->addSuccessor(LP);
33012 // N.B. the order the invoke BBs are processed in doesn't matter here.
33013 SmallVector<MachineBasicBlock *, 64> MBBLPads;
33014 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
33015 for (MachineBasicBlock *MBB : InvokeBBs) {
33016 // Remove the landing pad successor from the invoke block and replace it
33017 // with the new dispatch block.
33018 // Keep a copy of Successors since it's modified inside the loop.
33019 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
33021 // FIXME: Avoid quadratic complexity.
33022 for (auto MBBS : Successors) {
33023 if (MBBS->isEHPad()) {
33024 MBB->removeSuccessor(MBBS);
33025 MBBLPads.push_back(MBBS);
33029 MBB->addSuccessor(DispatchBB);
33031 // Find the invoke call and mark all of the callee-saved registers as
33032 // 'implicit defined' so that they're spilled. This prevents code from
33033 // moving instructions to before the EH block, where they will never be
33035 for (auto &II : reverse(*MBB)) {
33039 DenseMap<unsigned, bool> DefRegs;
33040 for (auto &MOp : II.operands())
33042 DefRegs[MOp.getReg()] = true;
33044 MachineInstrBuilder MIB(*MF, &II);
33045 for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
33046 unsigned Reg = SavedRegs[RegIdx];
33048 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
33055 // Mark all former landing pads as non-landing pads. The dispatch is the only
33056 // landing pad now.
33057 for (auto &LP : MBBLPads)
33058 LP->setIsEHPad(false);
33060 // The instruction is gone now.
33061 MI.eraseFromParent();
33065 MachineBasicBlock *
33066 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
33067 MachineBasicBlock *BB) const {
33068 MachineFunction *MF = BB->getParent();
33069 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
33070 DebugLoc DL = MI.getDebugLoc();
33072 auto TMMImmToTMMReg = [](unsigned Imm) {
33073 assert (Imm < 8 && "Illegal tmm index");
33074 return X86::TMM0 + Imm;
33076 switch (MI.getOpcode()) {
33077 default: llvm_unreachable("Unexpected instr type to insert");
33078 case X86::TLS_addr32:
33079 case X86::TLS_addr64:
33080 case X86::TLS_base_addr32:
33081 case X86::TLS_base_addr64:
33082 return EmitLoweredTLSAddr(MI, BB);
33083 case X86::INDIRECT_THUNK_CALL32:
33084 case X86::INDIRECT_THUNK_CALL64:
33085 case X86::INDIRECT_THUNK_TCRETURN32:
33086 case X86::INDIRECT_THUNK_TCRETURN64:
33087 return EmitLoweredIndirectThunk(MI, BB);
33088 case X86::CATCHRET:
33089 return EmitLoweredCatchRet(MI, BB);
33090 case X86::SEG_ALLOCA_32:
33091 case X86::SEG_ALLOCA_64:
33092 return EmitLoweredSegAlloca(MI, BB);
33093 case X86::PROBED_ALLOCA_32:
33094 case X86::PROBED_ALLOCA_64:
33095 return EmitLoweredProbedAlloca(MI, BB);
33096 case X86::TLSCall_32:
33097 case X86::TLSCall_64:
33098 return EmitLoweredTLSCall(MI, BB);
33099 case X86::CMOV_FR32:
33100 case X86::CMOV_FR32X:
33101 case X86::CMOV_FR64:
33102 case X86::CMOV_FR64X:
33103 case X86::CMOV_GR8:
33104 case X86::CMOV_GR16:
33105 case X86::CMOV_GR32:
33106 case X86::CMOV_RFP32:
33107 case X86::CMOV_RFP64:
33108 case X86::CMOV_RFP80:
33109 case X86::CMOV_VR64:
33110 case X86::CMOV_VR128:
33111 case X86::CMOV_VR128X:
33112 case X86::CMOV_VR256:
33113 case X86::CMOV_VR256X:
33114 case X86::CMOV_VR512:
33115 case X86::CMOV_VK1:
33116 case X86::CMOV_VK2:
33117 case X86::CMOV_VK4:
33118 case X86::CMOV_VK8:
33119 case X86::CMOV_VK16:
33120 case X86::CMOV_VK32:
33121 case X86::CMOV_VK64:
33122 return EmitLoweredSelect(MI, BB);
33124 case X86::RDFLAGS32:
33125 case X86::RDFLAGS64: {
33127 MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
33128 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
33129 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
33130 // Permit reads of the EFLAGS and DF registers without them being defined.
33131 // This intrinsic exists to read external processor state in flags, such as
33132 // the trap flag, interrupt flag, and direction flag, none of which are
33133 // modeled by the backend.
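// The expansion is effectively "pushfq; popq %dst" (or the 32-bit
// pushfl/popl equivalents), so the flags value is read through the stack.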
33134 assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
33135 "Unexpected register in operand!");
33136 Push->getOperand(2).setIsUndef();
33137 assert(Push->getOperand(3).getReg() == X86::DF &&
33138 "Unexpected register in operand!");
33139 Push->getOperand(3).setIsUndef();
33140 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
33142 MI.eraseFromParent(); // The pseudo is gone now.
33146 case X86::WRFLAGS32:
33147 case X86::WRFLAGS64: {
33149 MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
33151 MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
33152 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
33153 BuildMI(*BB, MI, DL, TII->get(PopF));
33155 MI.eraseFromParent(); // The pseudo is gone now.
33159 case X86::FP32_TO_INT16_IN_MEM:
33160 case X86::FP32_TO_INT32_IN_MEM:
33161 case X86::FP32_TO_INT64_IN_MEM:
33162 case X86::FP64_TO_INT16_IN_MEM:
33163 case X86::FP64_TO_INT32_IN_MEM:
33164 case X86::FP64_TO_INT64_IN_MEM:
33165 case X86::FP80_TO_INT16_IN_MEM:
33166 case X86::FP80_TO_INT32_IN_MEM:
33167 case X86::FP80_TO_INT64_IN_MEM: {
33168 // Change the floating point control register to use "round towards zero"
33169 // mode when truncating to an integer value.
33170 int OrigCWFrameIdx =
33171 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
33172 addFrameReference(BuildMI(*BB, MI, DL,
33173 TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
33175 // Load the old value of the control word...
33176 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
33177 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
33180 // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
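// For example, the default control word 0x037F becomes 0x0F7F after this OR,
// selecting round-toward-zero while leaving the other control bits intact.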
33181 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
33182 BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
33183 .addReg(OldCW, RegState::Kill).addImm(0xC00);
33185 // Extract to 16 bits.
33187 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
33188 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
33189 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
33191 // Prepare memory for FLDCW.
33192 int NewCWFrameIdx =
33193 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
33194 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
33196 .addReg(NewCW16, RegState::Kill);
33198 // Reload the modified control word now...
33199 addFrameReference(BuildMI(*BB, MI, DL,
33200 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
33202 // Get the X86 opcode to use.
33204 switch (MI.getOpcode()) {
33205 default: llvm_unreachable("illegal opcode!");
33206 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
33207 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
33208 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
33209 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
33210 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
33211 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
33212 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
33213 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
33214 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
33217 X86AddressMode AM = getAddressFromInstr(&MI, 0);
33218 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
33219 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
33221 // Reload the original control word now.
33222 addFrameReference(BuildMI(*BB, MI, DL,
33223 TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
33225 MI.eraseFromParent(); // The pseudo instruction is gone now.
33231 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
33233 case X86::VASTART_SAVE_XMM_REGS:
33234 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
33236 case X86::VAARG_64:
33237 return EmitVAARG64WithCustomInserter(MI, BB);
33239 case X86::EH_SjLj_SetJmp32:
33240 case X86::EH_SjLj_SetJmp64:
33241 return emitEHSjLjSetJmp(MI, BB);
33243 case X86::EH_SjLj_LongJmp32:
33244 case X86::EH_SjLj_LongJmp64:
33245 return emitEHSjLjLongJmp(MI, BB);
33247 case X86::Int_eh_sjlj_setup_dispatch:
33248 return EmitSjLjDispatchBlock(MI, BB);
33250 case TargetOpcode::STATEPOINT:
33251 // As an implementation detail, STATEPOINT shares the STACKMAP format at
33252 // this point in the process. We diverge later.
33253 return emitPatchPoint(MI, BB);
33255 case TargetOpcode::STACKMAP:
33256 case TargetOpcode::PATCHPOINT:
33257 return emitPatchPoint(MI, BB);
33259 case TargetOpcode::PATCHABLE_EVENT_CALL:
33260 return emitXRayCustomEvent(MI, BB);
33262 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
33263 return emitXRayTypedEvent(MI, BB);
33265 case X86::LCMPXCHG8B: {
33266 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
33267 // In addition to the four E[ABCD] registers implied by its encoding, CMPXCHG8B
33268 // requires a memory operand. If the current architecture is i686 and the
33269 // current function needs a base pointer - which is ESI on i686 - the register
33270 // allocator would not be able to allocate registers for an address of the
33271 // form X(%reg, %reg, Y): with ESI reserved there would never be enough
33272 // unreserved registers during regalloc (without the need for a base pointer
33273 // the only option would be X(%edi, %esi, Y)).
33274 // We give the register allocator a hand by precomputing the address in a new
33275 // vreg using LEA.
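// Sketch of the rewrite performed below: a memory operand such as
// disp(%base,%index,scale) is first computed into a fresh vreg with LEA32r,
// and CMPXCHG8B is then given the simple address (%vreg), which needs no
// index register.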
33277 // If it is not i686 or there is no base pointer - nothing to do here.
33278 if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
33281 // Even though this code does not necessarily need the base pointer to
33282 // be ESI, we check for that. The reason: if this assert fails, something
33283 // has changed in the compiler's base pointer handling, and it most
33284 // probably has to be addressed here as well.
33285 assert(TRI->getBaseRegister() == X86::ESI &&
33286 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
33287 "base pointer in mind");
33289 MachineRegisterInfo &MRI = MF->getRegInfo();
33290 MVT SPTy = getPointerTy(MF->getDataLayout());
33291 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
33292 Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
33294 X86AddressMode AM = getAddressFromInstr(&MI, 0);
33295 // Regalloc does not need any help when the memory operand of CMPXCHG8B
33296 // does not use an index register.
33297 if (AM.IndexReg == X86::NoRegister)
33300 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
33301 // four operand definitions that are E[ABCD] registers. We skip them and
33302 // then insert the LEA.
33303 MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
33304 while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
33305 RMBBI->definesRegister(X86::EBX) ||
33306 RMBBI->definesRegister(X86::ECX) ||
33307 RMBBI->definesRegister(X86::EDX))) {
33310 MachineBasicBlock::iterator MBBI(RMBBI);
33312 BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
33314 setDirectAddressInInstr(&MI, 0, computedAddrVReg);
33318 case X86::LCMPXCHG16B:
33320 case X86::LCMPXCHG8B_SAVE_EBX:
33321 case X86::LCMPXCHG16B_SAVE_RBX: {
33323 MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
33324 if (!BB->isLiveIn(BasePtr))
33325 BB->addLiveIn(BasePtr);
33328 case TargetOpcode::PREALLOCATED_SETUP: {
33329 assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
33330 auto MFI = MF->getInfo<X86MachineFunctionInfo>();
33331 MFI->setHasPreallocatedCall(true);
33332 int64_t PreallocatedId = MI.getOperand(0).getImm();
33333 size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
33334 assert(StackAdjustment != 0 && "0 stack adjustment");
33335 LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
33336 << StackAdjustment << "\n");
33337 BuildMI(*BB, MI, DL, TII->get(X86::SUB32ri), X86::ESP)
33339 .addImm(StackAdjustment);
33340 MI.eraseFromParent();
33343 case TargetOpcode::PREALLOCATED_ARG: {
33344 assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
33345 int64_t PreallocatedId = MI.getOperand(1).getImm();
33346 int64_t ArgIdx = MI.getOperand(2).getImm();
33347 auto MFI = MF->getInfo<X86MachineFunctionInfo>();
33348 size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
33349 LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
33350 << ", arg offset " << ArgOffset << "\n");
33351 // stack pointer + offset
33353 BuildMI(*BB, MI, DL, TII->get(X86::LEA32r), MI.getOperand(0).getReg()),
33354 X86::ESP, false, ArgOffset);
33355 MI.eraseFromParent();
33358 case X86::PTDPBSSD:
33359 case X86::PTDPBSUD:
33360 case X86::PTDPBUSD:
33361 case X86::PTDPBUUD:
33362 case X86::PTDPBF16PS: {
33363 const DebugLoc &DL = MI.getDebugLoc();
33365 switch (MI.getOpcode()) {
33366 case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
33367 case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
33368 case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
33369 case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
33370 case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
33373 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
33374 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
33375 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
33376 MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
33377 MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
33379 MI.eraseFromParent(); // The pseudo is gone now.
33382 case X86::PTILEZERO: {
33383 const DebugLoc &DL = MI.getDebugLoc();
33384 unsigned Imm = MI.getOperand(0).getImm();
33385 BuildMI(*BB, MI, DL, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
33386 MI.eraseFromParent(); // The pseudo is gone now.
33389 case X86::PTILELOADD:
33390 case X86::PTILELOADDT1:
33391 case X86::PTILESTORED: {
33392 const DebugLoc &DL = MI.getDebugLoc();
33394 switch (MI.getOpcode()) {
33395 case X86::PTILELOADD: Opc = X86::TILELOADD; break;
33396 case X86::PTILELOADDT1: Opc = X86::TILELOADDT1; break;
33397 case X86::PTILESTORED: Opc = X86::TILESTORED; break;
33400 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
33401 unsigned CurOp = 0;
33402 if (Opc != X86::TILESTORED)
33403 MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
33406 MIB.add(MI.getOperand(CurOp++)); // base
33407 MIB.add(MI.getOperand(CurOp++)); // scale
33408 MIB.add(MI.getOperand(CurOp++)); // index -- stride
33409 MIB.add(MI.getOperand(CurOp++)); // displacement
33410 MIB.add(MI.getOperand(CurOp++)); // segment
33412 if (Opc == X86::TILESTORED)
33413 MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
33416 MI.eraseFromParent(); // The pseudo is gone now.
33422 //===----------------------------------------------------------------------===//
33423 // X86 Optimization Hooks
33424 //===----------------------------------------------------------------------===//
33427 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
33428 const APInt &DemandedBits,
33429 const APInt &DemandedElts,
33430 TargetLoweringOpt &TLO) const {
33431 EVT VT = Op.getValueType();
33432 unsigned Opcode = Op.getOpcode();
33433 unsigned EltSize = VT.getScalarSizeInBits();
33435 if (VT.isVector()) {
33436 // If the constant is only all signbits in the active bits, then we should
33437 // extend it to the entire constant to allow it to act as a boolean constant
33438 // vector.
33439 auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
33440 if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
33442 for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
33443 if (!DemandedElts[i] || V.getOperand(i).isUndef())
33445 const APInt &Val = V.getConstantOperandAPInt(i);
33446 if (Val.getBitWidth() > Val.getNumSignBits() &&
33447 Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
33452 // For vectors - if we have a constant, then try to sign extend.
33453 // TODO: Handle AND/ANDN cases.
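// For example, if only bit 0 of each element is demanded, an OR/XOR with
// <1,1,1,1> is rewritten as an OR/XOR with sign_extend_inreg(<1,1,1,1>, vXi1),
// i.e. an all-ones mask that the boolean-vector patterns recognize.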
33454 unsigned ActiveBits = DemandedBits.getActiveBits();
33455 if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
33456 (Opcode == ISD::OR || Opcode == ISD::XOR) &&
33457 NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
33458 EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
33459 EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
33460 VT.getVectorNumElements());
33462 TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
33463 Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
33465 TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
33466 return TLO.CombineTo(Op, NewOp);
33471 // Only optimize Ands to prevent shrinking a constant that could be
33472 // matched by movzx.
33473 if (Opcode != ISD::AND)
33476 // Make sure the RHS really is a constant.
33477 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
33481 const APInt &Mask = C->getAPIntValue();
33483 // Clear all non-demanded bits initially.
33484 APInt ShrunkMask = Mask & DemandedBits;
33486 // Find the width of the shrunk mask.
33487 unsigned Width = ShrunkMask.getActiveBits();
33489 // If the mask is all 0s there's nothing to do here.
33493 // Find the next power of 2 width, rounding up to a byte.
33494 Width = PowerOf2Ceil(std::max(Width, 8U));
33495 // Truncate the width to size to handle illegal types.
33496 Width = std::min(Width, EltSize);
33498 // Calculate a possible zero extend mask for this constant.
33499 APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
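// Worked example (assuming a 32-bit AND): Mask == 0xFFFF00 with
// DemandedBits == 0x00FF00 shrinks to 0x00FF00, Width rounds up to 16, and
// ZeroExtendMask == 0xFFFF, which passes the checks below and lets the AND be
// matched as a movzwl-style zero extension.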
33501 // If we aren't changing the mask, just return true to keep it and prevent
33502 // the caller from optimizing.
33503 if (ZeroExtendMask == Mask)
33506 // Make sure the new mask can be represented by a combination of mask bits
33507 // and non-demanded bits.
33508 if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
33511 // Replace the constant with the zero extend mask.
33513 SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
33514 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
33515 return TLO.CombineTo(Op, NewOp);
33518 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
33520 const APInt &DemandedElts,
33521 const SelectionDAG &DAG,
33522 unsigned Depth) const {
33523 unsigned BitWidth = Known.getBitWidth();
33524 unsigned NumElts = DemandedElts.getBitWidth();
33525 unsigned Opc = Op.getOpcode();
33526 EVT VT = Op.getValueType();
33527 assert((Opc >= ISD::BUILTIN_OP_END ||
33528 Opc == ISD::INTRINSIC_WO_CHAIN ||
33529 Opc == ISD::INTRINSIC_W_CHAIN ||
33530 Opc == ISD::INTRINSIC_VOID) &&
33531 "Should use MaskedValueIsZero if you don't know whether Op"
33532 " is a target node!");
33537 case X86ISD::SETCC:
33538 Known.Zero.setBitsFrom(1);
33540 case X86ISD::MOVMSK: {
33541 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
33542 Known.Zero.setBitsFrom(NumLoBits);
33545 case X86ISD::PEXTRB:
33546 case X86ISD::PEXTRW: {
33547 SDValue Src = Op.getOperand(0);
33548 EVT SrcVT = Src.getValueType();
33549 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
33550 Op.getConstantOperandVal(1));
33551 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
33552 Known = Known.anyextOrTrunc(BitWidth);
33553 Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
33556 case X86ISD::VSRAI:
33557 case X86ISD::VSHLI:
33558 case X86ISD::VSRLI: {
33559 unsigned ShAmt = Op.getConstantOperandVal(1);
33560 if (ShAmt >= VT.getScalarSizeInBits()) {
33561 Known.setAllZero();
33565 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
33566 if (Opc == X86ISD::VSHLI) {
33567 Known.Zero <<= ShAmt;
33568 Known.One <<= ShAmt;
33569 // Low bits are known zero.
33570 Known.Zero.setLowBits(ShAmt);
33571 } else if (Opc == X86ISD::VSRLI) {
33572 Known.Zero.lshrInPlace(ShAmt);
33573 Known.One.lshrInPlace(ShAmt);
33574 // High bits are known zero.
33575 Known.Zero.setHighBits(ShAmt);
33577 Known.Zero.ashrInPlace(ShAmt);
33578 Known.One.ashrInPlace(ShAmt);
33582 case X86ISD::PACKUS: {
33583 // PACKUS is just a truncation if the upper half is zero.
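// E.g. a PACKUSWB whose v8i16 inputs have their high bytes known zero never
// saturates, so each output byte is just the truncated input element and the
// known bits of the inputs carry over directly.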
33584 APInt DemandedLHS, DemandedRHS;
33585 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
33587 Known.One = APInt::getAllOnesValue(BitWidth * 2);
33588 Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
33591 if (!!DemandedLHS) {
33592 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
33593 Known.One &= Known2.One;
33594 Known.Zero &= Known2.Zero;
33596 if (!!DemandedRHS) {
33597 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
33598 Known.One &= Known2.One;
33599 Known.Zero &= Known2.Zero;
33602 if (Known.countMinLeadingZeros() < BitWidth)
33604 Known = Known.trunc(BitWidth);
33607 case X86ISD::ANDNP: {
33609 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
33610 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
33612 // ANDNP = (~X & Y);
33613 Known.One &= Known2.Zero;
33614 Known.Zero |= Known2.One;
33617 case X86ISD::FOR: {
33619 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
33620 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
33625 case X86ISD::PSADBW: {
33626 assert(VT.getScalarType() == MVT::i64 &&
33627 Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
33628 "Unexpected PSADBW types");
33630 // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
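// (Each i64 lane is a sum of eight byte-sized absolute differences, at most
// 8 * 255 = 2040, which always fits in 16 bits.)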
33631 Known.Zero.setBitsFrom(16);
33634 case X86ISD::CMOV: {
33635 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
33636 // If we don't know any bits, early out.
33637 if (Known.isUnknown())
33639 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
33641 // Only known if known in both the LHS and RHS.
33642 Known.One &= Known2.One;
33643 Known.Zero &= Known2.Zero;
33646 case X86ISD::BEXTR: {
33647 SDValue Op0 = Op.getOperand(0);
33648 SDValue Op1 = Op.getOperand(1);
33650 if (auto* Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
33651 unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
33652 unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
33654 // If the length is 0, the result is 0.
33656 Known.setAllZero();
33660 if ((Shift + Length) <= BitWidth) {
33661 Known = DAG.computeKnownBits(Op0, Depth + 1);
33662 Known = Known.extractBits(Length, Shift);
33663 Known = Known.zextOrTrunc(BitWidth);
33668 case X86ISD::CVTSI2P:
33669 case X86ISD::CVTUI2P:
33670 case X86ISD::CVTP2SI:
33671 case X86ISD::CVTP2UI:
33672 case X86ISD::MCVTP2SI:
33673 case X86ISD::MCVTP2UI:
33674 case X86ISD::CVTTP2SI:
33675 case X86ISD::CVTTP2UI:
33676 case X86ISD::MCVTTP2SI:
33677 case X86ISD::MCVTTP2UI:
33678 case X86ISD::MCVTSI2P:
33679 case X86ISD::MCVTUI2P:
33680 case X86ISD::VFPROUND:
33681 case X86ISD::VMFPROUND:
33682 case X86ISD::CVTPS2PH:
33683 case X86ISD::MCVTPS2PH: {
33684 // Conversions - upper elements are known zero.
33685 EVT SrcVT = Op.getOperand(0).getValueType();
33686 if (SrcVT.isVector()) {
33687 unsigned NumSrcElts = SrcVT.getVectorNumElements();
33688 if (NumElts > NumSrcElts &&
33689 DemandedElts.countTrailingZeros() >= NumSrcElts)
33690 Known.setAllZero();
33694 case X86ISD::STRICT_CVTTP2SI:
33695 case X86ISD::STRICT_CVTTP2UI:
33696 case X86ISD::STRICT_CVTSI2P:
33697 case X86ISD::STRICT_CVTUI2P:
33698 case X86ISD::STRICT_VFPROUND:
33699 case X86ISD::STRICT_CVTPS2PH: {
33700 // Strict Conversions - upper elements are known zero.
33701 EVT SrcVT = Op.getOperand(1).getValueType();
33702 if (SrcVT.isVector()) {
33703 unsigned NumSrcElts = SrcVT.getVectorNumElements();
33704 if (NumElts > NumSrcElts &&
33705 DemandedElts.countTrailingZeros() >= NumSrcElts)
33706 Known.setAllZero();
33710 case X86ISD::MOVQ2DQ: {
33711 // Move from MMX to XMM. Upper half of XMM should be 0.
33712 if (DemandedElts.countTrailingZeros() >= (NumElts / 2))
33713 Known.setAllZero();
33718 // Handle target shuffles.
33719 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
33720 if (isTargetShuffle(Opc)) {
33722 SmallVector<int, 64> Mask;
33723 SmallVector<SDValue, 2> Ops;
33724 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
33726 unsigned NumOps = Ops.size();
33727 unsigned NumElts = VT.getVectorNumElements();
33728 if (Mask.size() == NumElts) {
33729 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
33730 Known.Zero.setAllBits(); Known.One.setAllBits();
33731 for (unsigned i = 0; i != NumElts; ++i) {
33732 if (!DemandedElts[i])
33735 if (M == SM_SentinelUndef) {
33736 // For UNDEF elements, we don't know anything about the common state
33737 // of the shuffle result.
33740 } else if (M == SM_SentinelZero) {
33741 Known.One.clearAllBits();
33744 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
33745 "Shuffle index out of range");
33747 unsigned OpIdx = (unsigned)M / NumElts;
33748 unsigned EltIdx = (unsigned)M % NumElts;
33749 if (Ops[OpIdx].getValueType() != VT) {
33750 // TODO - handle target shuffle ops with different value types.
33754 DemandedOps[OpIdx].setBit(EltIdx);
33756 // Known bits are the values that are shared by every demanded element.
33757 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
33758 if (!DemandedOps[i])
33761 DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
33762 Known.One &= Known2.One;
33763 Known.Zero &= Known2.Zero;
33770 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
33771 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
33772 unsigned Depth) const {
33773 EVT VT = Op.getValueType();
33774 unsigned VTBits = VT.getScalarSizeInBits();
33775 unsigned Opcode = Op.getOpcode();
33777 case X86ISD::SETCC_CARRY:
33778 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
33781 case X86ISD::VTRUNC: {
33782 SDValue Src = Op.getOperand(0);
33783 MVT SrcVT = Src.getSimpleValueType();
33784 unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
33785 assert(VTBits < NumSrcBits && "Illegal truncation input type");
33786 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
33787 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
33788 if (Tmp > (NumSrcBits - VTBits))
33789 return Tmp - (NumSrcBits - VTBits);
33793 case X86ISD::PACKSS: {
33794 // PACKSS is just a truncation if the sign bits extend to the packed size.
33795 APInt DemandedLHS, DemandedRHS;
33796 getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
33799 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
33800 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
33802 Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
33804 Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
33805 unsigned Tmp = std::min(Tmp0, Tmp1);
33806 if (Tmp > (SrcBits - VTBits))
33807 return Tmp - (SrcBits - VTBits);
33811 case X86ISD::VSHLI: {
33812 SDValue Src = Op.getOperand(0);
33813 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
33814 if (ShiftVal.uge(VTBits))
33815 return VTBits; // Shifted all bits out --> zero.
33816 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
33817 if (ShiftVal.uge(Tmp))
33818 return 1; // Shifted all sign bits out --> unknown.
33819 return Tmp - ShiftVal.getZExtValue();
33822 case X86ISD::VSRAI: {
33823 SDValue Src = Op.getOperand(0);
33824 APInt ShiftVal = Op.getConstantOperandAPInt(1);
33825 if (ShiftVal.uge(VTBits - 1))
33826 return VTBits; // Sign splat.
33827 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
33829 return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
33832 case X86ISD::PCMPGT:
33833 case X86ISD::PCMPEQ:
33835 case X86ISD::VPCOM:
33836 case X86ISD::VPCOMU:
33837 // Vector compares return zero/all-bits result values.
33840 case X86ISD::ANDNP: {
33842 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
33843 if (Tmp0 == 1) return 1; // Early out.
33845 DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
33846 return std::min(Tmp0, Tmp1);
33849 case X86ISD::CMOV: {
33850 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
33851 if (Tmp0 == 1) return 1; // Early out.
33852 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
33853 return std::min(Tmp0, Tmp1);
33857 // Handle target shuffles.
33858 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
33859 if (isTargetShuffle(Opcode)) {
33861 SmallVector<int, 64> Mask;
33862 SmallVector<SDValue, 2> Ops;
33863 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
33865 unsigned NumOps = Ops.size();
33866 unsigned NumElts = VT.getVectorNumElements();
33867 if (Mask.size() == NumElts) {
33868 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
33869 for (unsigned i = 0; i != NumElts; ++i) {
33870 if (!DemandedElts[i])
33873 if (M == SM_SentinelUndef) {
33874 // For UNDEF elements, we don't know anything about the common state
33875 // of the shuffle result.
33877 } else if (M == SM_SentinelZero) {
33878 // Zero = all sign bits.
33881 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
33882 "Shuffle index out of range");
33884 unsigned OpIdx = (unsigned)M / NumElts;
33885 unsigned EltIdx = (unsigned)M % NumElts;
33886 if (Ops[OpIdx].getValueType() != VT) {
33887 // TODO - handle target shuffle ops with different value types.
33890 DemandedOps[OpIdx].setBit(EltIdx);
33892 unsigned Tmp0 = VTBits;
33893 for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
33894 if (!DemandedOps[i])
33897 unsigned Tmp1 = DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
33898 Tmp0 = std::min(Tmp0, Tmp1);
33909 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
33910 if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
33911 return N->getOperand(0);
33915 // Helper to look for a normal load that can be narrowed into a vzload with the
33916 // specified VT and memory VT. Returns SDValue() on failure.
33917 static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
33918 SelectionDAG &DAG) {
33919 // Can't if the load is volatile or atomic.
33920 if (!LN->isSimple())
33923 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
33924 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
33925 return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
33926 LN->getPointerInfo(), LN->getOriginalAlign(),
33927 LN->getMemOperand()->getFlags());
33930 // Attempt to match a combined shuffle mask against supported unary shuffle instructions.
33932 // TODO: Investigate sharing more of this with shuffle lowering.
33933 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
33934 bool AllowFloatDomain, bool AllowIntDomain,
33935 SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
33936 const X86Subtarget &Subtarget, unsigned &Shuffle,
33937 MVT &SrcVT, MVT &DstVT) {
33938 unsigned NumMaskElts = Mask.size();
33939 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
33941 // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
33942 if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
33943 isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
33944 Shuffle = X86ISD::VZEXT_MOVL;
33945 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
33949 // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
33950 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
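// e.g. a v16i8 mask {0,Z,1,Z,2,Z,3,Z,4,Z,5,Z,6,Z,7,Z} (Scale = 2) keeps only
// the low 8 source bytes with zeroed high halves, so it is matched as a
// ZERO_EXTEND_VECTOR_INREG from v16i8 to v8i16.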
33951 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
33952 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
33953 unsigned MaxScale = 64 / MaskEltSize;
33954 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
33955 bool MatchAny = true;
33956 bool MatchZero = true;
33957 unsigned NumDstElts = NumMaskElts / Scale;
33958 for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
33959 if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
33960 MatchAny = MatchZero = false;
33963 MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
33964 MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
33966 if (MatchAny || MatchZero) {
33967 assert(MatchZero && "Failed to match zext but matched aext?");
33968 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
33969 MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
33970 MVT::getIntegerVT(MaskEltSize);
33971 SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
33973 if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
33974 V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
33976 Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
33977 if (SrcVT.getVectorNumElements() != NumDstElts)
33978 Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
33980 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
33981 DstVT = MVT::getVectorVT(DstVT, NumDstElts);
33987 // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
33988 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
33989 isUndefOrEqual(Mask[0], 0) &&
33990 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
33991 Shuffle = X86ISD::VZEXT_MOVL;
33992 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
33996 // Check if we have SSE3 which will let us use MOVDDUP etc. The
33997 // instructions are no slower than UNPCKLPD but have the option to
33998 // fold the input operand into even an unaligned memory load.
33999 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
34000 if (isTargetShuffleEquivalent(Mask, {0, 0})) {
34001 Shuffle = X86ISD::MOVDDUP;
34002 SrcVT = DstVT = MVT::v2f64;
34005 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
34006 Shuffle = X86ISD::MOVSLDUP;
34007 SrcVT = DstVT = MVT::v4f32;
34010 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
34011 Shuffle = X86ISD::MOVSHDUP;
34012 SrcVT = DstVT = MVT::v4f32;
34017 if (MaskVT.is256BitVector() && AllowFloatDomain) {
34018 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
34019 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
34020 Shuffle = X86ISD::MOVDDUP;
34021 SrcVT = DstVT = MVT::v4f64;
34024 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
34025 Shuffle = X86ISD::MOVSLDUP;
34026 SrcVT = DstVT = MVT::v8f32;
34029 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
34030 Shuffle = X86ISD::MOVSHDUP;
34031 SrcVT = DstVT = MVT::v8f32;
34036 if (MaskVT.is512BitVector() && AllowFloatDomain) {
34037 assert(Subtarget.hasAVX512() &&
34038 "AVX512 required for 512-bit vector shuffles");
34039 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
34040 Shuffle = X86ISD::MOVDDUP;
34041 SrcVT = DstVT = MVT::v8f64;
34044 if (isTargetShuffleEquivalent(
34045 Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
34046 Shuffle = X86ISD::MOVSLDUP;
34047 SrcVT = DstVT = MVT::v16f32;
34050 if (isTargetShuffleEquivalent(
34051 Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
34052 Shuffle = X86ISD::MOVSHDUP;
34053 SrcVT = DstVT = MVT::v16f32;
34061 // Attempt to match a combined shuffle mask against supported unary immediate
34062 // permute instructions.
34063 // TODO: Investigate sharing more of this with shuffle lowering.
34064 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
34065 const APInt &Zeroable,
34066 bool AllowFloatDomain, bool AllowIntDomain,
34067 const X86Subtarget &Subtarget,
34068 unsigned &Shuffle, MVT &ShuffleVT,
34069 unsigned &PermuteImm) {
34070 unsigned NumMaskElts = Mask.size();
34071 unsigned InputSizeInBits = MaskVT.getSizeInBits();
34072 unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
34073 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
34074 bool ContainsZeros = isAnyZero(Mask);
34076 // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
34077 if (!ContainsZeros && MaskScalarSizeInBits == 64) {
34078 // Check for lane crossing permutes.
34079 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
34080 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
34081 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
34082 Shuffle = X86ISD::VPERMI;
34083 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
34084 PermuteImm = getV4X86ShuffleImm(Mask);
34087 if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
34088 SmallVector<int, 4> RepeatedMask;
34089 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
34090 Shuffle = X86ISD::VPERMI;
34091 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
34092 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
34096 } else if (AllowFloatDomain && Subtarget.hasAVX()) {
34097 // VPERMILPD can permute with a non-repeating shuffle.
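// e.g. a v4f64 mask {1, 0, 3, 2} swaps the two elements within each 128-bit
// lane and encodes as PermuteImm = 0b0101.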
34098 Shuffle = X86ISD::VPERMILPI;
34099 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
34101 for (int i = 0, e = Mask.size(); i != e; ++i) {
34103 if (M == SM_SentinelUndef)
34105 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
34106 PermuteImm |= (M & 1) << i;
34112 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
34113 // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
34114 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
34115 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
34116 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
34117 SmallVector<int, 4> RepeatedMask;
34118 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
34119 // Narrow the repeated mask to create 32-bit element permutes.
34120 SmallVector<int, 4> WordMask = RepeatedMask;
34121 if (MaskScalarSizeInBits == 64)
34122 narrowShuffleMaskElts(2, RepeatedMask, WordMask);
34124 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
34125 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
34126 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
34127 PermuteImm = getV4X86ShuffleImm(WordMask);
34132 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
34133 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
34134 SmallVector<int, 4> RepeatedMask;
34135 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
34136 ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
34137 ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
34139 // PSHUFLW: permute lower 4 elements only.
34140 if (isUndefOrInRange(LoMask, 0, 4) &&
34141 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
34142 Shuffle = X86ISD::PSHUFLW;
34143 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
34144 PermuteImm = getV4X86ShuffleImm(LoMask);
34148 // PSHUFHW: permute upper 4 elements only.
34149 if (isUndefOrInRange(HiMask, 4, 8) &&
34150 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
34151 // Offset the HiMask so that we can create the shuffle immediate.
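// e.g. a repeated HiMask of {5, 4, 7, 6} is offset to {1, 0, 3, 2}, which
// getV4X86ShuffleImm encodes as the PSHUFHW immediate 0xB1.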
34152 int OffsetHiMask[4];
34153 for (int i = 0; i != 4; ++i)
34154 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
34156 Shuffle = X86ISD::PSHUFHW;
34157 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
34158 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
34164 // Attempt to match against byte/bit shifts.
34165 if (AllowIntDomain &&
34166 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
34167 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
34168 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
34169 int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
34170 Mask, 0, Zeroable, Subtarget);
34171 if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
34172 32 <= ShuffleVT.getScalarSizeInBits())) {
34173 PermuteImm = (unsigned)ShiftAmt;
34178 // Attempt to match against bit rotates.
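// e.g. with XOP, a v16i8 mask {1,0, 3,2, 5,4, ...} swaps the bytes of each
// 16-bit element and can be matched as a VROTLI of v8i16 by 8 bits.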
34179 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
34180 ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
34181 Subtarget.hasAVX512())) {
34182 int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
34184 if (0 < RotateAmt) {
34185 Shuffle = X86ISD::VROTLI;
34186 PermuteImm = (unsigned)RotateAmt;
34194 // Attempt to match a combined unary shuffle mask against supported binary
34195 // shuffle instructions.
34196 // TODO: Investigate sharing more of this with shuffle lowering.
34197 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
34198 bool AllowFloatDomain, bool AllowIntDomain,
34199 SDValue &V1, SDValue &V2, const SDLoc &DL,
34200 SelectionDAG &DAG, const X86Subtarget &Subtarget,
34201 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT, bool IsUnary) {
34203 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
34205 if (MaskVT.is128BitVector()) {
34206 if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
34208 V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
34209 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
34210 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
34213 if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
34215 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
34216 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
34219 if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
34220 (AllowFloatDomain || !Subtarget.hasSSE41())) {
34222 Shuffle = X86ISD::MOVSD;
34223 SrcVT = DstVT = MVT::v2f64;
34226 if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
34227 (AllowFloatDomain || !Subtarget.hasSSE41())) {
34228 Shuffle = X86ISD::MOVSS;
34229 SrcVT = DstVT = MVT::v4f32;
34234 // Attempt to match against either an unary or binary PACKSS/PACKUS shuffle.
34235 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
34236 ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
34237 ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
34238 if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
34245 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
34246 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
34247 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
34248 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
34249 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
34250 (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
34251 if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
34253 SrcVT = DstVT = MaskVT;
34254 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
34255 SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
34263 static bool matchBinaryPermuteShuffle(
34264 MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
34265 bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
34266 const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
34267 unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
34268 unsigned NumMaskElts = Mask.size();
34269 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
34271 // Attempt to match against VALIGND/VALIGNQ rotate.
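// e.g. a two-input v4i32 mask {1, 2, 3, 4} is a rotate by one element across
// the concatenated sources and can be matched as VALIGND with immediate 1.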
34272 if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
34273 ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
34274 (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
34275 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
34276 if (!isAnyZero(Mask)) {
34277 int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
34278 if (0 < Rotation) {
34279 Shuffle = X86ISD::VALIGN;
34280 if (EltSizeInBits == 64)
34281 ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
34283 ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
34284 PermuteImm = Rotation;
34290 // Attempt to match against PALIGNR byte rotate.
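// e.g. a two-input v16i8 mask {4, 5, ..., 18, 19} shifts the concatenated
// sources down by four bytes and is matched as PALIGNR with immediate 4.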
34291 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
34292 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
34293 (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
34294 int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
34295 if (0 < ByteRotation) {
34296 Shuffle = X86ISD::PALIGNR;
34297 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
34298 PermuteImm = ByteRotation;
34303 // Attempt to combine to X86ISD::BLENDI.
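// e.g. a v4i32 mask {0, 5, 2, 7} takes lanes 1 and 3 from V2, giving
// BlendMask = 0b1010.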
34304 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
34305 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
34306 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
34307 uint64_t BlendMask = 0;
34308 bool ForceV1Zero = false, ForceV2Zero = false;
34309 SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
34310 if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
34311 ForceV2Zero, BlendMask)) {
34312 if (MaskVT == MVT::v16i16) {
34313 // We can only use v16i16 PBLENDW if the lanes are repeated.
34314 SmallVector<int, 8> RepeatedMask;
34315 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
34317 assert(RepeatedMask.size() == 8 &&
34318 "Repeated mask size doesn't match!");
34320 for (int i = 0; i < 8; ++i)
34321 if (RepeatedMask[i] >= 8)
34322 PermuteImm |= 1 << i;
34323 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
34324 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
34325 Shuffle = X86ISD::BLENDI;
34326 ShuffleVT = MaskVT;
34330 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
34331 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
34332 PermuteImm = (unsigned)BlendMask;
34333 Shuffle = X86ISD::BLENDI;
34334 ShuffleVT = MaskVT;
34340 // Attempt to combine to INSERTPS, but only if it has elements that need to be set to zero.
34342 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
34343 MaskVT.is128BitVector() && isAnyZero(Mask) &&
34344 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
34345 Shuffle = X86ISD::INSERTPS;
34346 ShuffleVT = MVT::v4f32;
34350 // Attempt to combine to SHUFPD.
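// e.g. a v2f64 mask {0, 3} (element 0 of V1, element 1 of V2) encodes as the
// SHUFPD immediate 0b10.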
34351 if (AllowFloatDomain && EltSizeInBits == 64 &&
34352 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
34353 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
34354 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
34355 bool ForceV1Zero = false, ForceV2Zero = false;
34356 if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
34357 PermuteImm, Mask, Zeroable)) {
34358 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
34359 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
34360 Shuffle = X86ISD::SHUFP;
34361 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
34366 // Attempt to combine to SHUFPS.
34367 if (AllowFloatDomain && EltSizeInBits == 32 &&
34368 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
34369 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
34370 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
34371 SmallVector<int, 4> RepeatedMask;
34372 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
34373 // Match each half of the repeated mask to determine if it's just
34374 // referencing one of the vectors, is zeroable, or is entirely undef.
34375 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
34376 int M0 = RepeatedMask[Offset];
34377 int M1 = RepeatedMask[Offset + 1];
34379 if (isUndefInRange(RepeatedMask, Offset, 2)) {
34380 return DAG.getUNDEF(MaskVT);
34381 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
34382 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
34383 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
34384 return getZeroVector(MaskVT, Subtarget, DAG, DL);
34385 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
34386 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
34387 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
34389 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
34390 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
34391 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
34398 int ShufMask[4] = {-1, -1, -1, -1};
34399 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
34400 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
34405 Shuffle = X86ISD::SHUFP;
34406 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
34407 PermuteImm = getV4X86ShuffleImm(ShufMask);
34413 // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
34414 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
34415 MaskVT.is128BitVector() &&
34416 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
34417 Shuffle = X86ISD::INSERTPS;
34418 ShuffleVT = MVT::v4f32;
34425 static SDValue combineX86ShuffleChainWithExtract(
34426 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
34427 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
34428 const X86Subtarget &Subtarget);
34430 /// Combine an arbitrary chain of shuffles into a single instruction if possible.
34433 /// This is the leaf of the recursive combine below. When we have found some
34434 /// chain of single-use x86 shuffle instructions and accumulated the combined
34435 /// shuffle mask represented by them, this will try to pattern match that mask
34436 /// into either a single instruction if there is a special purpose instruction
34437 /// for this operation, or into a PSHUFB instruction which is a fully general
34438 /// instruction but should only be used to replace chains over a certain depth.
34439 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
34440 ArrayRef<int> BaseMask, int Depth,
34441 bool HasVariableMask,
34442 bool AllowVariableMask, SelectionDAG &DAG,
34443 const X86Subtarget &Subtarget) {
34444 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
34445 assert((Inputs.size() == 1 || Inputs.size() == 2) &&
34446 "Unexpected number of shuffle inputs!");
34448 // Find the inputs that enter the chain. Note that multiple uses are OK
34449 // here; we're not going to remove the operands we find.
34450 bool UnaryShuffle = (Inputs.size() == 1);
34451 SDValue V1 = peekThroughBitcasts(Inputs[0]);
34452 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
34453 : peekThroughBitcasts(Inputs[1]));
34455 MVT VT1 = V1.getSimpleValueType();
34456 MVT VT2 = V2.getSimpleValueType();
34457 MVT RootVT = Root.getSimpleValueType();
34458 assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
34459 VT2.getSizeInBits() == RootVT.getSizeInBits() &&
34460 "Vector size mismatch");
34465 unsigned NumBaseMaskElts = BaseMask.size();
34466 if (NumBaseMaskElts == 1) {
34467 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
34468 return DAG.getBitcast(RootVT, V1);
34471 bool OptForSize = DAG.shouldOptForSize();
34472 unsigned RootSizeInBits = RootVT.getSizeInBits();
34473 unsigned NumRootElts = RootVT.getVectorNumElements();
34474 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
34475 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
34476 (RootVT.isFloatingPoint() && Depth >= 1) ||
34477 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
34479 // Don't combine if we are an AVX512/EVEX target and the mask element size
34480 // is different from the root element size - this would prevent writemasks
34481 // from being reused.
34482 bool IsMaskedShuffle = false;
34483 if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
34484 if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
34485 Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
34486 IsMaskedShuffle = true;
34490 // If we are shuffling a broadcast (and not introducing zeros) then
34491 // we can just use the broadcast directly. This works for smaller broadcast
34492 // elements as well, as they already repeat across each mask element.
34493 if (UnaryShuffle && isTargetShuffleSplat(V1) && !isAnyZero(BaseMask) &&
34494 (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0) {
34495 return DAG.getBitcast(RootVT, V1);
34498 // Attempt to match a subvector broadcast.
34499 // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
34500 if (UnaryShuffle &&
34501 (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
34502 SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
34503 if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
34504 SDValue Src = Inputs[0];
34505 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
34506 Src.getOperand(0).isUndef() &&
34507 Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
34508 MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
34509 return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
34510 Src.getValueType(),
34511 Src.getOperand(1)));
34516 // Handle 128/256-bit lane shuffles of 512-bit vectors.
34517 if (RootVT.is512BitVector() &&
34518 (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
34519 MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
34521 // If the upper subvectors are zeroable, then an extract+insert is more
34522 // optimal than using X86ISD::SHUF128. The insertion is free, even if it has
34523 // to zero the upper subvectors.
34524 if (isUndefOrZeroInRange(BaseMask, 1, NumBaseMaskElts - 1)) {
34525 if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
34526 return SDValue(); // Nothing to do!
34527 assert(isInRange(BaseMask[0], 0, NumBaseMaskElts) &&
34528 "Unexpected lane shuffle");
34529 Res = DAG.getBitcast(ShuffleVT, V1);
34530 unsigned SubIdx = BaseMask[0] * (8 / NumBaseMaskElts);
34531 bool UseZero = isAnyZero(BaseMask);
34532 Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
34533 Res = widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
34534 return DAG.getBitcast(RootVT, Res);
34537 // Narrow shuffle mask to v4x128.
34538 SmallVector<int, 4> Mask;
34539 assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
34540 narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, BaseMask, Mask);
34542 // Try to lower to vshuf64x2/vshuf32x4.
34543 auto MatchSHUF128 = [](MVT ShuffleVT, const SDLoc &DL, ArrayRef<int> Mask,
34544 SDValue V1, SDValue V2, SelectionDAG &DAG) {
34545 unsigned PermMask = 0;
34546 // Ensure elements came from the same Op.
34547 SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
34548 for (int i = 0; i < 4; ++i) {
34549 assert(Mask[i] >= -1 && "Illegal shuffle sentinel value");
34553 SDValue Op = Mask[i] >= 4 ? V2 : V1;
34554 unsigned OpIndex = i / 2;
34555 if (Ops[OpIndex].isUndef())
34557 else if (Ops[OpIndex] != Op)
34560 // Convert the 128-bit shuffle mask selection values into 128-bit
34561 // selection bits defined by a vshuf64x2 instruction's immediate control byte.
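// e.g. a widened 4 x 128-bit mask {0, 1, 4, 5} (low half of the first op,
// low half of the second op) encodes as PermMask = 0x44.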
34563 PermMask |= (Mask[i] % 4) << (i * 2);
34566 return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
34567 DAG.getBitcast(ShuffleVT, Ops[0]),
34568 DAG.getBitcast(ShuffleVT, Ops[1]),
34569 DAG.getTargetConstant(PermMask, DL, MVT::i8));
34572 // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
34573 // doesn't work because our mask is for 128 bits and we don't have an MVT matching that.
34576 bool PreferPERMQ = UnaryShuffle && isUndefOrInRange(Mask[0], 0, 2) &&
34577 isUndefOrInRange(Mask[1], 0, 2) && isUndefOrInRange(Mask[2], 2, 4) &&
34578 isUndefOrInRange(Mask[3], 2, 4) &&
34579 (Mask[0] < 0 || Mask[2] < 0 || Mask[0] == (Mask[2] % 2)) &&
34580 (Mask[1] < 0 || Mask[3] < 0 || Mask[1] == (Mask[3] % 2));
34582 if (!isAnyZero(Mask) && !PreferPERMQ) {
34583 if (SDValue V = MatchSHUF128(ShuffleVT, DL, Mask, V1, V2, DAG))
34584 return DAG.getBitcast(RootVT, V);
34588 // Handle 128-bit lane shuffles of 256-bit vectors.
34589 if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
34590 MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
34592 // If the upper half is zeroable, then an extract+insert is more optimal
34593 // than using X86ISD::VPERM2X128. The insertion is free, even if it has to
34594 // zero the upper half.
34595 if (isUndefOrZero(BaseMask[1])) {
34596 if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
34597 return SDValue(); // Nothing to do!
34598 assert(isInRange(BaseMask[0], 0, 2) && "Unexpected lane shuffle");
34599 Res = DAG.getBitcast(ShuffleVT, V1);
34600 Res = extract128BitVector(Res, BaseMask[0] * 2, DAG, DL);
34601 Res = widenSubVector(Res, BaseMask[1] == SM_SentinelZero, Subtarget, DAG,
34603 return DAG.getBitcast(RootVT, Res);
34606 if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
34607 return SDValue(); // Nothing to do!
34609 // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
34610 // we need to use the zeroing feature.
34611 // Prefer blends for sequential shuffles unless we are optimizing for size.
34612 if (UnaryShuffle &&
34613 !(Subtarget.hasAVX2() && isUndefOrInRange(BaseMask, 0, 2)) &&
34614 (OptForSize || !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0))) {
34615 unsigned PermMask = 0;
34616 PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
34617 PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
34619 Res = DAG.getBitcast(ShuffleVT, V1);
34620 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
34621 DAG.getUNDEF(ShuffleVT),
34622 DAG.getTargetConstant(PermMask, DL, MVT::i8));
34623 return DAG.getBitcast(RootVT, Res);
34626 if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
34627 return SDValue(); // Nothing to do!
34629 // TODO - handle AVX512VL cases with X86ISD::SHUF128.
34630 if (!UnaryShuffle && !IsMaskedShuffle) {
34631 assert(llvm::all_of(BaseMask, [](int M) { return 0 <= M && M < 4; }) &&
34632 "Unexpected shuffle sentinel value");
34633 // Prefer blends to X86ISD::VPERM2X128.
34634 if (!((BaseMask[0] == 0 && BaseMask[1] == 3) ||
34635 (BaseMask[0] == 2 && BaseMask[1] == 1))) {
34636 unsigned PermMask = 0;
34637 PermMask |= ((BaseMask[0] & 3) << 0);
34638 PermMask |= ((BaseMask[1] & 3) << 4);
34641 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT,
34642 DAG.getBitcast(ShuffleVT, isInRange(BaseMask[0], 0, 2) ? V1 : V2),
34643 DAG.getBitcast(ShuffleVT, isInRange(BaseMask[1], 0, 2) ? V1 : V2),
34644 DAG.getTargetConstant(PermMask, DL, MVT::i8));
34645 return DAG.getBitcast(RootVT, Res);
34650 // For masks that have been widened to 128-bit elements or more,
34651 // narrow back down to 64-bit elements.
34652 SmallVector<int, 64> Mask;
34653 if (BaseMaskEltSizeInBits > 64) {
34654 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
34655 int MaskScale = BaseMaskEltSizeInBits / 64;
34656 narrowShuffleMaskElts(MaskScale, BaseMask, Mask);
34658 Mask.assign(BaseMask.begin(), BaseMask.end());
34661 // For masked shuffles, we're trying to match the root width for better
34662 // writemask folding; attempt to scale the mask.
34663 // TODO - variable shuffles might need this to be widened again.
34664 if (IsMaskedShuffle && NumRootElts > Mask.size()) {
34665 assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
34666 int MaskScale = NumRootElts / Mask.size();
34667 SmallVector<int, 64> ScaledMask;
34668 narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
34669 Mask = std::move(ScaledMask);
34672 unsigned NumMaskElts = Mask.size();
34673 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
34675 // Determine the effective mask value type.
34676 FloatDomain &= (32 <= MaskEltSizeInBits);
34677 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
34678 : MVT::getIntegerVT(MaskEltSizeInBits);
34679 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
34681 // Only allow legal mask types.
34682 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
34685 // Attempt to match the mask against known shuffle patterns.
34686 MVT ShuffleSrcVT, ShuffleVT;
34687 unsigned Shuffle, PermuteImm;
34689 // Which shuffle domains are permitted?
34690 // Permit domain crossing at higher combine depths.
34691 // TODO: Should we indicate which domain is preferred if both are allowed?
34692 bool AllowFloatDomain = FloatDomain || (Depth >= 3);
34693 bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
34694 (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
34696 // Determine zeroable mask elements.
34697 APInt KnownUndef, KnownZero;
34698 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
34699 APInt Zeroable = KnownUndef | KnownZero;
34701 if (UnaryShuffle) {
34702 // Attempt to match against broadcast-from-vector.
34703 // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
34704 if ((Subtarget.hasAVX2() ||
34705 (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
34706 (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
34707 SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
34708 if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
34709 if (V1.getValueType() == MaskVT &&
34710 V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
34711 MayFoldLoad(V1.getOperand(0))) {
34712 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
34713 return SDValue(); // Nothing to do!
34714 Res = V1.getOperand(0);
34715 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
34716 return DAG.getBitcast(RootVT, Res);
34718 if (Subtarget.hasAVX2()) {
34719 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
34720 return SDValue(); // Nothing to do!
34721 Res = DAG.getBitcast(MaskVT, V1);
34722 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
34723 return DAG.getBitcast(RootVT, Res);
34728 SDValue NewV1 = V1; // Save operand in case early exit happens.
34729 if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
34730 DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
34732 (!IsMaskedShuffle ||
34733 (NumRootElts == ShuffleVT.getVectorNumElements()))) {
34734 if (Depth == 0 && Root.getOpcode() == Shuffle)
34735 return SDValue(); // Nothing to do!
34736 Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
34737 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
34738 return DAG.getBitcast(RootVT, Res);
34741 if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
34742 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
34744 (!IsMaskedShuffle ||
34745 (NumRootElts == ShuffleVT.getVectorNumElements()))) {
34746 if (Depth == 0 && Root.getOpcode() == Shuffle)
34747 return SDValue(); // Nothing to do!
34748 Res = DAG.getBitcast(ShuffleVT, V1);
34749 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
34750 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
34751 return DAG.getBitcast(RootVT, Res);
34756 // Attempt to combine to INSERTPS, but only if the inserted element has come from a scalar.
34757 // TODO: Handle other insertions here as well?
34758 if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
34759 MaskEltSizeInBits == 32 && Subtarget.hasSSE41() &&
34760 !isTargetShuffleEquivalent(Mask, {4, 1, 2, 3})) {
34761 SDValue SrcV1 = V1, SrcV2 = V2;
34762 if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask, DAG) &&
34763 SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
34764 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
34765 return SDValue(); // Nothing to do!
34766 Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
34767 DAG.getBitcast(MVT::v4f32, SrcV1),
34768 DAG.getBitcast(MVT::v4f32, SrcV2),
34769 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
34770 return DAG.getBitcast(RootVT, Res);
34774 SDValue NewV1 = V1; // Save operands in case early exit happens.
34775 SDValue NewV2 = V2;
34776 if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
34777 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
34778 ShuffleVT, UnaryShuffle) &&
34779 (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
34780 if (Depth == 0 && Root.getOpcode() == Shuffle)
34781 return SDValue(); // Nothing to do!
34782 NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
34783 NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
34784 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
34785 return DAG.getBitcast(RootVT, Res);
34788 NewV1 = V1; // Save operands in case early exit happens.
34790 if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
34791 AllowIntDomain, NewV1, NewV2, DL, DAG,
34792 Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
34793 (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
34794 if (Depth == 0 && Root.getOpcode() == Shuffle)
34795 return SDValue(); // Nothing to do!
34796 NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
34797 NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
34798 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
34799 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
34800 return DAG.getBitcast(RootVT, Res);
34803 // Typically from here on, we need an integer version of MaskVT.
34804 MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
34805 IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
34807 // Annoyingly, SSE4A instructions don't map into the above match helpers.
34808 if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
34809 uint64_t BitLen, BitIdx;
34810 if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
34812 if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
34813 return SDValue(); // Nothing to do!
34814 V1 = DAG.getBitcast(IntMaskVT, V1);
34815 Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
34816 DAG.getTargetConstant(BitLen, DL, MVT::i8),
34817 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
34818 return DAG.getBitcast(RootVT, Res);
34821 if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
34822 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
34823 return SDValue(); // Nothing to do!
34824 V1 = DAG.getBitcast(IntMaskVT, V1);
34825 V2 = DAG.getBitcast(IntMaskVT, V2);
34826 Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
34827 DAG.getTargetConstant(BitLen, DL, MVT::i8),
34828 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
34829 return DAG.getBitcast(RootVT, Res);
34833 // Match shuffle against TRUNCATE patterns.
34834 if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
34835 // Match against a VTRUNC instruction, accounting for src/dst sizes.
34836 if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
34838 bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
34839 ShuffleSrcVT.getVectorNumElements();
34841 unsigned Opc = IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
34842 if (Depth == 0 && Root.getOpcode() == Opc)
34843 return SDValue(); // Nothing to do!
34844 V1 = DAG.getBitcast(ShuffleSrcVT, V1);
34845 Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
34846 if (ShuffleVT.getSizeInBits() < RootSizeInBits)
34847 Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
34848 return DAG.getBitcast(RootVT, Res);
34851 // Do we need a more general binary truncation pattern?
34852 if (RootSizeInBits < 512 &&
34853 ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
34854 (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
34855 (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
34856 isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
34857 if (Depth == 0 && Root.getOpcode() == ISD::TRUNCATE)
34858 return SDValue(); // Nothing to do!
34859 ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
34860 ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
34861 V1 = DAG.getBitcast(ShuffleSrcVT, V1);
34862 V2 = DAG.getBitcast(ShuffleSrcVT, V2);
34863 ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
34864 ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
34865 Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
34866 Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
34867 return DAG.getBitcast(RootVT, Res);
34871 // Don't try to re-form single instruction chains under any circumstances now
34872 // that we've done encoding canonicalization for them.
34876 // Depth threshold above which we can efficiently use variable mask shuffles.
34877 int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 1 : 2;
34878 AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
34880 bool MaskContainsZeros = isAnyZero(Mask);
34882 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
34883 // If we have a single input lane-crossing shuffle then lower to VPERMV.
34884 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
34885 ((Subtarget.hasAVX2() &&
34886 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
34887 (Subtarget.hasAVX512() &&
34888 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
34889 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
34890 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
34891 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
34892 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
34893 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
34894 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
34895 Res = DAG.getBitcast(MaskVT, V1);
34896 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
34897 return DAG.getBitcast(RootVT, Res);
34900 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
34901 // vector as the second source.
34902 if (UnaryShuffle && AllowVariableMask &&
34903 ((Subtarget.hasAVX512() &&
34904 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
34905 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
34906 (Subtarget.hasVLX() &&
34907 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
34908 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
34909 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
34910 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
34911 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
34912 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
34913 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
34914 for (unsigned i = 0; i != NumMaskElts; ++i)
34915 if (Mask[i] == SM_SentinelZero)
34916 Mask[i] = NumMaskElts + i;
34918 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
34919 Res = DAG.getBitcast(MaskVT, V1);
34920 SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
34921 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
34922 return DAG.getBitcast(RootVT, Res);
34925 // If that failed and either input is extracted then try to combine as a
34926 // shuffle with the larger type.
34927 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
34928 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
34930 return WideShuffle;
34932 // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
34933 if (AllowVariableMask && !MaskContainsZeros &&
34934 ((Subtarget.hasAVX512() &&
34935 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
34936 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
34937 (Subtarget.hasVLX() &&
34938 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
34939 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
34940 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
34941 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
34942 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
34943 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
34944 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
34945 V1 = DAG.getBitcast(MaskVT, V1);
34946 V2 = DAG.getBitcast(MaskVT, V2);
34947 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
34948 return DAG.getBitcast(RootVT, Res);
34953 // See if we can combine a single input shuffle with zeros to a bit-mask,
34954 // which is much simpler than any shuffle.
34955 if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
34956 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
34957 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
34958 APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
34959 APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
34960 APInt UndefElts(NumMaskElts, 0);
34961 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
34962 for (unsigned i = 0; i != NumMaskElts; ++i) {
34964 if (M == SM_SentinelUndef) {
34965 UndefElts.setBit(i);
34968 if (M == SM_SentinelZero)
34970 EltBits[i] = AllOnes;
34972 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
34973 Res = DAG.getBitcast(MaskVT, V1);
34974 unsigned AndOpcode =
34975 MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
34976 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
34977 return DAG.getBitcast(RootVT, Res);
34980 // If we have a single input shuffle with different shuffle patterns in the
34981 // 128-bit lanes, use a variable-mask VPERMILPS.
34982 // TODO: Combine other mask types at higher depths.
34983 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
34984 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
34985 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
34986 SmallVector<SDValue, 16> VPermIdx;
34987 for (int M : Mask) {
34989 SDValue Idx = M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
34990 VPermIdx.push_back(Idx);
34992 SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
34993 Res = DAG.getBitcast(MaskVT, V1);
34994 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
34995 return DAG.getBitcast(RootVT, Res);
34998 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
34999 // to VPERMIL2PD/VPERMIL2PS.
35000 if (AllowVariableMask && Subtarget.hasXOP() &&
35001 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
35002 MaskVT == MVT::v8f32)) {
35003 // VPERMIL2 Operation.
35004 // Bits[3] - Match Bit.
35005 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
35006 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
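// e.g. a two-input v4f32 mask {0, 5, 2, 7} becomes the selector vector
// {0, 5, 2, 7}: selector values 0-3 pick from the first source, 4-7 from the
// second, and a zeroable lane pushes 8 (match bit set) instead.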
35007 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
35008 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
35009 SmallVector<int, 8> VPerm2Idx;
35010 unsigned M2ZImm = 0;
35011 for (int M : Mask) {
35012 if (M == SM_SentinelUndef) {
35013 VPerm2Idx.push_back(-1);
35016 if (M == SM_SentinelZero) {
35018 VPerm2Idx.push_back(8);
35021 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
35022 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
35023 VPerm2Idx.push_back(Index);
35025 V1 = DAG.getBitcast(MaskVT, V1);
35026 V2 = DAG.getBitcast(MaskVT, V2);
35027 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
35028 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
35029 DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
35030 return DAG.getBitcast(RootVT, Res);
35033 // If we have 3 or more shuffle instructions or a chain involving a variable
35034 // mask, we can replace them with a single PSHUFB instruction profitably.
35035 // Intel's manuals suggest only using PSHUFB if doing so replaces 5
35036 // instructions, but in practice PSHUFB tends to be *very* fast so we're
35037 // more aggressive.
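// e.g. lowering a v4i32 mask {1, 0, 3, 2} this way builds the byte mask
// {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11}; zeroable elements use 0x80,
// which makes PSHUFB write a zero byte.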
35038 if (UnaryShuffle && AllowVariableMask &&
35039 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
35040 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
35041 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
35042 SmallVector<SDValue, 16> PSHUFBMask;
35043 int NumBytes = RootVT.getSizeInBits() / 8;
35044 int Ratio = NumBytes / NumMaskElts;
35045 for (int i = 0; i < NumBytes; ++i) {
35046 int M = Mask[i / Ratio];
35047 if (M == SM_SentinelUndef) {
35048 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
35051 if (M == SM_SentinelZero) {
35052 PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
35055 M = Ratio * M + i % Ratio;
35056 assert((M / 16) == (i / 16) && "Lane crossing detected");
35057 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
35059 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
35060 Res = DAG.getBitcast(ByteVT, V1);
35061 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
35062 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
35063 return DAG.getBitcast(RootVT, Res);
35066 // With XOP, if we have a 128-bit binary input shuffle we can always combine
35067 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
35068 // slower than PSHUFB on targets that support both.
35069 if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
35070 // VPPERM Mask Operation
35071 // Bits[4:0] - Byte Index (0 - 31)
35072 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
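// e.g. a two-input v16i8 interleave mask {0, 16, 1, 17, ...} maps directly to
// selector bytes {0, 16, 1, 17, ...} (values 16-31 pick from the second
// source), and zeroable elements use 0x80 (operation field = 4, i.e. ZERO).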
35073 SmallVector<SDValue, 16> VPPERMMask;
35075 int Ratio = NumBytes / NumMaskElts;
35076 for (int i = 0; i < NumBytes; ++i) {
35077 int M = Mask[i / Ratio];
35078 if (M == SM_SentinelUndef) {
35079 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
35082 if (M == SM_SentinelZero) {
35083 VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
35086 M = Ratio * M + i % Ratio;
35087 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
35089 MVT ByteVT = MVT::v16i8;
35090 V1 = DAG.getBitcast(ByteVT, V1);
35091 V2 = DAG.getBitcast(ByteVT, V2);
35092 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
35093 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
35094 return DAG.getBitcast(RootVT, Res);
35097 // If that failed and either input is extracted then try to combine as a
35098 // shuffle with the larger type.
35099 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
35100 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
35102 return WideShuffle;
35104 // If we have a dual input shuffle then lower to VPERMV3.
35105 if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
35106 ((Subtarget.hasAVX512() &&
35107 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
35108 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
35109 (Subtarget.hasVLX() &&
35110 (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
35111 MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
35112 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
35113 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
35114 (Subtarget.hasBWI() && Subtarget.hasVLX() &&
35115 (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
35116 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
35117 (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
35118 (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
35119 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
35120 V1 = DAG.getBitcast(MaskVT, V1);
35121 V2 = DAG.getBitcast(MaskVT, V2);
35122 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
35123 return DAG.getBitcast(RootVT, Res);
35126 // Failed to find any combines.
35130 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
35131 // instruction if possible.
35133 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
35134 // type size to attempt to combine:
35135 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
35137 // --> extract_subvector(shuffle(x,y,m2),0)
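// e.g. a v4f32 shuffle of extract_subvector(x, 0) and extract_subvector(y, 4)
// (x and y being v8f32) is re-expressed as a v8f32 shuffle of x and y whose
// low 128 bits are then extracted, letting the wider combine run.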
35138 static SDValue combineX86ShuffleChainWithExtract(
35139 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
35140 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
35141 const X86Subtarget &Subtarget) {
35142 unsigned NumMaskElts = BaseMask.size();
35143 unsigned NumInputs = Inputs.size();
35144 if (NumInputs == 0)
35147 SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
35148 SmallVector<unsigned, 4> Offsets(NumInputs, 0);
35150 // Peek through subvectors.
35151 // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
35152 unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
35153 for (unsigned i = 0; i != NumInputs; ++i) {
35154 SDValue &Src = WideInputs[i];
35155 unsigned &Offset = Offsets[i];
35156 Src = peekThroughBitcasts(Src);
35157 EVT BaseVT = Src.getValueType();
35158 while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
35159 Offset += Src.getConstantOperandVal(1);
35160 Src = Src.getOperand(0);
35162 WideSizeInBits = std::max(WideSizeInBits,
35163 (unsigned)Src.getValueSizeInBits());
35164 assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
35165 "Unexpected subvector extraction");
35166 Offset /= BaseVT.getVectorNumElements();
35167 Offset *= NumMaskElts;
35170 // Bail if we're always extracting from the lowest subvectors;
35171 // combineX86ShuffleChain should match this for the current width.
35172 if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
35175 EVT RootVT = Root.getValueType();
35176 unsigned RootSizeInBits = RootVT.getSizeInBits();
35177 unsigned Scale = WideSizeInBits / RootSizeInBits;
35178 assert((WideSizeInBits % RootSizeInBits) == 0 &&
35179 "Unexpected subvector extraction");
35181 // If the src vector types aren't the same, see if we can extend
35182 // them to match each other.
35183 // TODO: Support different scalar types?
35184 EVT WideSVT = WideInputs[0].getValueType().getScalarType();
35185 if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
35186 return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
35187 Op.getValueType().getScalarType() != WideSVT;
35191 for (SDValue &NewInput : WideInputs) {
35192 assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
35193 "Shuffle vector size mismatch");
35194 if (WideSizeInBits > NewInput.getValueSizeInBits())
35195 NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
35196 SDLoc(NewInput), WideSizeInBits);
35197 assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
35198 "Unexpected subvector extraction");
35201 // Create new mask for larger type.
35202 for (unsigned i = 1; i != NumInputs; ++i)
35203 Offsets[i] += i * Scale * NumMaskElts;
35205 SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
35206 for (int &M : WideMask) {
35209 M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
35211 WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
35213 // Remove unused/repeated shuffle source ops.
35214 resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
35215 assert(!WideInputs.empty() && "Shuffle with no inputs detected");
35217 if (WideInputs.size() > 2)
35220 // Increase depth for every upper subvector we've peeked through.
35221 Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
35223 // Attempt to combine wider chain.
35224 // TODO: Can we use a better Root?
35225 SDValue WideRoot = WideInputs[0];
35226 if (SDValue WideShuffle = combineX86ShuffleChain(
35227 WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
35228 AllowVariableMask, DAG, Subtarget)) {
35230 WideShuffle = extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
35231 return DAG.getBitcast(RootVT, WideShuffle);
35236 // Attempt to constant fold all of the constant source ops.
35237 // Returns the constant-folded node if the entire shuffle folds to a constant.
35238 // TODO: Extend this to merge multiple constant Ops and update the mask.
35239 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
35240 ArrayRef<int> Mask, SDValue Root,
35241 bool HasVariableMask,
35243 const X86Subtarget &Subtarget) {
35244 MVT VT = Root.getSimpleValueType();
35246 unsigned SizeInBits = VT.getSizeInBits();
35247 unsigned NumMaskElts = Mask.size();
35248 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
35249 unsigned NumOps = Ops.size();
35251 // Extract constant bits from each source op.
35252 bool OneUseConstantOp = false;
35253 SmallVector<APInt, 16> UndefEltsOps(NumOps);
35254 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
35255 for (unsigned i = 0; i != NumOps; ++i) {
35256 SDValue SrcOp = Ops[i];
35257 OneUseConstantOp |= SrcOp.hasOneUse();
35258 if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
35263 // Only fold if at least one of the constants is only used once or
35264 // the combined shuffle has included a variable mask shuffle; this
35265 // is to avoid constant pool bloat.
35266 if (!OneUseConstantOp && !HasVariableMask)
35269 // Shuffle the constant bits according to the mask.
35271 APInt UndefElts(NumMaskElts, 0);
35272 APInt ZeroElts(NumMaskElts, 0);
35273 APInt ConstantElts(NumMaskElts, 0);
35274 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
35275 APInt::getNullValue(MaskSizeInBits));
35276 for (unsigned i = 0; i != NumMaskElts; ++i) {
35278 if (M == SM_SentinelUndef) {
35279 UndefElts.setBit(i);
35281 } else if (M == SM_SentinelZero) {
35282 ZeroElts.setBit(i);
35285 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
35287 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
35288 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
35290 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
35291 if (SrcUndefElts[SrcMaskIdx]) {
35292 UndefElts.setBit(i);
35296 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
35297 APInt &Bits = SrcEltBits[SrcMaskIdx];
35299 ZeroElts.setBit(i);
35303 ConstantElts.setBit(i);
35304 ConstantBitData[i] = Bits;
35306 assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
35308 // Attempt to create a zero vector.
35309 if ((UndefElts | ZeroElts).isAllOnesValue())
35310 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
35312 // Create the constant data.
35313 MVT MaskSVT;
35314 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
35315 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
35316 else
35317 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
35319 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
35320 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
35323 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
35324 return DAG.getBitcast(VT, CstOp);
35327 /// Fully generic combining of x86 shuffle instructions.
35329 /// This should be the last combine run over the x86 shuffle instructions. Once
35330 /// they have been fully optimized, this will recursively consider all chains
35331 /// of single-use shuffle instructions, build a generic model of the cumulative
35332 /// shuffle operation, and check for simpler instructions which implement this
35333 /// operation. We use this primarily for two purposes:
35335 /// 1) Collapse generic shuffles to specialized single instructions when
35336 /// equivalent. In most cases, this is just an encoding size win, but
35337 /// sometimes we will collapse multiple generic shuffles into a single
35338 /// special-purpose shuffle.
35339 /// 2) Look for sequences of shuffle instructions with 3 or more total
35340 /// instructions, and replace them with the slightly more expensive SSSE3
35341 /// PSHUFB instruction if available. We do this as the last combining step
35342 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
35343 /// a suitable short sequence of other instructions. The PSHUFB will either
35344 /// use a register or have to read from memory and so is slightly (but only
35345 /// slightly) more expensive than the other shuffle instructions.
35347 /// Because this is inherently a quadratic operation (for each shuffle in
35348 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
35349 /// This should never be an issue in practice as the shuffle lowering doesn't
35350 /// produce sequences of more than 8 instructions.
35352 /// FIXME: We will currently miss some cases where the redundant shuffling
35353 /// would simplify under the threshold for PSHUFB formation because of
35354 /// combine-ordering. To fix this, we should do the redundant instruction
35355 /// combining in this recursive walk.
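///
/// Illustrative example (added for exposition): two stacked PSHUFDs that each
/// reverse a v4i32, e.g.
///   t1 = PSHUFD t0, <3,2,1,0>
///   t2 = PSHUFD t1, <3,2,1,0>
/// accumulate to the identity mask <0,1,2,3>, so the whole chain can be
/// replaced by t0 without emitting any shuffle at all.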
35356 static SDValue combineX86ShufflesRecursively(
35357 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
35358 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
35359 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
35360 const X86Subtarget &Subtarget) {
35361 assert(RootMask.size() > 0 &&
35362 (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
35363 "Illegal shuffle root mask");
35365 // Bound the depth of our recursive combine because this is ultimately
35366 // quadratic in nature.
35367 const unsigned MaxRecursionDepth = 8;
35368 if (Depth >= MaxRecursionDepth)
35371 // Directly rip through bitcasts to find the underlying operand.
35372 SDValue Op = SrcOps[SrcOpIndex];
35373 Op = peekThroughOneUseBitcasts(Op);
35375 MVT VT = Op.getSimpleValueType();
35376 if (!VT.isVector())
35377 return SDValue(); // Bail if we hit a non-vector.
35379 assert(Root.getSimpleValueType().isVector() &&
35380 "Shuffles operate on vector types!");
35381 unsigned RootSizeInBits = Root.getSimpleValueType().getSizeInBits();
35382 assert(VT.getSizeInBits() == RootSizeInBits &&
35383 "Can only combine shuffles of the same vector register size.");
35385 // Extract target shuffle mask and resolve sentinels and inputs.
35386 // TODO - determine Op's demanded elts from RootMask.
35387 SmallVector<int, 64> OpMask;
35388 SmallVector<SDValue, 2> OpInputs;
35389 APInt OpUndef, OpZero;
35390 APInt OpDemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
35391 bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
35392 if (!getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
35393 OpZero, DAG, Depth, false))
35396 // Shuffle inputs must be the same size as the result; bail on any larger
35397 // inputs and widen any smaller inputs.
35398 if (llvm::any_of(OpInputs, [RootSizeInBits](SDValue Op) {
35399 return Op.getValueSizeInBits() > RootSizeInBits;
35403 for (SDValue &Op : OpInputs)
35404 if (Op.getValueSizeInBits() < RootSizeInBits)
35405 Op = widenSubVector(peekThroughOneUseBitcasts(Op), false, Subtarget, DAG,
35406 SDLoc(Op), RootSizeInBits);
35408 SmallVector<int, 64> Mask;
35409 SmallVector<SDValue, 16> Ops;
35411 // We don't need to merge masks if the root is empty.
35412 bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
35414 // Only resolve zeros if it will remove an input; otherwise we might end
35415 // up in an infinite loop.
35416 bool ResolveKnownZeros = true;
35417 if (!OpZero.isNullValue()) {
35418 APInt UsedInputs = APInt::getNullValue(OpInputs.size());
35419 for (int i = 0, e = OpMask.size(); i != e; ++i) {
35421 if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
35423 UsedInputs.setBit(M / OpMask.size());
35424 if (UsedInputs.isAllOnesValue()) {
35425 ResolveKnownZeros = false;
35430 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
35431 ResolveKnownZeros);
35434 Ops.append(OpInputs.begin(), OpInputs.end());
35436 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
35438 // Add the inputs to the Ops list, avoiding duplicates.
35439 Ops.append(SrcOps.begin(), SrcOps.end());
35441 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
35442 // Attempt to find an existing match.
35443 SDValue InputBC = peekThroughBitcasts(Input);
35444 for (int i = 0, e = Ops.size(); i < e; ++i)
35445 if (InputBC == peekThroughBitcasts(Ops[i]))
35447 // Match failed - should we replace an existing Op?
35448 if (InsertionPoint >= 0) {
35449 Ops[InsertionPoint] = Input;
35450 return InsertionPoint;
35452 // Add to the end of the Ops list.
35453 Ops.push_back(Input);
35454 return Ops.size() - 1;
35457 SmallVector<int, 2> OpInputIdx;
35458 for (SDValue OpInput : OpInputs)
35459 OpInputIdx.push_back(
35460 AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
35462 assert(((RootMask.size() > OpMask.size() &&
35463 RootMask.size() % OpMask.size() == 0) ||
35464 (OpMask.size() > RootMask.size() &&
35465 OpMask.size() % RootMask.size() == 0) ||
35466 OpMask.size() == RootMask.size()) &&
35467 "The smaller number of elements must divide the larger.");
35469 // This function can be performance-critical, so we rely on the power-of-2
35470 // knowledge that we have about the mask sizes to replace div/rem ops with
35471 // bit-masks and shifts.
35472 assert(isPowerOf2_32(RootMask.size()) &&
35473 "Non-power-of-2 shuffle mask sizes");
35474 assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
35475 unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
35476 unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
35478 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
35479 unsigned RootRatio =
35480 std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
35481 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
35482 assert((RootRatio == 1 || OpRatio == 1) &&
35483 "Must not have a ratio for both incoming and op masks!");
35485 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
35486 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
35487 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
35488 unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
35489 unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
35491 Mask.resize(MaskWidth, SM_SentinelUndef);
35493 // Merge this shuffle operation's mask into our accumulated mask. Note that
35494 // this shuffle's mask will be the first applied to the input, followed by
35495 // the root mask to get us all the way to the root value arrangement. The
35496 // reason for this order is that we are recursing up the operation chain.
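// Worked example (illustrative only): with RootMask = <1,0> (swapping v2i64
// halves) and OpMask = <2,3,0,1> (swapping v4i32 half-pairs), MaskWidth is 4,
// RootRatio is 2 and OpRatio is 1; each root element expands to two lanes and
// the composition below produces <0,1,2,3>, i.e. the two shuffles cancel out.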
35497 for (unsigned i = 0; i < MaskWidth; ++i) {
35498 unsigned RootIdx = i >> RootRatioLog2;
35499 if (RootMask[RootIdx] < 0) {
35500 // This is a zero or undef lane, we're done.
35501 Mask[i] = RootMask[RootIdx];
35505 unsigned RootMaskedIdx =
35506 RootRatio == 1
35507 ? RootMask[RootIdx]
35508 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
35510 // Just insert the scaled root mask value if it references an input other
35511 // than the SrcOp we're currently inserting.
35512 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
35513 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
35514 Mask[i] = RootMaskedIdx;
35518 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
35519 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
35520 if (OpMask[OpIdx] < 0) {
35521 // The incoming lanes are zero or undef; it doesn't matter which ones we
35522 // are using.
35523 Mask[i] = OpMask[OpIdx];
35527 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
35528 unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
35529 : (OpMask[OpIdx] << OpRatioLog2) +
35530 (RootMaskedIdx & (OpRatio - 1));
35532 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
35533 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
35534 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
35535 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
35537 Mask[i] = OpMaskedIdx;
35541 // Remove unused/repeated shuffle source ops.
35542 resolveTargetShuffleInputsAndMask(Ops, Mask);
35544 // Handle the all undef/zero cases early.
35545 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
35546 return DAG.getUNDEF(Root.getValueType());
35548 // TODO - should we handle the mixed zero/undef case as well? Just returning
35549 // a zero mask will lose information on undef elements possibly reducing
35550 // future combine possibilities.
35551 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
35552 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
35555 assert(!Ops.empty() && "Shuffle with no inputs detected");
35556 HasVariableMask |= IsOpVariableMask;
35558 // Update the list of shuffle nodes that have been combined so far.
35559 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
35561 CombinedNodes.push_back(Op.getNode());
35563 // See if we can recurse into each shuffle source op (if it's a target
35564 // shuffle). The source op should only be combined if it either has a
35565 // single use (i.e. the current Op) or all its users have already been
35566 // combined; otherwise we can still combine, but we should prevent the
35567 // generation of variable shuffles to avoid constant pool bloat.
35568 // Don't recurse if we already have more source ops than we can combine in
35569 // the remaining recursion depth.
35570 if (Ops.size() < (MaxRecursionDepth - Depth)) {
35571 for (int i = 0, e = Ops.size(); i < e; ++i) {
35572 // For empty roots, we need to resolve zeroable elements before combining
35573 // them with other shuffles.
35574 SmallVector<int, 64> ResolvedMask = Mask;
35576 resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
35577 bool AllowVar = false;
35578 if (Ops[i].getNode()->hasOneUse() ||
35579 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
35580 AllowVar = AllowVariableMask;
35581 if (SDValue Res = combineX86ShufflesRecursively(
35582 Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1,
35583 HasVariableMask, AllowVar, DAG, Subtarget))
35588 // Attempt to constant fold all of the constant source ops.
35589 if (SDValue Cst = combineX86ShufflesConstants(
35590 Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
35593 // We can only combine unary and binary shuffle mask cases.
35594 if (Ops.size() <= 2) {
35595 // Minor canonicalization of the accumulated shuffle mask to make it easier
35596 // to match below. All this does is detect masks with sequential pairs of
35597 // elements, and shrink them to the half-width mask. It does this in a loop
35598 // so it will reduce the size of the mask to the minimal width mask which
35599 // performs an equivalent shuffle.
35600 SmallVector<int, 64> WidenedMask;
35601 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
35602 Mask = std::move(WidenedMask);
35605 // Canonicalization of binary shuffle masks to improve pattern matching by
35606 // commuting the inputs.
35607 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
35608 ShuffleVectorSDNode::commuteMask(Mask);
35609 std::swap(Ops[0], Ops[1]);
35612 // Finally, try to combine into a single shuffle instruction.
35613 return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
35614 AllowVariableMask, DAG, Subtarget);
35617 // If that failed and any input is extracted then try to combine as a
35618 // shuffle with the larger type.
35619 return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
35620 HasVariableMask, AllowVariableMask,
35624 /// Helper entry wrapper to combineX86ShufflesRecursively.
35625 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
35626 const X86Subtarget &Subtarget) {
35627 return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 0,
35628 /*HasVarMask*/ false,
35629 /*AllowVarMask*/ true, DAG, Subtarget);
35632 /// Get the PSHUF-style mask from PSHUF node.
35634 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
35635 /// PSHUF-style masks that can be reused with such instructions.
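///
/// For example (illustrative): a v4i32 PSHUFD with immediate 0x1B decodes to
/// the mask <3, 2, 1, 0>; for PSHUFHW the four untouched low-word indices are
/// dropped (and the remaining indices rebased) so only the shuffled high words
/// remain in the returned mask.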
35636 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
35637 MVT VT = N.getSimpleValueType();
35638 SmallVector<int, 4> Mask;
35639 SmallVector<SDValue, 2> Ops;
35642 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
35646 // If we have more than 128 bits, only the low 128 bits of the shuffle mask
35647 // matter. Check that the upper masks are repeats and remove them.
35648 if (VT.getSizeInBits() > 128) {
35649 int LaneElts = 128 / VT.getScalarSizeInBits();
35651 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
35652 for (int j = 0; j < LaneElts; ++j)
35653 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
35654 "Mask doesn't repeat in high 128-bit lanes!");
35656 Mask.resize(LaneElts);
35659 switch (N.getOpcode()) {
35660 case X86ISD::PSHUFD:
35662 case X86ISD::PSHUFLW:
35665 case X86ISD::PSHUFHW:
35666 Mask.erase(Mask.begin(), Mask.begin() + 4);
35667 for (int &M : Mask)
35671 llvm_unreachable("No valid shuffle instruction found!");
35675 /// Search for a combinable shuffle across a chain ending in pshufd.
35677 /// We walk up the chain and look for a combinable shuffle, skipping over
35678 /// shuffles that we could hoist this shuffle's transformation past without
35679 /// altering anything.
35681 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
35682 SelectionDAG &DAG) {
35683 assert(N.getOpcode() == X86ISD::PSHUFD &&
35684 "Called with something other than an x86 128-bit half shuffle!");
35687 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
35688 // of the shuffles in the chain so that we can form a fresh chain to replace
35689 // this chain.
35690 SmallVector<SDValue, 8> Chain;
35691 SDValue V = N.getOperand(0);
35692 for (; V.hasOneUse(); V = V.getOperand(0)) {
35693 switch (V.getOpcode()) {
35695 return SDValue(); // Nothing combined!
35698 // Skip bitcasts as we always know the type for the target specific
35699 // shuffles.
35702 case X86ISD::PSHUFD:
35703 // Found another dword shuffle.
35706 case X86ISD::PSHUFLW:
35707 // Check that the low words (being shuffled) are the identity in the
35708 // dword shuffle, and the high words are self-contained.
35709 if (Mask[0] != 0 || Mask[1] != 1 ||
35710 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
35713 Chain.push_back(V);
35716 case X86ISD::PSHUFHW:
35717 // Check that the high words (being shuffled) are the identity in the
35718 // dword shuffle, and the low words are self-contained.
35719 if (Mask[2] != 2 || Mask[3] != 3 ||
35720 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
35723 Chain.push_back(V);
35726 case X86ISD::UNPCKL:
35727 case X86ISD::UNPCKH:
35728 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
35729 // shuffle into a preceding word shuffle.
35730 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
35731 V.getSimpleValueType().getVectorElementType() != MVT::i16)
35734 // Search for a half-shuffle which we can combine with.
35735 unsigned CombineOp =
35736 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
35737 if (V.getOperand(0) != V.getOperand(1) ||
35738 !V->isOnlyUserOf(V.getOperand(0).getNode()))
35740 Chain.push_back(V);
35741 V = V.getOperand(0);
35743 switch (V.getOpcode()) {
35745 return SDValue(); // Nothing to combine.
35747 case X86ISD::PSHUFLW:
35748 case X86ISD::PSHUFHW:
35749 if (V.getOpcode() == CombineOp)
35752 Chain.push_back(V);
35756 V = V.getOperand(0);
35760 } while (V.hasOneUse());
35763 // Break out of the loop if we break out of the switch.
35767 if (!V.hasOneUse())
35768 // We fell out of the loop without finding a viable combining instruction.
35771 // Merge this node's mask and our incoming mask.
35772 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
35773 for (int &M : Mask)
35774 M = VMask[M];
35775 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
35776 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
35778 // Rebuild the chain around this new shuffle.
35779 while (!Chain.empty()) {
35780 SDValue W = Chain.pop_back_val();
35782 if (V.getValueType() != W.getOperand(0).getValueType())
35783 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
35785 switch (W.getOpcode()) {
35787 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
35789 case X86ISD::UNPCKL:
35790 case X86ISD::UNPCKH:
35791 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
35794 case X86ISD::PSHUFD:
35795 case X86ISD::PSHUFLW:
35796 case X86ISD::PSHUFHW:
35797 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
35801 if (V.getValueType() != N.getValueType())
35802 V = DAG.getBitcast(N.getValueType(), V);
35804 // Return the new chain to replace N.
35808 // Attempt to commute shufps LHS loads:
35809 // permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
35810 static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
35811 SelectionDAG &DAG) {
35812 // TODO: Add vXf64 support.
35813 if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
35816 // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
35817 auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
35818 if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
35820 SDValue N0 = V.getOperand(0);
35821 SDValue N1 = V.getOperand(1);
35822 unsigned Imm = V.getConstantOperandVal(2);
35823 if (!MayFoldLoad(peekThroughOneUseBitcasts(N0)) ||
35824 MayFoldLoad(peekThroughOneUseBitcasts(N1)))
35826 Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
35827 return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
35828 DAG.getTargetConstant(Imm, DL, MVT::i8));
35831 switch (N.getOpcode()) {
35832 case X86ISD::VPERMILPI:
35833 if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
35834 unsigned Imm = N.getConstantOperandVal(1);
35835 return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
35836 DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
35839 case X86ISD::SHUFP: {
35840 SDValue N0 = N.getOperand(0);
35841 SDValue N1 = N.getOperand(1);
35842 unsigned Imm = N.getConstantOperandVal(2);
35843 if (N0 == N1) {
35844 if (SDValue NewSHUFP = commuteSHUFP(N, N0))
35845 return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
35846 DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
35847 } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
35848 return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
35849 DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
35850 } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
35851 return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
35852 DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
35861 /// Try to combine x86 target specific shuffles.
35862 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
35863 TargetLowering::DAGCombinerInfo &DCI,
35864 const X86Subtarget &Subtarget) {
35866 MVT VT = N.getSimpleValueType();
35867 SmallVector<int, 4> Mask;
35868 unsigned Opcode = N.getOpcode();
35871 SmallVector<int, 64> TargetMask;
35872 SmallVector<SDValue, 2> TargetOps;
35873 if (isTargetShuffle(Opcode))
35874 getTargetShuffleMask(N.getNode(), VT, true, TargetOps, TargetMask, IsUnary);
35876 // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
35877 // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
35878 // represents the LHS/RHS inputs for the lower/upper halves.
35879 SmallVector<int, 16> TargetMask128;
35880 if (!TargetMask.empty() && 0 < TargetOps.size() && TargetOps.size() <= 2 &&
35881 isRepeatedTargetShuffleMask(128, VT, TargetMask, TargetMask128)) {
35882 SmallVector<int, 16> WidenedMask128 = TargetMask128;
35883 while (WidenedMask128.size() > 2) {
35884 SmallVector<int, 16> WidenedMask;
35885 if (!canWidenShuffleElements(WidenedMask128, WidenedMask))
35887 WidenedMask128 = std::move(WidenedMask);
35889 if (WidenedMask128.size() == 2) {
35890 assert(isUndefOrZeroOrInRange(WidenedMask128, 0, 4) && "Illegal shuffle");
35891 SDValue BC0 = peekThroughBitcasts(TargetOps.front());
35892 SDValue BC1 = peekThroughBitcasts(TargetOps.back());
35893 EVT VT0 = BC0.getValueType();
35894 EVT VT1 = BC1.getValueType();
35895 unsigned Opcode0 = BC0.getOpcode();
35896 unsigned Opcode1 = BC1.getOpcode();
35897 bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
35898 Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
35899 if (Opcode0 == Opcode1 && VT0 == VT1 &&
35900 (isHoriz || Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
35901 bool SingleOp = (TargetOps.size() == 1);
35902 if (!isHoriz || shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
35903 SDValue Lo = isInRange(WidenedMask128[0], 0, 2) ? BC0 : BC1;
35904 SDValue Hi = isInRange(WidenedMask128[1], 0, 2) ? BC0 : BC1;
35905 Lo = Lo.getOperand(WidenedMask128[0] & 1);
35906 Hi = Hi.getOperand(WidenedMask128[1] & 1);
35908 MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
35909 SDValue Undef = DAG.getUNDEF(SrcVT);
35910 SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
35911 Lo = (WidenedMask128[0] == SM_SentinelZero ? Zero : Lo);
35912 Hi = (WidenedMask128[1] == SM_SentinelZero ? Zero : Hi);
35913 Lo = (WidenedMask128[0] == SM_SentinelUndef ? Undef : Lo);
35914 Hi = (WidenedMask128[1] == SM_SentinelUndef ? Undef : Hi);
35916 SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
35917 return DAG.getBitcast(VT, Horiz);
35923 if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
35926 // Canonicalize UNARYSHUFFLE(XOR(X,-1)) -> XOR(UNARYSHUFFLE(X),-1) to
35927 // help expose the 'NOT' pattern further up the DAG.
35928 // TODO: This might be beneficial for any binop with a 'splattable' operand.
35930 case X86ISD::MOVDDUP:
35931 case X86ISD::PSHUFD: {
35932 SDValue Src = N.getOperand(0);
35933 if (Src.hasOneUse() && Src.getValueType() == VT) {
35934 if (SDValue Not = IsNOT(Src, DAG, /*OneUse*/ true)) {
35935 Not = DAG.getBitcast(VT, Not);
35936 Not = Opcode == X86ISD::MOVDDUP
35937 ? DAG.getNode(Opcode, DL, VT, Not)
35938 : DAG.getNode(Opcode, DL, VT, Not, N.getOperand(1));
35939 EVT IntVT = Not.getValueType().changeTypeToInteger();
35940 SDValue AllOnes = DAG.getConstant(-1, DL, IntVT);
35941 Not = DAG.getBitcast(IntVT, Not);
35942 Not = DAG.getNode(ISD::XOR, DL, IntVT, Not, AllOnes);
35943 return DAG.getBitcast(VT, Not);
35950 // Handle specific target shuffles.
35952 case X86ISD::MOVDDUP: {
35953 SDValue Src = N.getOperand(0);
35954 // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
35955 if (VT == MVT::v2f64 && Src.hasOneUse() &&
35956 ISD::isNormalLoad(Src.getNode())) {
35957 LoadSDNode *LN = cast<LoadSDNode>(Src);
35958 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
35959 SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
35960 DCI.CombineTo(N.getNode(), Movddup);
35961 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
35962 DCI.recursivelyDeleteUnusedNodes(LN);
35963 return N; // Return N so it doesn't get rechecked!
35969 case X86ISD::VBROADCAST: {
35970 SDValue Src = N.getOperand(0);
35971 SDValue BC = peekThroughBitcasts(Src);
35972 EVT SrcVT = Src.getValueType();
35973 EVT BCVT = BC.getValueType();
35975 // If broadcasting from another shuffle, attempt to simplify it.
35976 // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
35977 if (isTargetShuffle(BC.getOpcode()) &&
35978 VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
35979 unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
35980 SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
35982 for (unsigned i = 0; i != Scale; ++i)
35983 DemandedMask[i] = i;
35984 if (SDValue Res = combineX86ShufflesRecursively(
35985 {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
35986 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
35987 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
35988 DAG.getBitcast(SrcVT, Res));
35991 // broadcast(bitcast(src)) -> bitcast(broadcast(src))
35992 // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
35993 if (Src.getOpcode() == ISD::BITCAST &&
35994 SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
35995 DAG.getTargetLoweringInfo().isTypeLegal(BCVT)) {
35996 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
35997 VT.getVectorNumElements());
35998 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
36001 // Reduce broadcast source vector to lowest 128-bits.
36002 if (SrcVT.getSizeInBits() > 128)
36003 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
36004 extract128BitVector(Src, 0, DAG, DL));
36006 // broadcast(scalar_to_vector(x)) -> broadcast(x).
36007 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
36008 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
36010 // Share broadcast with the longest vector and extract low subvector (free).
36011 for (SDNode *User : Src->uses())
36012 if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
36013 User->getValueSizeInBits(0) > VT.getSizeInBits()) {
36014 return extractSubVector(SDValue(User, 0), 0, DAG, DL,
36015 VT.getSizeInBits());
36018 // vbroadcast(scalarload X) -> vbroadcast_load X
36019 // For float loads, extract other uses of the scalar from the broadcast.
36020 if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
36021 ISD::isNormalLoad(Src.getNode())) {
36022 LoadSDNode *LN = cast<LoadSDNode>(Src);
36023 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36024 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
36026 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
36027 LN->getMemoryVT(), LN->getMemOperand());
36028 // If the load value is used only by N, replace it via CombineTo N.
36029 bool NoReplaceExtract = Src.hasOneUse();
36030 DCI.CombineTo(N.getNode(), BcastLd);
36031 if (NoReplaceExtract) {
36032 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
36033 DCI.recursivelyDeleteUnusedNodes(LN);
36035 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
36036 DAG.getIntPtrConstant(0, DL));
36037 DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
36039 return N; // Return N so it doesn't get rechecked!
36042 // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
36043 // i16. So shrink it ourselves if we can make a broadcast_load.
36044 if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
36045 Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
36046 assert(Subtarget.hasAVX2() && "Expected AVX2");
36047 SDValue TruncIn = Src.getOperand(0);
36049 // If this is a truncate of a non-extending load, we can just narrow it to
36050 // use a broadcast_load.
36051 if (ISD::isNormalLoad(TruncIn.getNode())) {
36052 LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
36053 // Unless it's volatile or atomic.
36054 if (LN->isSimple()) {
36055 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36056 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
36057 SDValue BcastLd = DAG.getMemIntrinsicNode(
36058 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
36059 LN->getPointerInfo(), LN->getOriginalAlign(),
36060 LN->getMemOperand()->getFlags());
36061 DCI.CombineTo(N.getNode(), BcastLd);
36062 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
36063 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
36064 return N; // Return N so it doesn't get rechecked!
36068 // If this is a truncate of an i16 extload, we can directly replace it.
36069 if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
36070 ISD::isEXTLoad(Src.getOperand(0).getNode())) {
36071 LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
36072 if (LN->getMemoryVT().getSizeInBits() == 16) {
36073 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36074 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
36076 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
36077 LN->getMemoryVT(), LN->getMemOperand());
36078 DCI.CombineTo(N.getNode(), BcastLd);
36079 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
36080 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
36081 return N; // Return N so it doesn't get rechecked!
36085 // If this is a truncate of a load that has been shifted right, we can
36086 // offset the pointer and use a narrower load.
36087 if (TruncIn.getOpcode() == ISD::SRL &&
36088 TruncIn.getOperand(0).hasOneUse() &&
36089 isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
36090 ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
36091 LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
36092 unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
36093 // Make sure the shift amount and the load size are divisible by 16.
36094 // Don't do this if the load is volatile or atomic.
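// Illustrative case (example values, not from the original comments): for
// (trunc (srl (i64 load X), 48)) the shift is 6 bytes, so the broadcast can
// read a single i16 directly from X+6 instead of loading all 8 bytes.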
36095 if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
36097 unsigned Offset = ShiftAmt / 8;
36098 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36099 SDValue Ptr = DAG.getMemBasePlusOffset(LN->getBasePtr(), Offset, DL);
36100 SDValue Ops[] = { LN->getChain(), Ptr };
36101 SDValue BcastLd = DAG.getMemIntrinsicNode(
36102 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
36103 LN->getPointerInfo().getWithOffset(Offset),
36104 LN->getOriginalAlign(),
36105 LN->getMemOperand()->getFlags());
36106 DCI.CombineTo(N.getNode(), BcastLd);
36107 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
36108 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
36109 return N; // Return N so it doesn't get rechecked!
36114 // vbroadcast(vzload X) -> vbroadcast_load X
36115 if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
36116 MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
36117 if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
36118 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36119 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
36121 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
36122 LN->getMemoryVT(), LN->getMemOperand());
36123 DCI.CombineTo(N.getNode(), BcastLd);
36124 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
36125 DCI.recursivelyDeleteUnusedNodes(LN);
36126 return N; // Return N so it doesn't get rechecked!
36130 // vbroadcast(vector load X) -> vbroadcast_load
36131 if (SrcVT == MVT::v2f64 && Src.hasOneUse() &&
36132 ISD::isNormalLoad(Src.getNode())) {
36133 LoadSDNode *LN = cast<LoadSDNode>(Src);
36134 // Unless the load is volatile or atomic.
36135 if (LN->isSimple()) {
36136 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36137 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
36138 SDValue BcastLd = DAG.getMemIntrinsicNode(
36139 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
36140 LN->getPointerInfo(), LN->getOriginalAlign(),
36141 LN->getMemOperand()->getFlags());
36142 DCI.CombineTo(N.getNode(), BcastLd);
36143 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
36144 DCI.recursivelyDeleteUnusedNodes(LN);
36145 return N; // Return N so it doesn't get rechecked!
36151 case X86ISD::VZEXT_MOVL: {
36152 SDValue N0 = N.getOperand(0);
36154 // If this is a vzmovl of a full vector load, replace it with a vzload, unless
36155 // the load is volatile.
36156 if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
36157 auto *LN = cast<LoadSDNode>(N0);
36158 if (SDValue VZLoad =
36159 narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
36160 DCI.CombineTo(N.getNode(), VZLoad);
36161 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
36162 DCI.recursivelyDeleteUnusedNodes(LN);
36167 // If this is a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the broadcast
36168 // and can just use a VZEXT_LOAD.
36169 // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
36170 if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
36171 auto *LN = cast<MemSDNode>(N0);
36172 if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
36173 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36174 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
36176 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
36177 LN->getMemoryVT(), LN->getMemOperand());
36178 DCI.CombineTo(N.getNode(), VZLoad);
36179 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
36180 DCI.recursivelyDeleteUnusedNodes(LN);
36185 // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
36186 // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
36187 // if the upper bits of the i64 are zero.
36188 if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
36189 N0.getOperand(0).hasOneUse() &&
36190 N0.getOperand(0).getValueType() == MVT::i64) {
36191 SDValue In = N0.getOperand(0);
36192 APInt Mask = APInt::getHighBitsSet(64, 32);
36193 if (DAG.MaskedValueIsZero(In, Mask)) {
36194 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
36195 MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
36196 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
36197 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
36198 return DAG.getBitcast(VT, Movl);
36202 // Load a scalar integer constant directly to XMM instead of transferring an
36203 // immediate value from GPR.
36204 // vzext_movl (scalar_to_vector C) --> load [C,0...]
36205 if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
36206 if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
36207 // Create a vector constant - scalar constant followed by zeros.
36208 EVT ScalarVT = N0.getOperand(0).getValueType();
36209 Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
36210 unsigned NumElts = VT.getVectorNumElements();
36211 Constant *Zero = ConstantInt::getNullValue(ScalarTy);
36212 SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
36213 ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());
36215 // Load the vector constant from the constant pool.
36216 MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
36217 SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
36218 MachinePointerInfo MPI =
36219 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
36220 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
36221 return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
36222 MachineMemOperand::MOLoad);
36228 case X86ISD::BLENDI: {
36229 SDValue N0 = N.getOperand(0);
36230 SDValue N1 = N.getOperand(1);
36232 // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
36233 // TODO: Handle MVT::v16i16 repeated blend mask.
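// Illustrative example (added for exposition): a v4f64 BLENDI with immediate
// 0b0101 over bitcasts of v8f32 operands becomes a v8f32 BLENDI with the
// immediate widened per element to 0b00110011.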
36234 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
36235 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
36236 MVT SrcVT = N0.getOperand(0).getSimpleValueType();
36237 if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
36238 SrcVT.getScalarSizeInBits() >= 32) {
36239 unsigned BlendMask = N.getConstantOperandVal(2);
36240 unsigned Size = VT.getVectorNumElements();
36241 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
36242 BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
36243 return DAG.getBitcast(
36244 VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
36246 DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
36251 case X86ISD::VPERMI: {
36252 // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
36253 // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
36254 SDValue N0 = N.getOperand(0);
36255 SDValue N1 = N.getOperand(1);
36256 unsigned EltSizeInBits = VT.getScalarSizeInBits();
36257 if (N0.getOpcode() == ISD::BITCAST &&
36258 N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
36259 SDValue Src = N0.getOperand(0);
36260 EVT SrcVT = Src.getValueType();
36261 SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
36262 return DAG.getBitcast(VT, Res);
36266 case X86ISD::VPERM2X128: {
36267 // If both 128-bit values were inserted into high halves of 256-bit values,
36268 // the shuffle can be reduced to a concatenation of subvectors:
36269 // vperm2x128 (ins ?, X, C1), (ins ?, Y, C2), 0x31 --> concat X, Y
36270 // Note: We are only looking for the exact high/high shuffle mask because we
36271 // expect to fold other similar patterns before creating this opcode.
36272 SDValue Ins0 = peekThroughBitcasts(N.getOperand(0));
36273 SDValue Ins1 = peekThroughBitcasts(N.getOperand(1));
36274 unsigned Imm = N.getConstantOperandVal(2);
36275 if (!(Imm == 0x31 &&
36276 Ins0.getOpcode() == ISD::INSERT_SUBVECTOR &&
36277 Ins1.getOpcode() == ISD::INSERT_SUBVECTOR &&
36278 Ins0.getValueType() == Ins1.getValueType()))
36281 SDValue X = Ins0.getOperand(1);
36282 SDValue Y = Ins1.getOperand(1);
36283 unsigned C1 = Ins0.getConstantOperandVal(2);
36284 unsigned C2 = Ins1.getConstantOperandVal(2);
36285 MVT SrcVT = X.getSimpleValueType();
36286 unsigned SrcElts = SrcVT.getVectorNumElements();
36287 if (SrcVT != Y.getSimpleValueType() || SrcVT.getSizeInBits() != 128 ||
36288 C1 != SrcElts || C2 != SrcElts)
36291 return DAG.getBitcast(VT, DAG.getNode(ISD::CONCAT_VECTORS, DL,
36292 Ins1.getValueType(), X, Y));
36294 case X86ISD::PSHUFD:
36295 case X86ISD::PSHUFLW:
36296 case X86ISD::PSHUFHW:
36297 Mask = getPSHUFShuffleMask(N);
36298 assert(Mask.size() == 4);
36300 case X86ISD::MOVSD:
36301 case X86ISD::MOVSS: {
36302 SDValue N0 = N.getOperand(0);
36303 SDValue N1 = N.getOperand(1);
36305 // Canonicalize scalar FPOps:
36306 // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
36307 // If commutable, allow OP(N1[0], N0[0]).
36308 unsigned Opcode1 = N1.getOpcode();
36309 if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
36310 Opcode1 == ISD::FDIV) {
36311 SDValue N10 = N1.getOperand(0);
36312 SDValue N11 = N1.getOperand(1);
36314 (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
36316 std::swap(N10, N11);
36317 MVT SVT = VT.getVectorElementType();
36318 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
36319 N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
36320 N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
36321 SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
36322 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
36323 return DAG.getNode(Opcode, DL, VT, N0, SclVec);
36329 case X86ISD::INSERTPS: {
36330 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
36331 SDValue Op0 = N.getOperand(0);
36332 SDValue Op1 = N.getOperand(1);
36333 unsigned InsertPSMask = N.getConstantOperandVal(2);
36334 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
36335 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
36336 unsigned ZeroMask = InsertPSMask & 0xF;
36338 // If we zero out all elements from Op0 then we don't need to reference it.
36339 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
36340 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
36341 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
36343 // If we zero out the element from Op1 then we don't need to reference it.
36344 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
36345 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
36346 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
36348 // Attempt to merge insertps Op1 with an inner target shuffle node.
36349 SmallVector<int, 8> TargetMask1;
36350 SmallVector<SDValue, 2> Ops1;
36351 APInt KnownUndef1, KnownZero1;
36352 if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
36354 if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
36355 // Zero/UNDEF insertion - zero out element and remove dependency.
36356 InsertPSMask |= (1u << DstIdx);
36357 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
36358 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
36360 // Update the insertps mask SrcIdx and reference the source input directly.
36361 int M = TargetMask1[SrcIdx];
36362 assert(0 <= M && M < 8 && "Shuffle index out of range");
36363 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
36364 Op1 = Ops1[M < 4 ? 0 : 1];
36365 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
36366 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
36369 // Attempt to merge insertps Op0 with an inner target shuffle node.
36370 SmallVector<int, 8> TargetMask0;
36371 SmallVector<SDValue, 2> Ops0;
36372 APInt KnownUndef0, KnownZero0;
36373 if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
36375 bool Updated = false;
36376 bool UseInput00 = false;
36377 bool UseInput01 = false;
36378 for (int i = 0; i != 4; ++i) {
36379 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
36380 // No change if element is already zero or the inserted element.
36382 } else if (KnownUndef0[i] || KnownZero0[i]) {
36383 // If the target mask is undef/zero then we must zero the element.
36384 InsertPSMask |= (1u << i);
36389 // The input vector element must be inline.
36390 int M = TargetMask0[i];
36391 if (M != i && M != (i + 4))
36394 // Determine which inputs of the target shuffle we're using.
36395 UseInput00 |= (0 <= M && M < 4);
36396 UseInput01 |= (4 <= M);
36399 // If we're not using both inputs of the target shuffle then use the
36400 // referenced input directly.
36401 if (UseInput00 && !UseInput01) {
36404 } else if (!UseInput00 && UseInput01) {
36410 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
36411 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
36414 // If we're inserting an element from a vbroadcast load, fold the
36415 // load into the X86insertps instruction. We need to convert the scalar
36416 // load to a vector and clear the source lane of the INSERTPS control.
36417 if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
36418 auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
36419 if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
36420 SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
36421 MemIntr->getBasePtr(),
36422 MemIntr->getMemOperand());
36423 SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
36424 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
36426 DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
36427 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
36438 // Nuke no-op shuffles that show up after combining.
36439 if (isNoopShuffleMask(Mask))
36440 return N.getOperand(0);
36442 // Look for simplifications involving one or two shuffle instructions.
36443 SDValue V = N.getOperand(0);
36444 switch (N.getOpcode()) {
36447 case X86ISD::PSHUFLW:
36448 case X86ISD::PSHUFHW:
36449 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
36451 // See if this reduces to a PSHUFD which is no more expensive and can
36452 // combine with more operations. Note that it has to at least flip the
36453 // dwords as otherwise it would have been removed as a no-op.
36454 if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
36455 int DMask[] = {0, 1, 2, 3};
36456 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
36457 DMask[DOffset + 0] = DOffset + 1;
36458 DMask[DOffset + 1] = DOffset + 0;
36459 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
36460 V = DAG.getBitcast(DVT, V);
36461 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
36462 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
36463 return DAG.getBitcast(VT, V);
36466 // Look for shuffle patterns which can be implemented as a single unpack.
36467 // FIXME: This doesn't handle the location of the PSHUFD generically, and
36468 // only works when we have a PSHUFD followed by two half-shuffles.
36469 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
36470 (V.getOpcode() == X86ISD::PSHUFLW ||
36471 V.getOpcode() == X86ISD::PSHUFHW) &&
36472 V.getOpcode() != N.getOpcode() &&
36473 V.hasOneUse() && V.getOperand(0).hasOneUse()) {
36474 SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
36475 if (D.getOpcode() == X86ISD::PSHUFD) {
36476 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
36477 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
36478 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
36479 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
36481 for (int i = 0; i < 4; ++i) {
36482 WordMask[i + NOffset] = Mask[i] + NOffset;
36483 WordMask[i + VOffset] = VMask[i] + VOffset;
36485 // Map the word mask through the DWord mask.
36487 for (int i = 0; i < 8; ++i)
36488 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
36489 if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
36490 makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
36491 // We can replace all three shuffles with an unpack.
36492 V = DAG.getBitcast(VT, D.getOperand(0));
36493 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
36502 case X86ISD::PSHUFD:
36503 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
36512 /// Checks if the shuffle mask takes subsequent elements
36513 /// alternately from two vectors.
36514 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
36515 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
36517 int ParitySrc[2] = {-1, -1};
36518 unsigned Size = Mask.size();
36519 for (unsigned i = 0; i != Size; ++i) {
36524 // Make sure we are using the matching element from the input.
36525 if ((M % Size) != i)
36528 // Make sure we use the same input for all elements of the same parity.
36529 int Src = M / Size;
36530 if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
36532 ParitySrc[i % 2] = Src;
36535 // Make sure each input is used.
36536 if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
36539 Op0Even = ParitySrc[0] == 0;
36543 /// Returns true iff the shuffle node \p N can be replaced with an ADDSUB(SUBADD)
36544 /// operation. If true is returned then the operands of the ADDSUB(SUBADD) operation
36545 /// are written to the parameters \p Opnd0 and \p Opnd1.
36547 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector shuffle
36548 /// nodes so it is easier to match generically. We also insert dummy vector shuffle
36549 /// nodes for the operands which explicitly discard the lanes which are unused
36550 /// by this operation, so that the fact that they're unused flows through the
36551 /// rest of the combiner.
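///
/// Illustrative example (added for exposition, not from the original comment):
///   shuffle (fsub A, B), (fadd A, B), <0, 5, 2, 7>
/// subtracts in the even lanes and adds in the odd lanes, which matches
/// X86ISD::ADDSUB A, B, so \p Opnd0 / \p Opnd1 are set to A / B and \p IsSubAdd
/// is set to false.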
36552 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
36553 SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
36556 EVT VT = N->getValueType(0);
36557 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36558 if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
36559 !VT.getSimpleVT().isFloatingPoint())
36562 // We only handle target-independent shuffles.
36563 // FIXME: It would be easy and harmless to use the target shuffle mask
36564 // extraction tool to support more.
36565 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
36568 SDValue V1 = N->getOperand(0);
36569 SDValue V2 = N->getOperand(1);
36571 // Make sure we have an FADD and an FSUB.
36572 if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
36573 (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
36574 V1.getOpcode() == V2.getOpcode())
36577 // If there are other uses of these operations we can't fold them.
36578 if (!V1->hasOneUse() || !V2->hasOneUse())
36581 // Ensure that both operations have the same operands. Note that we can
36582 // commute the FADD operands.
36584 if (V1.getOpcode() == ISD::FSUB) {
36585 LHS = V1->getOperand(0); RHS = V1->getOperand(1);
36586 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
36587 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
36590 assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
36591 LHS = V2->getOperand(0); RHS = V2->getOperand(1);
36592 if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
36593 (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
36597 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
36599 if (!isAddSubOrSubAddMask(Mask, Op0Even))
36602 // It's a subadd if the vector in the even parity is an FADD.
36603 IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
36604 : V2->getOpcode() == ISD::FADD;
36611 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
36612 static SDValue combineShuffleToFMAddSub(SDNode *N,
36613 const X86Subtarget &Subtarget,
36614 SelectionDAG &DAG) {
36615 // We only handle target-independent shuffles.
36616 // FIXME: It would be easy and harmless to use the target shuffle mask
36617 // extraction tool to support more.
36618 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
36621 MVT VT = N->getSimpleValueType(0);
36622 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36623 if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
36626 // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
36627 SDValue Op0 = N->getOperand(0);
36628 SDValue Op1 = N->getOperand(1);
36629 SDValue FMAdd = Op0, FMSub = Op1;
36630 if (FMSub.getOpcode() != X86ISD::FMSUB)
36631 std::swap(FMAdd, FMSub);
36633 if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
36634 FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
36635 FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
36636 FMAdd.getOperand(2) != FMSub.getOperand(2))
36639 // Check for correct shuffle mask.
36640 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
36642 if (!isAddSubOrSubAddMask(Mask, Op0Even))
36645 // FMAddSub takes zeroth operand from FMSub node.
36647 bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
36648 unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
36649 return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
36650 FMAdd.getOperand(2));
36653 /// Try to combine a shuffle into a target-specific add-sub or
36654 /// mul-add-sub node.
36655 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
36656 const X86Subtarget &Subtarget,
36657 SelectionDAG &DAG) {
36658 if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
36661 SDValue Opnd0, Opnd1;
36663 if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
36666 MVT VT = N->getSimpleValueType(0);
36669 // Try to generate X86ISD::FMADDSUB node here.
36671 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
36672 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
36673 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
36679 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
36680 // the ADDSUB idiom has been successfully recognized. There are no known
36681 // X86 targets with 512-bit ADDSUB instructions!
36682 if (VT.is512BitVector())
36685 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
36688 // We are looking for a shuffle where both sources are concatenated with undef
36689 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
36690 // if we can express this as a single-source shuffle, that's preferable.
36691 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
36692 const X86Subtarget &Subtarget) {
36693 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
36696 EVT VT = N->getValueType(0);
36698 // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
36699 if (!VT.is128BitVector() && !VT.is256BitVector())
36702 if (VT.getVectorElementType() != MVT::i32 &&
36703 VT.getVectorElementType() != MVT::i64 &&
36704 VT.getVectorElementType() != MVT::f32 &&
36705 VT.getVectorElementType() != MVT::f64)
36708 SDValue N0 = N->getOperand(0);
36709 SDValue N1 = N->getOperand(1);
36711 // Check that both sources are concats with undef.
36712 if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
36713 N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
36714 N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
36715 !N1.getOperand(1).isUndef())
36718 // Construct the new shuffle mask. Elements from the first source retain their
36719 // index, but elements from the second source no longer need to skip an undef.
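// For example (illustrative): with v8i32 operands, old mask element 9
// referenced lane 1 of the second source's defined half; it is remapped to 5,
// which selects that same lane in the concat of the two defined halves built
// below.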
36720 SmallVector<int, 8> Mask;
36721 int NumElts = VT.getVectorNumElements();
36723 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
36724 for (int Elt : SVOp->getMask())
36725 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
36728 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
36730 return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
36733 /// Eliminate a redundant shuffle of a horizontal math op.
36734 static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
36735 unsigned Opcode = N->getOpcode();
36736 if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
36737 if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
36740 // For a broadcast, peek through an extract element of index 0 to find the
36741 // horizontal op: broadcast (ext_vec_elt HOp, 0)
36742 EVT VT = N->getValueType(0);
36743 if (Opcode == X86ISD::VBROADCAST) {
36744 SDValue SrcOp = N->getOperand(0);
36745 if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
36746 SrcOp.getValueType() == MVT::f64 &&
36747 SrcOp.getOperand(0).getValueType() == VT &&
36748 isNullConstant(SrcOp.getOperand(1)))
36749 N = SrcOp.getNode();
36752 SDValue HOp = N->getOperand(0);
36753 if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
36754 HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
36757 // 128-bit horizontal math instructions are defined to operate on adjacent
36758 // lanes of each operand as:
36759 // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
36760 // ...similarly for v2f64 and v8i16.
36761 if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
36762 HOp.getOperand(0) != HOp.getOperand(1))
36765 // The shuffle that we are eliminating may have allowed the horizontal op to
36766 // have an undemanded (undefined) operand. Duplicate the other (defined)
36767 // operand to ensure that the results are defined across all lanes without the
36768 // shuffle.
36769 auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
36771 if (HorizOp.getOperand(0).isUndef()) {
36772 assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
36773 X = HorizOp.getOperand(1);
36774 } else if (HorizOp.getOperand(1).isUndef()) {
36775 assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
36776 X = HorizOp.getOperand(0);
36780 return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
36781 HorizOp.getValueType(), X, X);
36784 // When the operands of a horizontal math op are identical, the low half of
36785 // the result is the same as the high half. If a target shuffle is also
36786 // replicating low and high halves (and without changing the type/length of
36787 // the vector), we don't need the shuffle.
36788 if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
36789 if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
36790 // movddup (hadd X, X) --> hadd X, X
36791 // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
36792 assert((HOp.getValueType() == MVT::v2f64 ||
36793 HOp.getValueType() == MVT::v4f64) && "Unexpected type for h-op");
36794 return updateHOp(HOp, DAG);
36799 // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
36800 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
36801 // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
36802 // but this should be tied to whatever horizontal op matching and shuffle
36803 // canonicalization are producing.
36804 if (HOp.getValueSizeInBits() == 128 &&
36805 (isTargetShuffleEquivalent(Mask, {0, 0}) ||
36806 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
36807 isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
36808 return updateHOp(HOp, DAG);
36810 if (HOp.getValueSizeInBits() == 256 &&
36811 (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
36812 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
36813 isTargetShuffleEquivalent(
36814 Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
36815 return updateHOp(HOp, DAG);
36817 return SDValue();
36818 }
36820 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
36821 /// low half of each source vector and does not set any high half elements in
36822 /// the destination vector, narrow the shuffle to half its original size.
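/// For example, a v8i32 shuffle with mask <0,1,8,9,u,u,u,u> reads only the low
/// v4i32 half of each source and leaves the high half of the result undef, so
/// it can be performed as a v4i32 shuffle of the two low halves (half mask
/// <0,1,4,5>) whose result is reinserted at subvector index 0.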
36823 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
36824 if (!Shuf->getValueType(0).isSimple())
36825 return SDValue();
36826 MVT VT = Shuf->getSimpleValueType(0);
36827 if (!VT.is256BitVector() && !VT.is512BitVector())
36828 return SDValue();
36830 // See if we can ignore all of the high elements of the shuffle.
36831 ArrayRef<int> Mask = Shuf->getMask();
36832 if (!isUndefUpperHalf(Mask))
36833 return SDValue();
36835 // Check if the shuffle mask accesses only the low half of each input vector
36836 // (half-index output is 0 or 2).
36837 int HalfIdx1, HalfIdx2;
36838 SmallVector<int, 8> HalfMask(Mask.size() / 2);
36839 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
36840 (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
36841 return SDValue();
36843 // Create a half-width shuffle to replace the unnecessarily wide shuffle.
36844 // The trick is knowing that all of the insert/extract are actually free
36845 // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
36846 // of narrow inputs into a narrow output, and that is always cheaper than
36847 // the wide shuffle that we started with.
36848 return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
36849 Shuf->getOperand(1), HalfMask, HalfIdx1,
36850 HalfIdx2, false, DAG, /*UseConcat*/true);
36851 }
36853 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
36854 TargetLowering::DAGCombinerInfo &DCI,
36855 const X86Subtarget &Subtarget) {
36856 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
36857 if (SDValue V = narrowShuffle(Shuf, DAG))
36858 return V;
36860 // If we have legalized the vector types, look for blends of FADD and FSUB
36861 // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
36862 SDLoc dl(N);
36863 EVT VT = N->getValueType(0);
36864 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36865 if (TLI.isTypeLegal(VT)) {
36866 if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
36867 return AddSub;
36869 if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
36870 return HAddSub;
36871 }
36873 // Attempt to combine into a vector load/broadcast.
36874 if (SDValue LD = combineToConsecutiveLoads(VT, SDValue(N, 0), dl, DAG,
36875 Subtarget, true))
36876 return LD;
36878 // For AVX2, we sometimes want to combine
36879 // (vector_shuffle <mask> (concat_vectors t1, undef)
36880 // (concat_vectors t2, undef))
36881 // Into:
36882 // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
36883 // Since the latter can be efficiently lowered with VPERMD/VPERMQ
36884 if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
36885 return ShufConcat;
36887 if (isTargetShuffle(N->getOpcode())) {
36888 SDValue Op(N, 0);
36889 if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
36890 return Shuffle;
36892 // Try recursively combining arbitrary sequences of x86 shuffle
36893 // instructions into higher-order shuffles. We do this after combining
36894 // specific PSHUF instruction sequences into their minimal form so that we
36895 // can evaluate how many specialized shuffle instructions are involved in
36896 // a particular chain.
36897 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
36898 return Res;
36900 // Simplify source operands based on shuffle mask.
36901 // TODO - merge this into combineX86ShufflesRecursively.
36902 APInt KnownUndef, KnownZero;
36903 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
36904 if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
36905 return SDValue(N, 0);
36906 }
36908 // Pull subvector inserts into undef through VZEXT_MOVL by making it an
36909 // insert into a zero vector. This helps get VZEXT_MOVL closer to
36910 // scalar_to_vectors where 256/512 are canonicalized to an insert and a
36911 // 128-bit scalar_to_vector. This reduces the number of isel patterns.
36912 if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
36913 N->getOperand(0).hasOneUse()) {
36914 SDValue V = peekThroughOneUseBitcasts(N->getOperand(0));
36916 if (V.getOpcode() == ISD::INSERT_SUBVECTOR &&
36917 V.getOperand(0).isUndef() && isNullConstant(V.getOperand(2))) {
36918 SDValue In = V.getOperand(1);
36919 MVT SubVT =
36920 MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
36921 In.getValueSizeInBits() / VT.getScalarSizeInBits());
36922 In = DAG.getBitcast(SubVT, In);
36923 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, SubVT, In);
36924 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
36925 getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
36926 Movl, V.getOperand(2));
36927 }
36928 }
36930 return SDValue();
36931 }
36933 // Simplify variable target shuffle masks based on the demanded elements.
36934 // TODO: Handle DemandedBits in mask indices as well?
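// For example, if a PSHUFB whose control mask is a constant-pool load is only
// demanded in its low lanes, the constant entries for the undemanded lanes can
// be rewritten to undef and a fresh constant-pool load of the simplified mask
// emitted in its place.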
36935 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
36936 SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
36937 TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
36938 // If we're demanding all elements, don't bother trying to simplify the mask.
36939 unsigned NumElts = DemandedElts.getBitWidth();
36940 if (DemandedElts.isAllOnesValue())
36941 return false;
36943 SDValue Mask = Op.getOperand(MaskIndex);
36944 if (!Mask.hasOneUse())
36945 return false;
36947 // Attempt to generically simplify the variable shuffle mask.
36948 APInt MaskUndef, MaskZero;
36949 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
36950 Depth + 1))
36951 return true;
36953 // Attempt to extract+simplify a (constant pool load) shuffle mask.
36954 // TODO: Support other types from getTargetShuffleMaskIndices?
36955 SDValue BC = peekThroughOneUseBitcasts(Mask);
36956 EVT BCVT = BC.getValueType();
36957 auto *Load = dyn_cast<LoadSDNode>(BC);
36958 if (!Load)
36959 return false;
36961 const Constant *C = getTargetConstantFromNode(Load);
36962 if (!C)
36963 return false;
36965 Type *CTy = C->getType();
36966 if (!CTy->isVectorTy() ||
36967 CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
36968 return false;
36970 // Handle scaling for i64 elements on 32-bit targets.
36971 unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
36972 if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
36973 return false;
36974 unsigned Scale = NumCstElts / NumElts;
36976 // Simplify mask if we have an undemanded element that is not undef.
36977 bool Simplified = false;
36978 SmallVector<Constant *, 32> ConstVecOps;
36979 for (unsigned i = 0; i != NumCstElts; ++i) {
36980 Constant *Elt = C->getAggregateElement(i);
36981 if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
36982 ConstVecOps.push_back(UndefValue::get(Elt->getType()));
36983 Simplified = true;
36984 continue;
36985 }
36986 ConstVecOps.push_back(Elt);
36987 }
36988 if (!Simplified)
36989 return false;
36991 // Generate new constant pool entry + legalize immediately for the load.
36992 SDLoc DL(Op);
36993 SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
36994 SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
36995 SDValue NewMask = TLO.DAG.getLoad(
36996 BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
36997 MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
36998 Load->getAlign());
36999 return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
37000 }
37002 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
37003 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
37004 TargetLoweringOpt &TLO, unsigned Depth) const {
37005 int NumElts = DemandedElts.getBitWidth();
37006 unsigned Opc = Op.getOpcode();
37007 EVT VT = Op.getValueType();
37009 // Handle special case opcodes.
37010 switch (Opc) {
37011 case X86ISD::PMULDQ:
37012 case X86ISD::PMULUDQ: {
37013 APInt LHSUndef, LHSZero;
37014 APInt RHSUndef, RHSZero;
37015 SDValue LHS = Op.getOperand(0);
37016 SDValue RHS = Op.getOperand(1);
37017 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
37018 Depth + 1))
37019 return true;
37020 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
37021 Depth + 1))
37022 return true;
37023 // Multiply by zero.
37024 KnownZero = LHSZero | RHSZero;
37025 break;
37026 }
37027 case X86ISD::VSHL:
37028 case X86ISD::VSRL:
37029 case X86ISD::VSRA: {
37030 // We only need the bottom 64-bits of the (128-bit) shift amount.
37031 SDValue Amt = Op.getOperand(1);
37032 MVT AmtVT = Amt.getSimpleValueType();
37033 assert(AmtVT.is128BitVector() && "Unexpected value type");
37035 // If this value is only ever reused as the shift-amount operand of other SSE
37036 // shifts, then we know that only its bottom 64 bits are ever used.
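// For example, (X86ISD::VSHL v4i32:X, Amt) is PSLLD, which interprets Amt as a
// single 64-bit shift count; the upper 64 bits of the xmm amount operand are
// architecturally ignored, so they can be simplified to undef here.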
37037 bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
37038 unsigned UseOpc = Use->getOpcode();
37039 return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
37040 UseOpc == X86ISD::VSRA) &&
37041 Use->getOperand(0) != Amt;
37042 });
37044 APInt AmtUndef, AmtZero;
37045 unsigned NumAmtElts = AmtVT.getVectorNumElements();
37046 APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
37047 if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
37048 Depth + 1, AssumeSingleUse))
37049 return true;
37050 break;
37051 }
37052 case X86ISD::VSHLI:
37053 case X86ISD::VSRLI:
37054 case X86ISD::VSRAI: {
37055 SDValue Src = Op.getOperand(0);
37057 if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
37060 // TODO convert SrcUndef to KnownUndef.
37063 case X86ISD::KSHIFTL: {
37064 SDValue Src = Op.getOperand(0);
37065 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
37066 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
37067 unsigned ShiftAmt = Amt->getZExtValue();
37069 if (ShiftAmt == 0)
37070 return TLO.CombineTo(Op, Src);
37072 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
37073 // single shift. We can do this if the bottom bits (which are shifted
37074 // out) are never demanded.
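// For example, (kshiftl (kshiftr X, 2), 5) can become (kshiftl X, 3) when the
// low 5 elements are not demanded, since both forms agree on every higher
// element.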
37075 if (Src.getOpcode() == X86ISD::KSHIFTR) {
37076 if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
37077 unsigned C1 = Src.getConstantOperandVal(1);
37078 unsigned NewOpc = X86ISD::KSHIFTL;
37079 int Diff = ShiftAmt - C1;
37080 if (Diff < 0) {
37081 Diff = -Diff;
37082 NewOpc = X86ISD::KSHIFTR;
37083 }
37085 SDLoc dl(Op);
37086 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
37087 return TLO.CombineTo(
37088 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
37092 APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
37093 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
37097 KnownUndef <<= ShiftAmt;
37098 KnownZero <<= ShiftAmt;
37099 KnownZero.setLowBits(ShiftAmt);
37102 case X86ISD::KSHIFTR: {
37103 SDValue Src = Op.getOperand(0);
37104 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
37105 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
37106 unsigned ShiftAmt = Amt->getZExtValue();
37108 if (ShiftAmt == 0)
37109 return TLO.CombineTo(Op, Src);
37111 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
37112 // single shift. We can do this if the top bits (which are shifted
37113 // out) are never demanded.
37114 if (Src.getOpcode() == X86ISD::KSHIFTL) {
37115 if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
37116 unsigned C1 = Src.getConstantOperandVal(1);
37117 unsigned NewOpc = X86ISD::KSHIFTR;
37118 int Diff = ShiftAmt - C1;
37119 if (Diff < 0) {
37120 Diff = -Diff;
37121 NewOpc = X86ISD::KSHIFTL;
37122 }
37124 SDLoc dl(Op);
37125 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
37126 return TLO.CombineTo(
37127 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
37131 APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
37132 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
37136 KnownUndef.lshrInPlace(ShiftAmt);
37137 KnownZero.lshrInPlace(ShiftAmt);
37138 KnownZero.setHighBits(ShiftAmt);
37141 case X86ISD::CVTSI2P:
37142 case X86ISD::CVTUI2P: {
37143 SDValue Src = Op.getOperand(0);
37144 MVT SrcVT = Src.getSimpleValueType();
37145 APInt SrcUndef, SrcZero;
37146 APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
37147 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
37152 case X86ISD::PACKSS:
37153 case X86ISD::PACKUS: {
37154 SDValue N0 = Op.getOperand(0);
37155 SDValue N1 = Op.getOperand(1);
37157 APInt DemandedLHS, DemandedRHS;
37158 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
37160 APInt SrcUndef, SrcZero;
37161 if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
37164 if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
37168 // Aggressively peek through ops to get at the demanded elts.
37169 // TODO - we should do this for all target/faux shuffles ops.
37170 if (!DemandedElts.isAllOnesValue()) {
37171 SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
37172 TLO.DAG, Depth + 1);
37173 SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
37174 TLO.DAG, Depth + 1);
37175 if (NewN0 || NewN1) {
37176 NewN0 = NewN0 ? NewN0 : N0;
37177 NewN1 = NewN1 ? NewN1 : N1;
37178 return TLO.CombineTo(Op,
37179 TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
37186 case X86ISD::FHADD:
37187 case X86ISD::FHSUB: {
37188 APInt DemandedLHS, DemandedRHS;
37189 getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
37191 APInt LHSUndef, LHSZero;
37192 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
37193 LHSZero, TLO, Depth + 1))
37195 APInt RHSUndef, RHSZero;
37196 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
37197 RHSZero, TLO, Depth + 1))
37201 case X86ISD::VTRUNC:
37202 case X86ISD::VTRUNCS:
37203 case X86ISD::VTRUNCUS: {
37204 SDValue Src = Op.getOperand(0);
37205 MVT SrcVT = Src.getSimpleValueType();
37206 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
37207 APInt SrcUndef, SrcZero;
37208 if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
37211 KnownZero = SrcZero.zextOrTrunc(NumElts);
37212 KnownUndef = SrcUndef.zextOrTrunc(NumElts);
37215 case X86ISD::BLENDV: {
37216 APInt SelUndef, SelZero;
37217 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
37218 SelZero, TLO, Depth + 1))
37221 // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
37222 APInt LHSUndef, LHSZero;
37223 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
37224 LHSZero, TLO, Depth + 1))
37227 APInt RHSUndef, RHSZero;
37228 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
37229 RHSZero, TLO, Depth + 1))
37232 KnownZero = LHSZero & RHSZero;
37233 KnownUndef = LHSUndef & RHSUndef;
37236 case X86ISD::VZEXT_MOVL: {
37237 // If upper demanded elements are already zero then we have nothing to do.
37238 SDValue Src = Op.getOperand(0);
37239 APInt DemandedUpperElts = DemandedElts;
37240 DemandedUpperElts.clearLowBits(1);
37241 if (TLO.DAG.computeKnownBits(Src, DemandedUpperElts, Depth + 1).isZero())
37242 return TLO.CombineTo(Op, Src);
37245 case X86ISD::VBROADCAST: {
37246 SDValue Src = Op.getOperand(0);
37247 MVT SrcVT = Src.getSimpleValueType();
37248 if (!SrcVT.isVector())
37250 // Don't bother broadcasting if we just need the 0'th element.
37251 if (DemandedElts == 1) {
37252 if (Src.getValueType() != VT)
37253 Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
37255 return TLO.CombineTo(Op, Src);
37257 APInt SrcUndef, SrcZero;
37258 APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
37259 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
37262 // Aggressively peek through src to get at the demanded elt.
37263 // TODO - we should do this for all target/faux shuffles ops.
37264 if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
37265 Src, SrcElts, TLO.DAG, Depth + 1))
37266 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
37269 case X86ISD::VPERMV:
37270 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
37274 case X86ISD::PSHUFB:
37275 case X86ISD::VPERMV3:
37276 case X86ISD::VPERMILPV:
37277 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
37281 case X86ISD::VPPERM:
37282 case X86ISD::VPERMIL2:
37283 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
37289 // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
37290 // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
37291 // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
37292 if ((VT.is256BitVector() || VT.is512BitVector()) &&
37293 DemandedElts.lshr(NumElts / 2) == 0) {
37294 unsigned SizeInBits = VT.getSizeInBits();
37295 unsigned ExtSizeInBits = SizeInBits / 2;
37297 // See if 512-bit ops only use the bottom 128-bits.
37298 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
37299 ExtSizeInBits = SizeInBits / 4;
37301 switch (Opc) {
37302 // Subvector broadcast.
37303 case X86ISD::SUBV_BROADCAST: {
37304 SDLoc DL(Op);
37305 SDValue Src = Op.getOperand(0);
37306 if (Src.getValueSizeInBits() > ExtSizeInBits)
37307 Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
37308 else if (Src.getValueSizeInBits() < ExtSizeInBits) {
37309 MVT SrcSVT = Src.getSimpleValueType().getScalarType();
37310 MVT SrcVT =
37311 MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
37312 Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
37313 }
37314 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
37315 TLO.DAG, DL, ExtSizeInBits));
37317 // Byte shifts by immediate.
37318 case X86ISD::VSHLDQ:
37319 case X86ISD::VSRLDQ:
37320 // Shift by uniform.
37321 case X86ISD::VSHL:
37322 case X86ISD::VSRL:
37323 case X86ISD::VSRA:
37324 // Shift by immediate.
37325 case X86ISD::VSHLI:
37326 case X86ISD::VSRLI:
37327 case X86ISD::VSRAI: {
37328 SDLoc DL(Op);
37329 SDValue Ext0 =
37330 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
37331 SDValue ExtOp =
37332 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
37333 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
37334 SDValue Insert =
37335 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
37336 return TLO.CombineTo(Op, Insert);
37337 }
37338 case X86ISD::VPERMI: {
37339 // Simplify PERMPD/PERMQ to extract_subvector.
37340 // TODO: This should be done in shuffle combining.
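// For example, VPERMQ with immediate 0x0E places source elements 2 and 3 in
// the low 128 bits of the result; if only those low elements are demanded,
// this is simply the upper 128-bit half, i.e. an extract_subvector at index 2.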
37341 if (VT == MVT::v4f64 || VT == MVT::v4i64) {
37342 SmallVector<int, 4> Mask;
37343 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
37344 if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
37346 SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
37347 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
37348 SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
37349 return TLO.CombineTo(Op, Insert);
37354 // Zero upper elements.
37355 case X86ISD::VZEXT_MOVL:
37356 // Target unary shuffles by immediate:
37357 case X86ISD::PSHUFD:
37358 case X86ISD::PSHUFLW:
37359 case X86ISD::PSHUFHW:
37360 case X86ISD::VPERMILPI:
37361 // (Non-Lane Crossing) Target Shuffles.
37362 case X86ISD::VPERMILPV:
37363 case X86ISD::VPERMIL2:
37364 case X86ISD::PSHUFB:
37365 case X86ISD::UNPCKL:
37366 case X86ISD::UNPCKH:
37367 case X86ISD::BLENDI:
37368 // Saturated Packs.
37369 case X86ISD::PACKSS:
37370 case X86ISD::PACKUS:
37374 case X86ISD::FHADD:
37375 case X86ISD::FHSUB: {
37377 SmallVector<SDValue, 4> Ops;
37378 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
37379 SDValue SrcOp = Op.getOperand(i);
37380 EVT SrcVT = SrcOp.getValueType();
37381 assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
37382 "Unsupported vector size");
37383 Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
37387 MVT ExtVT = VT.getSimpleVT();
37388 ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
37389 ExtSizeInBits / ExtVT.getScalarSizeInBits());
37390 SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
37391 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
37393 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
37394 return TLO.CombineTo(Op, Insert);
37399 // Get target/faux shuffle mask.
37400 APInt OpUndef, OpZero;
37401 SmallVector<int, 64> OpMask;
37402 SmallVector<SDValue, 2> OpInputs;
37403 if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
37404 OpZero, TLO.DAG, Depth, false))
37407 // Shuffle inputs must be the same size as the result.
37408 if (OpMask.size() != (unsigned)NumElts ||
37409 llvm::any_of(OpInputs, [VT](SDValue V) {
37410 return VT.getSizeInBits() != V.getValueSizeInBits() ||
37411 !V.getValueType().isVector();
37415 KnownZero = OpZero;
37416 KnownUndef = OpUndef;
37418 // Check if shuffle mask can be simplified to undef/zero/identity.
37419 int NumSrcs = OpInputs.size();
37420 for (int i = 0; i != NumElts; ++i)
37421 if (!DemandedElts[i])
37422 OpMask[i] = SM_SentinelUndef;
37424 if (isUndefInRange(OpMask, 0, NumElts)) {
37425 KnownUndef.setAllBits();
37426 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
37428 if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
37429 KnownZero.setAllBits();
37430 return TLO.CombineTo(
37431 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
37433 for (int Src = 0; Src != NumSrcs; ++Src)
37434 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
37435 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
37437 // Attempt to simplify inputs.
37438 for (int Src = 0; Src != NumSrcs; ++Src) {
37439 // TODO: Support inputs of different types.
37440 if (OpInputs[Src].getValueType() != VT)
37443 int Lo = Src * NumElts;
37444 APInt SrcElts = APInt::getNullValue(NumElts);
37445 for (int i = 0; i != NumElts; ++i)
37446 if (DemandedElts[i]) {
37447 int M = OpMask[i] - Lo;
37448 if (0 <= M && M < NumElts)
37452 // TODO - Propagate input undef/zero elts.
37453 APInt SrcUndef, SrcZero;
37454 if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
37459 // If we don't demand all elements, then attempt to combine to a simpler
37461 // TODO: Handle other depths, but first we need to handle the fact that
37462 // it might combine to the same shuffle.
37463 if (!DemandedElts.isAllOnesValue() && Depth == 0) {
37464 SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
37465 for (int i = 0; i != NumElts; ++i)
37466 if (DemandedElts[i])
37467 DemandedMask[i] = i;
37469 SDValue NewShuffle = combineX86ShufflesRecursively(
37470 {Op}, 0, Op, DemandedMask, {}, Depth, /*HasVarMask*/ false,
37471 /*AllowVarMask*/ true, TLO.DAG, Subtarget);
37473 return TLO.CombineTo(Op, NewShuffle);
37479 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
37480 SDValue Op, const APInt &OriginalDemandedBits,
37481 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
37482 unsigned Depth) const {
37483 EVT VT = Op.getValueType();
37484 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
37485 unsigned Opc = Op.getOpcode();
37487 case X86ISD::VTRUNC: {
37489 SDValue Src = Op.getOperand(0);
37490 MVT SrcVT = Src.getSimpleValueType();
37492 // Simplify the input, using demanded bit information.
37493 APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
37494 APInt DemandedElts = OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
37495 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO, Depth + 1))
37499 case X86ISD::PMULDQ:
37500 case X86ISD::PMULUDQ: {
37501 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
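// For example, (pmuludq (and X, splat(0x00000000FFFFFFFF)), Y) can drop the
// AND, because the multiply never reads the upper 32 bits of each 64-bit lane.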
37503 SDValue LHS = Op.getOperand(0);
37504 SDValue RHS = Op.getOperand(1);
37505 // FIXME: Can we bound this better?
37506 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
37507 if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
37510 if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
37514 // Aggressively peek through ops to get at the demanded low bits.
37515 SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
37516 LHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
37517 SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
37518 RHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
37519 if (DemandedLHS || DemandedRHS) {
37520 DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
37521 DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
37522 return TLO.CombineTo(
37523 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
37527 case X86ISD::VSHLI: {
37528 SDValue Op0 = Op.getOperand(0);
37530 unsigned ShAmt = Op.getConstantOperandVal(1);
37531 if (ShAmt >= BitWidth)
37534 APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
37536 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
37537 // single shift. We can do this if the bottom bits (which are shifted
37538 // out) are never demanded.
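// For example, (vpslld $8, (vpsrld $3, X)) can become (vpslld $5, X) when the
// low 8 bits of each element are not demanded.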
37539 if (Op0.getOpcode() == X86ISD::VSRLI &&
37540 OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
37541 unsigned Shift2Amt = Op0.getConstantOperandVal(1);
37542 if (Shift2Amt < BitWidth) {
37543 int Diff = ShAmt - Shift2Amt;
37545 return TLO.CombineTo(Op, Op0.getOperand(0));
37547 unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
37548 SDValue NewShift = TLO.DAG.getNode(
37549 NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
37550 TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
37551 return TLO.CombineTo(Op, NewShift);
37555 // If we are only demanding sign bits then we can use the shift source directly.
37556 unsigned NumSignBits =
37557 TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
37558 unsigned UpperDemandedBits =
37559 BitWidth - OriginalDemandedBits.countTrailingZeros();
37560 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
37561 return TLO.CombineTo(Op, Op0);
37563 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
37567 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
37568 Known.Zero <<= ShAmt;
37569 Known.One <<= ShAmt;
37571 // Low bits known zero.
37572 Known.Zero.setLowBits(ShAmt);
37575 case X86ISD::VSRLI: {
37576 unsigned ShAmt = Op.getConstantOperandVal(1);
37577 if (ShAmt >= BitWidth)
37580 APInt DemandedMask = OriginalDemandedBits << ShAmt;
37582 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
37583 OriginalDemandedElts, Known, TLO, Depth + 1))
37586 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
37587 Known.Zero.lshrInPlace(ShAmt);
37588 Known.One.lshrInPlace(ShAmt);
37590 // High bits known zero.
37591 Known.Zero.setHighBits(ShAmt);
37594 case X86ISD::VSRAI: {
37595 SDValue Op0 = Op.getOperand(0);
37596 SDValue Op1 = Op.getOperand(1);
37598 unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
37599 if (ShAmt >= BitWidth)
37602 APInt DemandedMask = OriginalDemandedBits << ShAmt;
37604 // If we just want the sign bit then we don't need to shift it.
37605 if (OriginalDemandedBits.isSignMask())
37606 return TLO.CombineTo(Op, Op0);
37608 // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
37609 if (Op0.getOpcode() == X86ISD::VSHLI &&
37610 Op.getOperand(1) == Op0.getOperand(1)) {
37611 SDValue Op00 = Op0.getOperand(0);
37612 unsigned NumSignBits =
37613 TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
37614 if (ShAmt < NumSignBits)
37615 return TLO.CombineTo(Op, Op00);
37618 // If any of the demanded bits are produced by the sign extension, we also
37619 // demand the input sign bit.
37620 if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
37621 DemandedMask.setSignBit();
37623 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
37627 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
37628 Known.Zero.lshrInPlace(ShAmt);
37629 Known.One.lshrInPlace(ShAmt);
37631 // If the input sign bit is known to be zero, or if none of the top bits
37632 // are demanded, turn this into an unsigned shift right.
37633 if (Known.Zero[BitWidth - ShAmt - 1] ||
37634 OriginalDemandedBits.countLeadingZeros() >= ShAmt)
37635 return TLO.CombineTo(
37636 Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
37638 // High bits are known one.
37639 if (Known.One[BitWidth - ShAmt - 1])
37640 Known.One.setHighBits(ShAmt);
37643 case X86ISD::PEXTRB:
37644 case X86ISD::PEXTRW: {
37645 SDValue Vec = Op.getOperand(0);
37646 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
37647 MVT VecVT = Vec.getSimpleValueType();
37648 unsigned NumVecElts = VecVT.getVectorNumElements();
37650 if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
37651 unsigned Idx = CIdx->getZExtValue();
37652 unsigned VecBitWidth = VecVT.getScalarSizeInBits();
37654 // If we demand no bits from the vector then we must have demanded
37655 // bits from the implicit zext - simplify to zero.
37656 APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
37657 if (DemandedVecBits == 0)
37658 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
37660 APInt KnownUndef, KnownZero;
37661 APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
37662 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
37663 KnownZero, TLO, Depth + 1))
37666 KnownBits KnownVec;
37667 if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
37668 KnownVec, TLO, Depth + 1))
37671 if (SDValue V = SimplifyMultipleUseDemandedBits(
37672 Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
37673 return TLO.CombineTo(
37674 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
37676 Known = KnownVec.zext(BitWidth);
37681 case X86ISD::PINSRB:
37682 case X86ISD::PINSRW: {
37683 SDValue Vec = Op.getOperand(0);
37684 SDValue Scl = Op.getOperand(1);
37685 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
37686 MVT VecVT = Vec.getSimpleValueType();
37688 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
37689 unsigned Idx = CIdx->getZExtValue();
37690 if (!OriginalDemandedElts[Idx])
37691 return TLO.CombineTo(Op, Vec);
37693 KnownBits KnownVec;
37694 APInt DemandedVecElts(OriginalDemandedElts);
37695 DemandedVecElts.clearBit(Idx);
37696 if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
37697 KnownVec, TLO, Depth + 1))
37700 KnownBits KnownScl;
37701 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
37702 APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
37703 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
37706 KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
37707 Known.One = KnownVec.One & KnownScl.One;
37708 Known.Zero = KnownVec.Zero & KnownScl.Zero;
37713 case X86ISD::PACKSS:
37714 // PACKSS saturates to MIN/MAX integer values. So if we just want the
37715 // sign bit then we can just ask for the source operands sign bit.
37716 // TODO - add known bits handling.
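// (Signed saturation never flips the sign: the MSB of each narrowed result
// element always matches the MSB of its wide source element.)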
37717 if (OriginalDemandedBits.isSignMask()) {
37718 APInt DemandedLHS, DemandedRHS;
37719 getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
37721 KnownBits KnownLHS, KnownRHS;
37722 APInt SignMask = APInt::getSignMask(BitWidth * 2);
37723 if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
37724 KnownLHS, TLO, Depth + 1))
37726 if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
37727 KnownRHS, TLO, Depth + 1))
37730 // Attempt to avoid multi-use ops if we don't need anything from them.
37731 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
37732 Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
37733 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
37734 Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
37735 if (DemandedOp0 || DemandedOp1) {
37736 SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
37737 SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
37738 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
37741 // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
37743 case X86ISD::PCMPGT:
37744 // icmp sgt(0, R) == ashr(R, BitWidth-1).
37745 // iff we only need the sign bit then we can use R directly.
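// (0 > R) is true exactly when R is negative, so each lane of the all-ones /
// all-zeros compare result has the same sign bit as the corresponding lane of R.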
37746 if (OriginalDemandedBits.isSignMask() &&
37747 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
37748 return TLO.CombineTo(Op, Op.getOperand(1));
37750 case X86ISD::MOVMSK: {
37751 SDValue Src = Op.getOperand(0);
37752 MVT SrcVT = Src.getSimpleValueType();
37753 unsigned SrcBits = SrcVT.getScalarSizeInBits();
37754 unsigned NumElts = SrcVT.getVectorNumElements();
37756 // If we don't need the sign bits at all just return zero.
37757 if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
37758 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
37760 // Only demand the vector elements of the sign bits we need.
37761 APInt KnownUndef, KnownZero;
37762 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
37763 if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
37767 Known.Zero = KnownZero.zextOrSelf(BitWidth);
37768 Known.Zero.setHighBits(BitWidth - NumElts);
37770 // MOVMSK only uses the MSB from each vector element.
37771 KnownBits KnownSrc;
37772 APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
37773 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
37777 if (KnownSrc.One[SrcBits - 1])
37778 Known.One.setLowBits(NumElts);
37779 else if (KnownSrc.Zero[SrcBits - 1])
37780 Known.Zero.setLowBits(NumElts);
37782 // Attempt to avoid multi-use ops if we don't need anything from them.
37783 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
37784 Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
37785 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
37788 case X86ISD::BEXTR: {
37789 SDValue Op0 = Op.getOperand(0);
37790 SDValue Op1 = Op.getOperand(1);
37792 // Only bottom 16-bits of the control bits are required.
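// (BEXTR's control word encodes the start position in bits 7:0 and the length
// in bits 15:8; the hardware ignores bits 16 and above.)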
37793 if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
37794 // NOTE: SimplifyDemandedBits won't do this for constants.
37795 const APInt &Val1 = Cst1->getAPIntValue();
37796 APInt MaskedVal1 = Val1 & 0xFFFF;
37797 if (MaskedVal1 != Val1) {
37799 return TLO.CombineTo(
37800 Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
37801 TLO.DAG.getConstant(MaskedVal1, DL, VT)));
37806 APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
37807 if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
37810 // If the length is 0, replace with 0.
37811 KnownBits LengthBits = Known1.extractBits(8, 8);
37812 if (LengthBits.isZero())
37813 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
37819 return TargetLowering::SimplifyDemandedBitsForTargetNode(
37820 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
37823 SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
37824 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
37825 SelectionDAG &DAG, unsigned Depth) const {
37826 int NumElts = DemandedElts.getBitWidth();
37827 unsigned Opc = Op.getOpcode();
37828 EVT VT = Op.getValueType();
37831 case X86ISD::PINSRB:
37832 case X86ISD::PINSRW: {
37833 // If we don't demand the inserted element, return the base vector.
37834 SDValue Vec = Op.getOperand(0);
37835 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
37836 MVT VecVT = Vec.getSimpleValueType();
37837 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
37838 !DemandedElts[CIdx->getZExtValue()])
37842 case X86ISD::VSHLI: {
37843 // If we are only demanding sign bits then we can use the shift source
37845 SDValue Op0 = Op.getOperand(0);
37846 unsigned ShAmt = Op.getConstantOperandVal(1);
37847 unsigned BitWidth = DemandedBits.getBitWidth();
37848 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
37849 unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
37850 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
37854 case X86ISD::VSRAI:
37855 // iff we only need the sign bit then we can use the source directly.
37856 // TODO: generalize where we only demand extended signbits.
37857 if (DemandedBits.isSignMask())
37858 return Op.getOperand(0);
37860 case X86ISD::PCMPGT:
37861 // icmp sgt(0, R) == ashr(R, BitWidth-1).
37862 // iff we only need the sign bit then we can use R directly.
37863 if (DemandedBits.isSignMask() &&
37864 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
37865 return Op.getOperand(1);
37869 APInt ShuffleUndef, ShuffleZero;
37870 SmallVector<int, 16> ShuffleMask;
37871 SmallVector<SDValue, 2> ShuffleOps;
37872 if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
37873 ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
37874 // If all the demanded elts are from one operand and are inline,
37875 // then we can use the operand directly.
37876 int NumOps = ShuffleOps.size();
37877 if (ShuffleMask.size() == (unsigned)NumElts &&
37878 llvm::all_of(ShuffleOps, [VT](SDValue V) {
37879 return VT.getSizeInBits() == V.getValueSizeInBits();
37882 if (DemandedElts.isSubsetOf(ShuffleUndef))
37883 return DAG.getUNDEF(VT);
37884 if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
37885 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
37887 // Bitmask that indicates which ops have only been accessed 'inline'.
37888 APInt IdentityOp = APInt::getAllOnesValue(NumOps);
37889 for (int i = 0; i != NumElts; ++i) {
37890 int M = ShuffleMask[i];
37891 if (!DemandedElts[i] || ShuffleUndef[i])
37893 int OpIdx = M / NumElts;
37894 int EltIdx = M % NumElts;
37895 if (M < 0 || EltIdx != i) {
37896 IdentityOp.clearAllBits();
37899 IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
37900 if (IdentityOp == 0)
37903 assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
37904 "Multiple identity shuffles detected");
37906 if (IdentityOp != 0)
37907 return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
37911 return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
37912 Op, DemandedBits, DemandedElts, DAG, Depth);
37915 // Helper to peek through bitops/setcc to determine size of source vector.
37916 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
37917 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
37918 switch (Src.getOpcode()) {
37920 return Src.getOperand(0).getValueSizeInBits() == Size;
37924 return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
37925 checkBitcastSrcVectorSize(Src.getOperand(1), Size);
37930 // Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
37931 static unsigned getAltBitOpcode(unsigned Opcode) {
37933 case ISD::AND: return X86ISD::FAND;
37934 case ISD::OR: return X86ISD::FOR;
37935 case ISD::XOR: return X86ISD::FXOR;
37936 case X86ISD::ANDNP: return X86ISD::FANDN;
37938 llvm_unreachable("Unknown bitwise opcode");
37941 // Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
37942 static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
37944 EVT SrcVT = Src.getValueType();
37945 if (SrcVT != MVT::v4i1)
37948 switch (Src.getOpcode()) {
37950 if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
37951 ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
37952 cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
37953 SDValue Op0 = Src.getOperand(0);
37954 if (ISD::isNormalLoad(Op0.getNode()))
37955 return DAG.getBitcast(MVT::v4f32, Op0);
37956 if (Op0.getOpcode() == ISD::BITCAST &&
37957 Op0.getOperand(0).getValueType() == MVT::v4f32)
37958 return Op0.getOperand(0);
37964 SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
37965 SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
37967 return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
37975 // Helper to push sign extension of vXi1 SETCC result through bitops.
37976 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
37977 SDValue Src, const SDLoc &DL) {
37978 switch (Src.getOpcode()) {
37980 return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
37984 return DAG.getNode(
37985 Src.getOpcode(), DL, SExtVT,
37986 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
37987 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
37989 llvm_unreachable("Unexpected node type for vXi1 sign extension");
37992 // Try to match patterns such as
37993 // (i16 bitcast (v16i1 x))
37994 // ->
37995 // (i16 movmsk (16i8 sext (v16i1 x)))
37996 // before the illegal vector is scalarized on subtargets that don't have legal
37997 // vector types.
37998 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
38000 const X86Subtarget &Subtarget) {
38001 EVT SrcVT = Src.getValueType();
38002 if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
38005 // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
38006 // legalization destroys the v4i32 type.
38007 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
38008 if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
38009 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
38010 DAG.getBitcast(MVT::v4f32, V));
38011 return DAG.getZExtOrTrunc(V, DL, VT);
38015 // If the input is a truncate from v16i8 or v32i8 go ahead and use a
38016 // movmskb even with avx512. This will be better than truncating to vXi1 and
38017 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
38018 // vpcmpeqb/vpcmpgtb.
38019 bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
38020 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
38021 Src.getOperand(0).getValueType() == MVT::v32i8 ||
38022 Src.getOperand(0).getValueType() == MVT::v64i8);
38024 // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
38025 // directly with vpmovmskb/vmovmskps/vmovmskpd.
38026 if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
38027 cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
38028 ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
38029 EVT CmpVT = Src.getOperand(0).getValueType();
38030 EVT EltVT = CmpVT.getVectorElementType();
38031 if (CmpVT.getSizeInBits() <= 256 &&
38032 (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
38033 PreferMovMsk = true;
38036 // With AVX512 vxi1 types are legal and we prefer using k-regs.
38037 // MOVMSK is supported in SSE2 or later.
38038 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
38041 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
38042 // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
38043 // v8i16 and v16i16.
38044 // For these two cases, we can shuffle the upper element bytes to a
38045 // consecutive sequence at the start of the vector and treat the results as
38046 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
38047 // for v16i16 this is not the case, because the shuffle is expensive, so we
38048 // avoid sign-extending to this type entirely.
38049 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
38050 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
38052 bool PropagateSExt = false;
38053 switch (SrcVT.getSimpleVT().SimpleTy) {
38057 SExtVT = MVT::v2i64;
38060 SExtVT = MVT::v4i32;
38061 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
38062 // sign-extend to a 256-bit operation to avoid truncation.
38063 if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256)) {
38064 SExtVT = MVT::v4i64;
38065 PropagateSExt = true;
38069 SExtVT = MVT::v8i16;
38070 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
38071 // sign-extend to a 256-bit operation to match the compare.
38072 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
38073 // 256-bit because the shuffle is cheaper than sign extending the result of
38075 if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256) ||
38076 checkBitcastSrcVectorSize(Src, 512))) {
38077 SExtVT = MVT::v8i32;
38078 PropagateSExt = true;
38082 SExtVT = MVT::v16i8;
38083 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
38084 // it is not profitable to sign-extend to 256-bit because this will
38085 // require an extra cross-lane shuffle which is more expensive than
38086 // truncating the result of the compare to 128-bits.
38089 SExtVT = MVT::v32i8;
38092 // If we have AVX512F but not AVX512BW, and the input is a truncate from
38093 // v64i8 (checked earlier), then split the input and make two pmovmskbs.
38094 if (Subtarget.hasAVX512()) {
38095 if (Subtarget.hasBWI())
38097 SExtVT = MVT::v64i8;
38100 // Split if this is a <64 x i8> comparison result.
38101 if (checkBitcastSrcVectorSize(Src, 512)) {
38102 SExtVT = MVT::v64i8;
38108 SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
38109 : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
38111 if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
38112 V = getPMOVMSKB(DL, V, DAG, Subtarget);
38114 if (SExtVT == MVT::v8i16)
38115 V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
38116 DAG.getUNDEF(MVT::v8i16));
38117 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
38121 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
38122 V = DAG.getZExtOrTrunc(V, DL, IntVT);
38123 return DAG.getBitcast(VT, V);
38124 }
38126 // Convert a vXi1 constant build vector to the same width scalar integer.
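// For example, the v4i1 constant <1,0,1,1> becomes the i4 constant 0b1101,
// with element 0 mapping to bit 0.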
38127 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
38128 EVT SrcVT = Op.getValueType();
38129 assert(SrcVT.getVectorElementType() == MVT::i1 &&
38130 "Expected a vXi1 vector");
38131 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
38132 "Expected a constant build vector");
38134 APInt Imm(SrcVT.getVectorNumElements(), 0);
38135 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
38136 SDValue In = Op.getOperand(Idx);
38137 if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
38138 Imm.setBit(Idx);
38139 }
38140 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
38141 return DAG.getConstant(Imm, SDLoc(Op), IntVT);
38144 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
38145 TargetLowering::DAGCombinerInfo &DCI,
38146 const X86Subtarget &Subtarget) {
38147 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
38149 if (!DCI.isBeforeLegalizeOps())
38152 // Only do this if we have k-registers.
38153 if (!Subtarget.hasAVX512())
38156 EVT DstVT = N->getValueType(0);
38157 SDValue Op = N->getOperand(0);
38158 EVT SrcVT = Op.getValueType();
38160 if (!Op.hasOneUse())
38163 // Look for logic ops.
38164 if (Op.getOpcode() != ISD::AND &&
38165 Op.getOpcode() != ISD::OR &&
38166 Op.getOpcode() != ISD::XOR)
38169 // Make sure we have a bitcast between mask registers and a scalar type.
38170 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
38171 DstVT.isScalarInteger()) &&
38172 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
38173 SrcVT.isScalarInteger()))
38176 SDValue LHS = Op.getOperand(0);
38177 SDValue RHS = Op.getOperand(1);
38179 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
38180 LHS.getOperand(0).getValueType() == DstVT)
38181 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
38182 DAG.getBitcast(DstVT, RHS));
38184 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
38185 RHS.getOperand(0).getValueType() == DstVT)
38186 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
38187 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
38189 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
38190 // Most of these have to move a constant from the scalar domain anyway.
38191 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
38192 RHS = combinevXi1ConstantToInteger(RHS, DAG);
38193 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
38194 DAG.getBitcast(DstVT, LHS), RHS);
38200 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
38201 const X86Subtarget &Subtarget) {
38203 unsigned NumElts = BV->getNumOperands();
38204 SDValue Splat = BV->getSplatValue();
38206 // Build MMX element from integer GPR or SSE float values.
38207 auto CreateMMXElement = [&](SDValue V) {
38209 return DAG.getUNDEF(MVT::x86mmx);
38210 if (V.getValueType().isFloatingPoint()) {
38211 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
38212 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
38213 V = DAG.getBitcast(MVT::v2i64, V);
38214 return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
38216 V = DAG.getBitcast(MVT::i32, V);
38218 V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
38220 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
38223 // Convert build vector ops to MMX data in the bottom elements.
38224 SmallVector<SDValue, 8> Ops;
38226 // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
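// For example, for a v8i8 splat, punpcklbw of the element with itself
// duplicates the byte into a 16-bit pair, and pshufw with immediate 0 then
// replicates that pair across all four words of the MMX register.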
38227 if (Splat) {
38228 if (Splat.isUndef())
38229 return DAG.getUNDEF(MVT::x86mmx);
38231 Splat = CreateMMXElement(Splat);
38233 if (Subtarget.hasSSE1()) {
38234 // Unpack v8i8 to splat i8 elements to lowest 16-bits.
38236 Splat = DAG.getNode(
38237 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
38238 DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
38241 // Use PSHUFW to repeat 16-bit elements.
38242 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
38243 return DAG.getNode(
38244 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
38245 DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32),
38246 Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
38248 Ops.append(NumElts, Splat);
38250 for (unsigned i = 0; i != NumElts; ++i)
38251 Ops.push_back(CreateMMXElement(BV->getOperand(i)));
38254 // Use tree of PUNPCKLs to build up general MMX vector.
38255 while (Ops.size() > 1) {
38256 unsigned NumOps = Ops.size();
38257 unsigned IntrinOp =
38258 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
38259 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
38260 : Intrinsic::x86_mmx_punpcklbw));
38261 SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
38262 for (unsigned i = 0; i != NumOps; i += 2)
38263 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
38264 Ops[i], Ops[i + 1]);
38265 Ops.resize(NumOps / 2);
38271 // Recursive function that attempts to find if a bool vector node was originally
38272 // a vector/float/double that got truncated/extended/bitcast to/from a scalar
38273 // integer. If so, replace the scalar ops with bool vector equivalents back down
38274 // the chain.
38275 static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, SDLoc DL,
38277 const X86Subtarget &Subtarget) {
38278 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38279 unsigned Opc = V.getOpcode();
38281 case ISD::BITCAST: {
38282 // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
38283 SDValue Src = V.getOperand(0);
38284 EVT SrcVT = Src.getValueType();
38285 if (SrcVT.isVector() || SrcVT.isFloatingPoint())
38286 return DAG.getBitcast(VT, Src);
38289 case ISD::TRUNCATE: {
38290 // If we find a suitable source, a truncated scalar becomes a subvector.
38291 SDValue Src = V.getOperand(0);
38293 EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
38294 if (TLI.isTypeLegal(NewSrcVT))
38296 combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
38297 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
38298 DAG.getIntPtrConstant(0, DL));
38301 case ISD::ANY_EXTEND:
38302 case ISD::ZERO_EXTEND: {
38303 // If we find a suitable source, an extended scalar becomes a subvector.
38304 SDValue Src = V.getOperand(0);
38305 EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
38306 Src.getScalarValueSizeInBits());
38307 if (TLI.isTypeLegal(NewSrcVT))
38309 combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
38310 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
38311 Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
38312 : DAG.getConstant(0, DL, VT),
38313 N0, DAG.getIntPtrConstant(0, DL));
38317 // If we find suitable sources, we can just move an OR to the vector domain.
38318 SDValue Src0 = V.getOperand(0);
38319 SDValue Src1 = V.getOperand(1);
38320 if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
38321 if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
38322 return DAG.getNode(Opc, DL, VT, N0, N1);
38326 // If we find a suitable source, a SHL becomes a KSHIFTL.
38327 SDValue Src0 = V.getOperand(0);
38328 if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
38329 if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
38330 return DAG.getNode(
38331 X86ISD::KSHIFTL, DL, VT, N0,
38332 DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
38339 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
38340 TargetLowering::DAGCombinerInfo &DCI,
38341 const X86Subtarget &Subtarget) {
38342 SDValue N0 = N->getOperand(0);
38343 EVT VT = N->getValueType(0);
38344 EVT SrcVT = N0.getValueType();
38346 // Try to match patterns such as
38347 // (i16 bitcast (v16i1 x))
38348 // ->
38349 // (i16 movmsk (16i8 sext (v16i1 x)))
38350 // before the setcc result is scalarized on subtargets that don't have legal
38351 // vector types.
38352 if (DCI.isBeforeLegalize()) {
38354 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
38357 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
38358 // type, widen both sides to avoid a trip through memory.
38359 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
38360 Subtarget.hasAVX512()) {
38361 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
38362 N0 = DAG.getBitcast(MVT::v8i1, N0);
38363 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
38364 DAG.getIntPtrConstant(0, dl));
38367 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
38368 // type, widen both sides to avoid a trip through memory.
38369 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
38370 Subtarget.hasAVX512()) {
38371 // Use zeros for the widening if we already have some zeroes. This can
38372 // allow SimplifyDemandedBits to remove scalar ANDs that may be down
38374 // FIXME: It might make sense to detect a concat_vectors with a mix of
38375 // zeroes and undef and turn it into insert_subvector for i1 vectors as
38376 // a separate combine. What we can't do is canonicalize the operands of
38377 // such a concat or we'll get into a loop with SimplifyDemandedBits.
38378 if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
38379 SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
38380 if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
38381 SrcVT = LastOp.getValueType();
38382 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
38383 SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
38384 Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
38385 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
38386 N0 = DAG.getBitcast(MVT::i8, N0);
38387 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
38391 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
38392 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
38394 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
38395 N0 = DAG.getBitcast(MVT::i8, N0);
38396 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
38399 // If we're bitcasting from iX to vXi1, see if the integer originally
38400 // began as a vXi1 and whether we can remove the bitcast entirely.
38401 if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
38402 SrcVT.isScalarInteger() &&
38403 DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
38405 combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
38410 // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
38411 // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
38412 // due to insert_subvector legalization on KNL. By promoting the copy to i16
38413 // we can help with known bits propagation from the vXi1 domain to the
38415 if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
38416 !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38417 N0.getOperand(0).getValueType() == MVT::v16i1 &&
38418 isNullConstant(N0.getOperand(1)))
38419 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
38420 DAG.getBitcast(MVT::i16, N0.getOperand(0)));
38422 // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
38423 // and the vbroadcast_load are both integer or both fp. In some cases this
38424 // will remove the bitcast entirely.
38425 if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
38426 VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
38427 auto *BCast = cast<MemIntrinsicSDNode>(N0);
38428 unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
38429 unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
38430 // Don't swap i8/i16 since don't have fp types that size.
38431 if (MemSize >= 32) {
38432 MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
38433 : MVT::getIntegerVT(MemSize);
38434 MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
38435 : MVT::getIntegerVT(SrcVTSize);
38436 LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());
38438 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
38439 SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
38441 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
38442 MemVT, BCast->getMemOperand());
38443 DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
38444 return DAG.getBitcast(VT, ResNode);
38448 // Since MMX types are special and don't usually play with other vector types,
38449 // it's better to handle them early to be sure we emit efficient code by
38450 // avoiding store-load conversions.
38451 if (VT == MVT::x86mmx) {
38452 // Detect MMX constant vectors.
38454 SmallVector<APInt, 1> EltBits;
38455 if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
38457 // Handle zero-extension of i32 with MOVD.
38458 if (EltBits[0].countLeadingZeros() >= 32)
38459 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
38460 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
38461 // Else, bitcast to a double.
38462 // TODO - investigate supporting sext 32-bit immediates on x86_64.
38463 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
38464 return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
38467 // Detect bitcasts to x86mmx low word.
38468 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
38469 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
38470 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
38471 bool LowUndef = true, AllUndefOrZero = true;
38472 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
38473 SDValue Op = N0.getOperand(i);
38474 LowUndef &= Op.isUndef() || (i >= e/2);
38475 AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
38477 if (AllUndefOrZero) {
38478 SDValue N00 = N0.getOperand(0);
38480 N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
38481 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
38482 return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
38486 // Detect bitcasts of 64-bit build vectors and convert to a
38487 // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the lowest element.
38489 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
38490 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
38491 SrcVT == MVT::v8i8))
38492 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
38494 // Detect bitcasts between element or subvector extraction to x86mmx.
38495 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
38496 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
38497 isNullConstant(N0.getOperand(1))) {
38498 SDValue N00 = N0.getOperand(0);
38499 if (N00.getValueType().is128BitVector())
38500 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
38501 DAG.getBitcast(MVT::v2i64, N00));
38504 // Detect bitcasts from FP_TO_SINT to x86mmx.
38505 if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
38507 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
38508 DAG.getUNDEF(MVT::v2i32));
38509 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
38510 DAG.getBitcast(MVT::v2i64, Res));
38514 // Try to remove a bitcast of a constant vXi1 vector. We have to legalize
38515 // most of these to scalar anyway.
38516 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
38517 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
38518 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
38519 return combinevXi1ConstantToInteger(N0, DAG);
38522 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
38523 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
38524 isa<ConstantSDNode>(N0)) {
38525 auto *C = cast<ConstantSDNode>(N0);
38526 if (C->isAllOnesValue())
38527 return DAG.getConstant(1, SDLoc(N0), VT);
38528 if (C->isNullValue())
38529 return DAG.getConstant(0, SDLoc(N0), VT);
38532 // Look for a MOVMSK that is possibly truncated and then bitcast to vXi1.
38533 // Turn it into a sign bit compare that produces a k-register. This avoids
38534 // a trip through a GPR.
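// For example (illustrative): (vXi1 (bitcast (trunc (movmsk V)))) can instead
// be computed as (setcc (bitcast V), 0, setlt), a per-element sign-bit test
// whose result lands directly in a mask register.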
38535 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
38536 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
38537 isPowerOf2_32(VT.getVectorNumElements())) {
38538 unsigned NumElts = VT.getVectorNumElements();
38541 // Peek through truncate.
38542 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
38543 Src = N0.getOperand(0);
38545 if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
38546 SDValue MovmskIn = Src.getOperand(0);
38547 MVT MovmskVT = MovmskIn.getSimpleValueType();
38548 unsigned MovMskElts = MovmskVT.getVectorNumElements();
38550 // We allow extra bits of the movmsk to be used since they are known zero.
38551 // We can't convert a VPMOVMSKB without avx512bw.
38552 if (MovMskElts <= NumElts &&
38553 (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
38554 EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
38555 MovmskIn = DAG.getBitcast(IntVT, MovmskIn);
38557 MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
38558 SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
38559 DAG.getConstant(0, dl, IntVT), ISD::SETLT);
38560 if (EVT(CmpVT) == VT)
38563 // Pad with zeroes up to original VT to replace the zeroes that were
38564 // being used from the MOVMSK.
38565 unsigned NumConcats = NumElts / MovMskElts;
38566 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
38568 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
38573 // Try to remove bitcasts from input and output of mask arithmetic to
38574 // remove GPR<->K-register crossings.
38575 if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
38578 // Convert a bitcasted integer logic operation that has one bitcasted
38579 // floating-point operand into a floating-point logic operation. This may
38580 // create a load of a constant, but that is cheaper than materializing the
38581 // constant in an integer register and transferring it to an SSE register or
38582 // transferring the SSE operand to integer register and back.
38584 switch (N0.getOpcode()) {
38585 case ISD::AND: FPOpcode = X86ISD::FAND; break;
38586 case ISD::OR: FPOpcode = X86ISD::FOR; break;
38587 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
38588 default: return SDValue();
38591 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
38592 (Subtarget.hasSSE2() && VT == MVT::f64)))
38595 SDValue LogicOp0 = N0.getOperand(0);
38596 SDValue LogicOp1 = N0.getOperand(1);
38599 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
38600 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
38601 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
38602 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
38603 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
38604 return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
38606 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
38607 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
38608 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
38609 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
38610 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
38611 return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
38617 // Given an ABS node, detect the following pattern:
38618 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
38619 // This is useful as it is the input into a SAD pattern.
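// For example (illustrative): abs(zext(<16 x i8> a) - zext(<16 x i8> b)) is
// exactly the per-element absolute difference that PSADBW then sums in
// groups of eight bytes.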
38620 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
38621 SDValue AbsOp1 = Abs->getOperand(0);
38622 if (AbsOp1.getOpcode() != ISD::SUB)
38625 Op0 = AbsOp1.getOperand(0);
38626 Op1 = AbsOp1.getOperand(1);
38628 // Check if the operands of the sub are zero-extended from vectors of i8.
38629 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
38630 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
38631 Op1.getOpcode() != ISD::ZERO_EXTEND ||
38632 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
38638 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs to these zexts.
38640 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
38641 const SDValue &Zext1, const SDLoc &DL,
38642 const X86Subtarget &Subtarget) {
38643 // Find the appropriate width for the PSADBW.
38644 EVT InVT = Zext0.getOperand(0).getValueType();
38645 unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
38647 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
38648 // fill in the missing vector elements with 0.
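// For example (illustrative): a v4i8 input becomes the first four bytes of a
// v16i8 whose remaining twelve bytes are zero; those zeros contribute nothing
// to the SAD sum.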
38649 unsigned NumConcat = RegSize / InVT.getSizeInBits();
38650 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
38651 Ops[0] = Zext0.getOperand(0);
38652 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
38653 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
38654 Ops[0] = Zext1.getOperand(0);
38655 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
38657 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
38658 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
38659 ArrayRef<SDValue> Ops) {
38660 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
38661 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
38663 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
38664 return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
38668 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with PHMINPOSUW.
38670 static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
38671 const X86Subtarget &Subtarget) {
38672 // Bail without SSE41.
38673 if (!Subtarget.hasSSE41())
38676 EVT ExtractVT = Extract->getValueType(0);
38677 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
38680 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
38681 ISD::NodeType BinOp;
38682 SDValue Src = DAG.matchBinOpReduction(
38683 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
38687 EVT SrcVT = Src.getValueType();
38688 EVT SrcSVT = SrcVT.getScalarType();
38689 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
38693 SDValue MinPos = Src;
38695 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
38696 while (SrcVT.getSizeInBits() > 128) {
38698 std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
38699 SrcVT = Lo.getValueType();
38700 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
38702 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
38703 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
38704 "Unexpected value type");
38706 // PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
38707 // to flip the value accordingly.
38709 unsigned MaskEltsBits = ExtractVT.getSizeInBits();
38710 if (BinOp == ISD::SMAX)
38711 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
38712 else if (BinOp == ISD::SMIN)
38713 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
38714 else if (BinOp == ISD::UMAX)
38715 Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
38718 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
38720 // For v16i8 cases we need to perform UMIN on pairs of byte elements,
38721 // shuffling each upper element down and inserting zeros. This means that the
38722 // v16i8 UMIN will leave the upper element as zero, performing zero-extension
38723 // ready for the PHMINPOS.
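// For example (illustrative): after the shuffle+UMIN below, each 16-bit lane
// holds the zero-extended minimum of one byte pair, so the v8i16 PHMINPOSUW
// sees the correct byte minima.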
38724 if (ExtractVT == MVT::i8) {
38725 SDValue Upper = DAG.getVectorShuffle(
38726 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
38727 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
38728 MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
38731 // Perform the PHMINPOS on a v8i16 vector.
38732 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
38733 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
38734 MinPos = DAG.getBitcast(SrcVT, MinPos);
38737 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
38739 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
38740 DAG.getIntPtrConstant(0, DL));
38743 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
38744 static SDValue combineHorizontalPredicateResult(SDNode *Extract,
38746 const X86Subtarget &Subtarget) {
38747 // Bail without SSE2.
38748 if (!Subtarget.hasSSE2())
38751 EVT ExtractVT = Extract->getValueType(0);
38752 unsigned BitWidth = ExtractVT.getSizeInBits();
38753 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
38754 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
38757 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
38758 ISD::NodeType BinOp;
38759 SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
38760 if (!Match && ExtractVT == MVT::i1)
38761 Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
38765 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
38766 // which we can't support here for now.
38767 if (Match.getScalarValueSizeInBits() != BitWidth)
38772 EVT MatchVT = Match.getValueType();
38773 unsigned NumElts = MatchVT.getVectorNumElements();
38774 unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
38775 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38777 if (ExtractVT == MVT::i1) {
38778 // Special case for (pre-legalization) vXi1 reductions.
38779 if (NumElts > 64 || !isPowerOf2_32(NumElts))
38781 if (TLI.isTypeLegal(MatchVT)) {
38782 // If this is a legal AVX512 predicate type then we can just bitcast.
38783 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
38784 Movmsk = DAG.getBitcast(MovmskVT, Match);
38786 // For all_of(setcc(vec,0,eq)) - avoid vXi64 comparisons if we don't have
38787 // PCMPEQQ (SSE41+), use PCMPEQD instead.
38788 if (BinOp == ISD::AND && !Subtarget.hasSSE41() &&
38789 Match.getOpcode() == ISD::SETCC &&
38790 ISD::isBuildVectorAllZeros(Match.getOperand(1).getNode()) &&
38791 cast<CondCodeSDNode>(Match.getOperand(2))->get() ==
38792 ISD::CondCode::SETEQ) {
38793 SDValue Vec = Match.getOperand(0);
38794 if (Vec.getValueType().getScalarType() == MVT::i64 &&
38795 (2 * NumElts) <= MaxElts) {
38797 EVT CmpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
38798 MatchVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
38799 Match = DAG.getSetCC(
38800 DL, MatchVT, DAG.getBitcast(CmpVT, Match.getOperand(0)),
38801 DAG.getBitcast(CmpVT, Match.getOperand(1)), ISD::CondCode::SETEQ);
38805 // Use combineBitcastvxi1 to create the MOVMSK.
38806 while (NumElts > MaxElts) {
38808 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
38809 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
38812 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
38813 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
38817 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
38819 // FIXME: Better handling of k-registers or 512-bit vectors?
38820 unsigned MatchSizeInBits = Match.getValueSizeInBits();
38821 if (!(MatchSizeInBits == 128 ||
38822 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
38825 // Make sure this isn't a vector of 1 element. The perf win from using
38826 // MOVMSK diminishes with fewer elements in the reduction, but it is
38827 // generally better to get the comparison over to the GPRs as soon as
38828 // possible to reduce the number of vector ops.
38829 if (Match.getValueType().getVectorNumElements() < 2)
38832 // Check that we are extracting a reduction of all sign bits.
38833 if (DAG.ComputeNumSignBits(Match) != BitWidth)
38836 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
38838 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
38839 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
38840 MatchSizeInBits = Match.getValueSizeInBits();
38843 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
38845 if (64 == BitWidth || 32 == BitWidth)
38846 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
38847 MatchSizeInBits / BitWidth);
38849 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
38851 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
38852 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
38853 NumElts = MaskSrcVT.getVectorNumElements();
38855 assert((NumElts <= 32 || NumElts == 64) &&
38856 "Not expecting more than 64 elements");
38858 MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
38859 if (BinOp == ISD::XOR) {
38860 // parity -> (AND (CTPOP(MOVMSK X)), 1)
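// For example (illustrative): for a v16i8 mask, MOVMSK yields 16 bits, CTPOP
// counts the set lanes, and the low bit of that count is the XOR reduction.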
38861 SDValue Mask = DAG.getConstant(1, DL, CmpVT);
38862 SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
38863 Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
38864 return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
38868 ISD::CondCode CondCode;
38869 if (BinOp == ISD::OR) {
38870 // any_of -> MOVMSK != 0
38871 CmpC = DAG.getConstant(0, DL, CmpVT);
38872 CondCode = ISD::CondCode::SETNE;
38874 // all_of -> MOVMSK == ((1 << NumElts) - 1)
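// For example (illustrative): an all_of over v4i32/v4f32 uses MOVMSKPS and
// compares its 4-bit result against 0xF.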
38875 CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
38877 CondCode = ISD::CondCode::SETEQ;
38880 // The setcc produces an i8 of 0/1, so extend that to the result width and
38881 // negate to get the final 0/-1 mask value.
38883 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
38884 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
38885 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
38886 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
38887 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
38890 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
38891 const X86Subtarget &Subtarget) {
38892 // PSADBW is only supported on SSE2 and up.
38893 if (!Subtarget.hasSSE2())
38896 EVT ExtractVT = Extract->getValueType(0);
38897 // Verify the type we're extracting is either i32 or i64.
38898 // FIXME: Could support other types, but this is what we have coverage for.
38899 if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
38902 EVT VT = Extract->getOperand(0).getValueType();
38903 if (!isPowerOf2_32(VT.getVectorNumElements()))
38906 // Match shuffle + add pyramid.
38907 ISD::NodeType BinOp;
38908 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
38910 // The operand is expected to be zero extended from i8
38911 // (verified in detectZextAbsDiff).
38912 // In order to convert to i64 and above, an additional any/zero/sign
38913 // extend is expected.
38914 // The zero extend from 32 bits has no mathematical effect on the result.
38915 // Also, the sign extend is basically a zero extend
38916 // (it extends the sign bit, which is zero).
38917 // So it is correct to skip the sign/zero extend instruction.
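// For example (illustrative): a 16-element i8 absolute-difference sum is at
// most 16 * 255 = 4080, far below the i32 limit, so widening the reduction
// to i64 cannot change its value.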
38918 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
38919 Root.getOpcode() == ISD::ZERO_EXTEND ||
38920 Root.getOpcode() == ISD::ANY_EXTEND))
38921 Root = Root.getOperand(0);
38923 // If there was a match, we want Root to be an ABS node that is the root of an
38924 // abs-diff pattern.
38925 if (!Root || Root.getOpcode() != ISD::ABS)
38928 // Check whether we have an abs-diff pattern feeding into the select.
38929 SDValue Zext0, Zext1;
38930 if (!detectZextAbsDiff(Root, Zext0, Zext1))
38933 // Create the SAD instruction.
38935 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
38937 // If the original vector was wider than 8 elements, sum over the results
38938 // in the SAD vector.
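// Each iteration below shuffles the upper half of the remaining partial sums
// down onto the lower half and adds the two, halving the number of live
// elements until only element 0 is meaningful.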
38939 unsigned Stages = Log2_32(VT.getVectorNumElements());
38940 EVT SadVT = SAD.getValueType();
38942 unsigned SadElems = SadVT.getVectorNumElements();
38944 for (unsigned i = Stages - 3; i > 0; --i) {
38945 SmallVector<int, 16> Mask(SadElems, -1);
38946 for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
38947 Mask[j] = MaskEnd + j;
38950 DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
38951 SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
38955 unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
38956 // Return the lowest ExtractSizeInBits bits.
38957 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
38958 SadVT.getSizeInBits() / ExtractSizeInBits);
38959 SAD = DAG.getBitcast(ResVT, SAD);
38960 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
38961 Extract->getOperand(1));
38964 // Attempt to peek through a target shuffle and extract the scalar from the source vector.
38966 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
38967 TargetLowering::DAGCombinerInfo &DCI,
38968 const X86Subtarget &Subtarget) {
38969 if (DCI.isBeforeLegalizeOps())
38973 SDValue Src = N->getOperand(0);
38974 SDValue Idx = N->getOperand(1);
38976 EVT VT = N->getValueType(0);
38977 EVT SrcVT = Src.getValueType();
38978 EVT SrcSVT = SrcVT.getVectorElementType();
38979 unsigned SrcEltBits = SrcSVT.getSizeInBits();
38980 unsigned NumSrcElts = SrcVT.getVectorNumElements();
38982 // Don't attempt this for boolean mask vectors or unknown extraction indices.
38983 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
38986 const APInt &IdxC = N->getConstantOperandAPInt(1);
38987 if (IdxC.uge(NumSrcElts))
38990 SDValue SrcBC = peekThroughBitcasts(Src);
38992 // Handle extract(bitcast(broadcast(scalar_value))).
38993 if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
38994 SDValue SrcOp = SrcBC.getOperand(0);
38995 EVT SrcOpVT = SrcOp.getValueType();
38996 if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
38997 (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
38998 unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
38999 unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
39000 // TODO support non-zero offsets.
39002 SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
39003 SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
39009 // If we're extracting a single element from a broadcast load and there are
39010 // no other users, just create a single load.
39011 if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
39012 auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
39013 unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
39014 if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
39015 VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
39016 SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
39017 MemIntr->getBasePtr(),
39018 MemIntr->getPointerInfo(),
39019 MemIntr->getOriginalAlign(),
39020 MemIntr->getMemOperand()->getFlags());
39021 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
39026 // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
39027 // TODO: Move to DAGCombine?
39028 if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
39029 SrcBC.getValueType().isInteger() &&
39030 (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
39031 SrcBC.getScalarValueSizeInBits() ==
39032 SrcBC.getOperand(0).getValueSizeInBits()) {
39033 unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
39034 if (IdxC.ult(Scale)) {
39035 unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
39036 SDValue Scl = SrcBC.getOperand(0);
39037 EVT SclVT = Scl.getValueType();
39039 Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
39040 DAG.getShiftAmountConstant(Offset, SclVT, dl));
39042 Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
39043 Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
39048 // Handle extract(truncate(x)) for 0'th index.
39049 // TODO: Treat this as a faux shuffle?
39050 // TODO: When can we use this for general indices?
39051 if (ISD::TRUNCATE == Src.getOpcode() && SrcVT.is128BitVector() && IdxC == 0) {
39052 Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
39053 Src = DAG.getBitcast(SrcVT, Src);
39054 return DAG.getNode(N->getOpcode(), dl, VT, Src, Idx);
39057 // Resolve the target shuffle inputs and mask.
39058 SmallVector<int, 16> Mask;
39059 SmallVector<SDValue, 2> Ops;
39060 if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
39063 // Shuffle inputs must be the same size as the result.
39064 if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
39065 return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
39069 // Attempt to narrow/widen the shuffle mask to the correct size.
39070 if (Mask.size() != NumSrcElts) {
39071 if ((NumSrcElts % Mask.size()) == 0) {
39072 SmallVector<int, 16> ScaledMask;
39073 int Scale = NumSrcElts / Mask.size();
39074 narrowShuffleMaskElts(Scale, Mask, ScaledMask);
39075 Mask = std::move(ScaledMask);
39076 } else if ((Mask.size() % NumSrcElts) == 0) {
39077 // Simplify Mask based on demanded element.
39078 int ExtractIdx = (int)N->getConstantOperandVal(1);
39079 int Scale = Mask.size() / NumSrcElts;
39080 int Lo = Scale * ExtractIdx;
39081 int Hi = Scale * (ExtractIdx + 1);
39082 for (int i = 0, e = (int)Mask.size(); i != e; ++i)
39083 if (i < Lo || Hi <= i)
39084 Mask[i] = SM_SentinelUndef;
39086 SmallVector<int, 16> WidenedMask;
39087 while (Mask.size() > NumSrcElts &&
39088 canWidenShuffleElements(Mask, WidenedMask))
39089 Mask = std::move(WidenedMask);
39090 // TODO - investigate support for wider shuffle masks with known upper
39091 // undef/zero elements for implicit zero-extension.
39095 // Check if narrowing/widening failed.
39096 if (Mask.size() != NumSrcElts)
39099 int SrcIdx = Mask[IdxC.getZExtValue()];
39101 // If the shuffle source element is undef/zero then we can just accept it.
39102 if (SrcIdx == SM_SentinelUndef)
39103 return DAG.getUNDEF(VT);
39105 if (SrcIdx == SM_SentinelZero)
39106 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
39107 : DAG.getConstant(0, dl, VT);
39109 SDValue SrcOp = Ops[SrcIdx / Mask.size()];
39110 SrcIdx = SrcIdx % Mask.size();
39112 // We can only extract other elements from 128-bit vectors and in certain
39113 // circumstances, depending on SSE-level.
39114 // TODO: Investigate using extract_subvector for larger vectors.
39115 // TODO: Investigate float/double extraction if it will be just stored.
39116 if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
39117 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
39118 assert(SrcSVT == VT && "Unexpected extraction type");
39119 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
39120 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
39121 DAG.getIntPtrConstant(SrcIdx, dl));
39124 if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
39125 (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
39126 assert(VT.getSizeInBits() >= SrcEltBits && "Unexpected extraction type");
39127 unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
39128 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
39129 SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
39130 DAG.getIntPtrConstant(SrcIdx, dl));
39131 return DAG.getZExtOrTrunc(ExtOp, dl, VT);
39137 /// Extracting a scalar FP value from vector element 0 is free, so extract each
39138 /// operand first, then perform the math as a scalar op.
39139 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
39140 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
39141 SDValue Vec = ExtElt->getOperand(0);
39142 SDValue Index = ExtElt->getOperand(1);
39143 EVT VT = ExtElt->getValueType(0);
39144 EVT VecVT = Vec.getValueType();
39146 // TODO: If this is a unary/expensive/expand op, allow extraction from a
39147 // non-zero element because the shuffle+scalar op will be cheaper?
39148 if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
39151 // Vector FP compares don't fit the pattern of FP math ops (propagate, not
39152 // extract, the condition code), so deal with those as a special-case.
39153 if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
39154 EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
39155 if (OpVT != MVT::f32 && OpVT != MVT::f64)
39158 // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
39160 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
39161 Vec.getOperand(0), Index);
39162 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
39163 Vec.getOperand(1), Index);
39164 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
39167 if (VT != MVT::f32 && VT != MVT::f64)
39170 // Vector FP selects don't fit the pattern of FP math ops (because the
39171 // condition has a different type and we have to change the opcode), so deal
39172 // with those here.
39173 // FIXME: This is restricted to pre type legalization by ensuring the setcc
39174 // has i1 elements. If we loosen this we need to convert vector bool to a
39176 if (Vec.getOpcode() == ISD::VSELECT &&
39177 Vec.getOperand(0).getOpcode() == ISD::SETCC &&
39178 Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
39179 Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
39180 // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
39182 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
39183 Vec.getOperand(0).getValueType().getScalarType(),
39184 Vec.getOperand(0), Index);
39185 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
39186 Vec.getOperand(1), Index);
39187 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
39188 Vec.getOperand(2), Index);
39189 return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
39192 // TODO: This switch could include FNEG and the x86-specific FP logic ops
39193 // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
39194 // missed load folding and fma+fneg combining.
39195 switch (Vec.getOpcode()) {
39196 case ISD::FMA: // Begin 3 operands
39198 case ISD::FADD: // Begin 2 operands
39203 case ISD::FCOPYSIGN:
39206 case ISD::FMINNUM_IEEE:
39207 case ISD::FMAXNUM_IEEE:
39208 case ISD::FMAXIMUM:
39209 case ISD::FMINIMUM:
39212 case ISD::FABS: // Begin 1 operand
39217 case ISD::FNEARBYINT:
39221 case X86ISD::FRSQRT: {
39222 // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
39224 SmallVector<SDValue, 4> ExtOps;
39225 for (SDValue Op : Vec->ops())
39226 ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
39227 return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
39232 llvm_unreachable("All opcodes should return within switch");
39235 /// Try to convert a vector reduction sequence composed of binops and shuffles
39236 /// into horizontal ops.
39237 static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
39238 const X86Subtarget &Subtarget) {
39239 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
39241 // We need at least SSE2 to do anything here.
39242 if (!Subtarget.hasSSE2())
39247 DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD, ISD::FADD}, true);
39251 SDValue Index = ExtElt->getOperand(1);
39252 assert(isNullConstant(Index) &&
39253 "Reduction doesn't end in an extract from index 0");
39255 EVT VT = ExtElt->getValueType(0);
39256 EVT VecVT = Rdx.getValueType();
39257 if (VecVT.getScalarType() != VT)
39262 // vXi8 reduction - sub-128-bit vector.
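// For example (illustrative): a v8i8 value is widened to v16i8 (upper half
// undef), PSADBW against zero sums the low eight bytes into the low 64-bit
// lane, and byte 0 of that lane is the i8 reduction result.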
39263 if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
39264 if (VecVT == MVT::v4i8) {
39266 if (Subtarget.hasSSE41()) {
39267 Rdx = DAG.getBitcast(MVT::i32, Rdx);
39268 Rdx = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
39269 DAG.getConstant(0, DL, MVT::v4i32), Rdx,
39270 DAG.getIntPtrConstant(0, DL));
39271 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
39273 Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
39274 DAG.getConstant(0, DL, VecVT));
39277 if (Rdx.getValueType() == MVT::v8i8) {
39279 Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
39280 DAG.getUNDEF(MVT::v8i8));
39282 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
39283 DAG.getConstant(0, DL, MVT::v16i8));
39284 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
39285 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
39288 // Must be a >=128-bit vector with pow2 elements.
39289 if ((VecVT.getSizeInBits() % 128) != 0 ||
39290 !isPowerOf2_32(VecVT.getVectorNumElements()))
39293 // vXi8 reduction - sum lo/hi halves then use PSADBW.
39294 if (VT == MVT::i8) {
39295 while (Rdx.getValueSizeInBits() > 128) {
39297 std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
39298 VecVT = Lo.getValueType();
39299 Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
39301 assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
39303 SDValue Hi = DAG.getVectorShuffle(
39304 MVT::v16i8, DL, Rdx, Rdx,
39305 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
39306 Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
39307 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
39308 getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
39309 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
39310 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
39313 // Only use (F)HADD opcodes if they aren't microcoded or we're minimizing codesize.
39314 if (!shouldUseHorizontalOp(true, DAG, Subtarget))
39317 unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
39319 // 256-bit horizontal instructions operate on 128-bit chunks rather than
39320 // across the whole vector, so we need an extract + hop preliminary stage.
39321 // This is the only step where the operands of the hop are not the same value.
39322 // TODO: We could extend this to handle 512-bit or even longer vectors.
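// For example (illustrative): a v8i32 sum first combines its v4i32 halves
// with one hop; the loop below then needs only log2(4) = 2 further hops on
// the remaining 128-bit value.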
39323 if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
39324 ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
39325 unsigned NumElts = VecVT.getVectorNumElements();
39326 SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
39327 SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
39328 Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
39329 VecVT = Rdx.getValueType();
39331 if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
39332 !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
39335 // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
39336 unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
39337 for (unsigned i = 0; i != ReductionSteps; ++i)
39338 Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
39340 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
39343 /// Detect vector gather/scatter index generation and convert it from being a
39344 /// bunch of shuffles and extracts into a somewhat faster sequence.
39345 /// For i686, the best sequence is apparently storing the value and loading
39346 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
39347 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
39348 TargetLowering::DAGCombinerInfo &DCI,
39349 const X86Subtarget &Subtarget) {
39350 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
39353 SDValue InputVector = N->getOperand(0);
39354 SDValue EltIdx = N->getOperand(1);
39355 auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
39357 EVT SrcVT = InputVector.getValueType();
39358 EVT VT = N->getValueType(0);
39359 SDLoc dl(InputVector);
39360 bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
39361 unsigned NumSrcElts = SrcVT.getVectorNumElements();
39363 if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
39364 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
39366 // Integer Constant Folding.
39367 if (CIdx && VT.isInteger()) {
39368 APInt UndefVecElts;
39369 SmallVector<APInt, 16> EltBits;
39370 unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
39371 if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
39372 EltBits, true, false)) {
39373 uint64_t Idx = CIdx->getZExtValue();
39374 if (UndefVecElts[Idx])
39375 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
39376 return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
39382 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39383 if (TLI.SimplifyDemandedBits(
39384 SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
39385 return SDValue(N, 0);
39387 // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
39388 if ((InputVector.getOpcode() == X86ISD::PINSRB ||
39389 InputVector.getOpcode() == X86ISD::PINSRW) &&
39390 InputVector.getOperand(2) == EltIdx) {
39391 assert(SrcVT == InputVector.getOperand(0).getValueType() &&
39392 "Vector type mismatch");
39393 SDValue Scl = InputVector.getOperand(1);
39394 Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
39395 return DAG.getZExtOrTrunc(Scl, dl, VT);
39398 // TODO - Remove this once we can handle the implicit zero-extension of
39399 // X86ISD::PEXTRW/X86ISD::PEXTRB in combineHorizontalPredicateResult and
39400 // combineBasicSADPattern.
39404 // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
39405 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
39406 VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
39407 SDValue MMXSrc = InputVector.getOperand(0);
39409 // The bitcast source is a direct mmx result.
39410 if (MMXSrc.getValueType() == MVT::x86mmx)
39411 return DAG.getBitcast(VT, InputVector);
39414 // Detect mmx to i32 conversion through a v2i32 elt extract.
39415 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
39416 VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
39417 SDValue MMXSrc = InputVector.getOperand(0);
39419 // The bitcast source is a direct mmx result.
39420 if (MMXSrc.getValueType() == MVT::x86mmx)
39421 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
39424 // Check whether this extract is the root of a sum of absolute differences
39425 // pattern. This has to be done here because we really want it to happen
39426 // pre-legalization.
39427 if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
39430 // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
39431 if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
39434 // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
39435 if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
39438 if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
39441 if (SDValue V = scalarizeExtEltFP(N, DAG))
39444 // Attempt to extract a i1 element by using MOVMSK to extract the signbits
39445 // and then testing the relevant element.
39447 // Note that we only combine extracts on the *same* result number, i.e.
39448 // t0 = merge_values a0, a1, a2, a3
39449 // i1 = extract_vector_elt t0, Constant:i64<2>
39450 // i1 = extract_vector_elt t0, Constant:i64<3>
39452 // i1 = extract_vector_elt t0:1, Constant:i64<2>
39453 // since the latter would need its own MOVMSK.
39454 if (CIdx && SrcVT.getScalarType() == MVT::i1) {
39455 SmallVector<SDNode *, 16> BoolExtracts;
39456 unsigned ResNo = InputVector.getResNo();
39457 auto IsBoolExtract = [&BoolExtracts, &ResNo](SDNode *Use) {
39458 if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
39459 isa<ConstantSDNode>(Use->getOperand(1)) &&
39460 Use->getOperand(0).getResNo() == ResNo &&
39461 Use->getValueType(0) == MVT::i1) {
39462 BoolExtracts.push_back(Use);
39467 if (all_of(InputVector->uses(), IsBoolExtract) &&
39468 BoolExtracts.size() > 1) {
39469 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
39471 combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
39472 for (SDNode *Use : BoolExtracts) {
39473 // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
39474 unsigned MaskIdx = Use->getConstantOperandVal(1);
39475 APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
39476 SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
39477 SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
39478 Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
39479 DCI.CombineTo(Use, Res);
39481 return SDValue(N, 0);
39489 /// If a vector select has an operand that is -1 or 0, try to simplify the
39490 /// select to a bitwise logic operation.
39491 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
39493 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
39494 TargetLowering::DAGCombinerInfo &DCI,
39495 const X86Subtarget &Subtarget) {
39496 SDValue Cond = N->getOperand(0);
39497 SDValue LHS = N->getOperand(1);
39498 SDValue RHS = N->getOperand(2);
39499 EVT VT = LHS.getValueType();
39500 EVT CondVT = Cond.getValueType();
39502 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39504 if (N->getOpcode() != ISD::VSELECT)
39507 assert(CondVT.isVector() && "Vector select expects a vector selector!");
39509 // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
39510 // TODO: Can we assert that both operands are not zeros (because that should
39511 // get simplified at node creation time)?
39512 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
39513 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
39515 // If both inputs are 0/undef, create a complete zero vector.
39516 // FIXME: As noted above this should be handled by DAGCombiner/getNode.
39517 if (TValIsAllZeros && FValIsAllZeros) {
39518 if (VT.isFloatingPoint())
39519 return DAG.getConstantFP(0.0, DL, VT);
39520 return DAG.getConstant(0, DL, VT);
39523 // To use the condition operand as a bitwise mask, it must have elements that
39524 // are the same size as the select elements. I.e., the condition operand must
39525 // have already been promoted from the IR select condition type <N x i1>.
39526 // Don't check if the types themselves are equal because that excludes
39527 // vector floating-point selects.
39528 if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
39531 // Try to invert the condition if true value is not all 1s and false value is
39532 // not all 0s. Only do this if the condition has one use.
39533 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
39534 if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
39535 // Check if the selector will be produced by CMPP*/PCMP*.
39536 Cond.getOpcode() == ISD::SETCC &&
39537 // Check if SETCC has already been promoted.
39538 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
39540 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
39542 if (TValIsAllZeros || FValIsAllOnes) {
39543 SDValue CC = Cond.getOperand(2);
39544 ISD::CondCode NewCC = ISD::getSetCCInverse(
39545 cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
39546 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
39548 std::swap(LHS, RHS);
39549 TValIsAllOnes = FValIsAllOnes;
39550 FValIsAllZeros = TValIsAllZeros;
39554 // Cond value must be 'sign splat' to be converted to a logical op.
39555 if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
39558 // vselect Cond, 111..., 000... -> Cond
39559 if (TValIsAllOnes && FValIsAllZeros)
39560 return DAG.getBitcast(VT, Cond);
39562 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
39565 // vselect Cond, 111..., X -> or Cond, X
39566 if (TValIsAllOnes) {
39567 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
39568 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
39569 return DAG.getBitcast(VT, Or);
39572 // vselect Cond, X, 000... -> and Cond, X
39573 if (FValIsAllZeros) {
39574 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
39575 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
39576 return DAG.getBitcast(VT, And);
39579 // vselect Cond, 000..., X -> andn Cond, X
39580 if (TValIsAllZeros) {
39581 MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
39582 SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
39583 SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
39584 SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
39585 return DAG.getBitcast(VT, AndN);
39591 /// If both arms of a vector select are concatenated vectors, split the select,
39592 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
39593 /// vselect Cond, (concat T0, T1), (concat F0, F1) -->
39594 /// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
39595 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
39596 const X86Subtarget &Subtarget) {
39597 unsigned Opcode = N->getOpcode();
39598 if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
39601 // TODO: Split 512-bit vectors too?
39602 EVT VT = N->getValueType(0);
39603 if (!VT.is256BitVector())
39606 // TODO: Split as long as any 2 of the 3 operands are concatenated?
39607 SDValue Cond = N->getOperand(0);
39608 SDValue TVal = N->getOperand(1);
39609 SDValue FVal = N->getOperand(2);
39610 SmallVector<SDValue, 4> CatOpsT, CatOpsF;
39611 if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
39612 !collectConcatOps(TVal.getNode(), CatOpsT) ||
39613 !collectConcatOps(FVal.getNode(), CatOpsF))
39616 auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
39617 ArrayRef<SDValue> Ops) {
39618 return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
39620 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
39621 makeBlend, /*CheckBWI*/ false);
39624 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
39625 SDValue Cond = N->getOperand(0);
39626 SDValue LHS = N->getOperand(1);
39627 SDValue RHS = N->getOperand(2);
39630 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
39631 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
39632 if (!TrueC || !FalseC)
39635 // Don't do this for crazy integer types.
39636 EVT VT = N->getValueType(0);
39637 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
39640 // We're going to use the condition bit in math or logic ops. We could allow
39641 // this with a wider condition value (post-legalization it becomes an i8),
39642 // but if nothing is creating selects that late, it doesn't matter.
39643 if (Cond.getValueType() != MVT::i1)
39646 // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
39647 // 3, 5, or 9 with i32/i64, so those get transformed too.
39648 // TODO: For constants that overflow or do not differ by power-of-2 or small
39649 // multiplier, convert to 'and' + 'add'.
39650 const APInt &TrueVal = TrueC->getAPIntValue();
39651 const APInt &FalseVal = FalseC->getAPIntValue();
39653 APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
39657 APInt AbsDiff = Diff.abs();
39658 if (AbsDiff.isPowerOf2() ||
39659 ((VT == MVT::i32 || VT == MVT::i64) &&
39660 (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
39662 // We need a positive multiplier constant for shift/LEA codegen. The 'not'
39663 // of the condition can usually be folded into a compare predicate, but even
39664 // without that, the sequence should be cheaper than a CMOV alternative.
39665 if (TrueVal.slt(FalseVal)) {
39666 Cond = DAG.getNOT(DL, Cond, MVT::i1);
39667 std::swap(TrueC, FalseC);
39670 // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
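// Worked example (illustrative): select Cond, 7, 3 --> (zext(Cond) * 4) + 3,
// where the multiply by the power-of-2 difference becomes a simple shift.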
39671 SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
39673 // Multiply condition by the difference if non-one.
39674 if (!AbsDiff.isOneValue())
39675 R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
39677 // Add the base if non-zero.
39678 if (!FalseC->isNullValue())
39679 R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
39687 /// If this is a *dynamic* select (non-constant condition) and we can match
39688 /// this node with one of the variable blend instructions, restructure the
39689 /// condition so that blends can use the high (sign) bit of each element.
39690 /// This function will also call SimplifyDemandedBits on already created
39691 /// BLENDV to perform additional simplifications.
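/// (Illustrative) Variable blends such as PBLENDVB and BLENDVPS/PD select
/// each lane purely from the sign bit of the corresponding condition element,
/// so only that bit of the condition needs to be computed precisely.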
39692 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
39693 TargetLowering::DAGCombinerInfo &DCI,
39694 const X86Subtarget &Subtarget) {
39695 SDValue Cond = N->getOperand(0);
39696 if ((N->getOpcode() != ISD::VSELECT &&
39697 N->getOpcode() != X86ISD::BLENDV) ||
39698 ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
39701 // Don't optimize before the condition has been transformed to a legal type
39702 // and don't ever optimize vector selects that map to AVX512 mask-registers.
39703 unsigned BitWidth = Cond.getScalarValueSizeInBits();
39704 if (BitWidth < 8 || BitWidth > 64)
39707 // We can only handle the cases where VSELECT is directly legal on the
39708 // subtarget. We custom lower VSELECT nodes with constant conditions and
39709 // this makes it hard to see whether a dynamic VSELECT will correctly
39710 // lower, so we both check the operation's status and explicitly handle the
39711 // cases where a *dynamic* blend will fail even though a constant-condition
39712 // blend could be custom lowered.
39713 // FIXME: We should find a better way to handle this class of problems.
39714 // Potentially, we should combine constant-condition vselect nodes
39715 // pre-legalization into shuffles and not mark as many types as custom
39717 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39718 EVT VT = N->getValueType(0);
39719 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
39721 // FIXME: We don't support i16-element blends currently. We could and
39722 // should support them by making *all* the bits in the condition be set
39723 // rather than just the high bit and using an i8-element blend.
39724 if (VT.getVectorElementType() == MVT::i16)
39726 // Dynamic blending was only available from SSE4.1 onward.
39727 if (VT.is128BitVector() && !Subtarget.hasSSE41())
39729 // Byte blends are only available in AVX2.
39730 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
39732 // There are no 512-bit blend instructions that use sign bits.
39733 if (VT.is512BitVector())
39736 auto OnlyUsedAsSelectCond = [](SDValue Cond) {
39737 for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
39739 if ((UI->getOpcode() != ISD::VSELECT &&
39740 UI->getOpcode() != X86ISD::BLENDV) ||
39741 UI.getOperandNo() != 0)
39747 APInt DemandedBits(APInt::getSignMask(BitWidth));
39749 if (OnlyUsedAsSelectCond(Cond)) {
39751 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
39752 !DCI.isBeforeLegalizeOps());
39753 if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
39756 // If we changed the computation somewhere in the DAG, this change will
39757 // affect all users of Cond. Update all the nodes so that we do not use
39758 // the generic VSELECT anymore. Otherwise, we may perform wrong
39759 // optimizations as we messed with the actual expectation for the vector boolean contents.
39761 for (SDNode *U : Cond->uses()) {
39762 if (U->getOpcode() == X86ISD::BLENDV)
39765 SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
39766 Cond, U->getOperand(1), U->getOperand(2));
39767 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
39768 DCI.AddToWorklist(U);
39770 DCI.CommitTargetLoweringOpt(TLO);
39771 return SDValue(N, 0);
39774 // Otherwise we can still at least try to simplify multiple use bits.
39775 if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
39776 return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
39777 N->getOperand(1), N->getOperand(2));
39783 // Try to match (or (and (M, (sub 0, X)), (pandn M, X))),
39784 // which is a special case of:
39785 // (select M, (sub 0, X), X)
39787 // Per http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
39788 // We know that, if fNegate is 0 or 1:
39789 // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
39791 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
39792 // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
39793 // ( M ? -X : X) == ((X ^ M ) + (M & 1))
39794 // This lets us transform our vselect to:
39795 // (add (xor X, M), (and M, 1))
39797 // or equivalently, since (and M, 1) == -M for an all-ones/zero mask: (sub (xor X, M), M)
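// Illustrative check: with X = 5 and an all-ones mask M = -1, (xor X, M) = -6
// and (sub -6, -1) = -5 = -X; with M = 0, (xor X, 0) = 5 and (sub 5, 0) = 5 = X.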
39798 static SDValue combineLogicBlendIntoConditionalNegate(
39799 EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
39800 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
39801 EVT MaskVT = Mask.getValueType();
39802 assert(MaskVT.isInteger() &&
39803 DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
39804 "Mask must be zero/all-bits");
39806 if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
39808 if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
39811 auto IsNegV = [](SDNode *N, SDValue V) {
39812 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
39813 ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
39817 if (IsNegV(Y.getNode(), X))
39819 else if (IsNegV(X.getNode(), Y))
39824 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
39825 SDValue SubOp2 = Mask;
39827 // If the negate was on the false side of the select, then
39828 // the operands of the SUB need to be swapped. PR 27251.
39829 // This is because the pattern being matched above is
39830 // (vselect M, (sub (0, X), X) -> (sub (xor X, M), M)
39831 // but if the pattern matched was
39832 // (vselect M, X, (sub (0, X))), that is really negation of the pattern
39833 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
39834 // pattern also needs to be a negation of the replacement pattern above.
39835 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
39836 // sub accomplishes the negation of the replacement pattern.
39838 std::swap(SubOp1, SubOp2);
39840 SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
39841 return DAG.getBitcast(VT, Res);
39844 /// Do target-specific dag combines on SELECT and VSELECT nodes.
39845 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
39846 TargetLowering::DAGCombinerInfo &DCI,
39847 const X86Subtarget &Subtarget) {
39849 SDValue Cond = N->getOperand(0);
39850 SDValue LHS = N->getOperand(1);
39851 SDValue RHS = N->getOperand(2);
39853 // Try simplification again because we use this function to optimize
39854 // BLENDV nodes that are not handled by the generic combiner.
39855 if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
39858 EVT VT = LHS.getValueType();
39859 EVT CondVT = Cond.getValueType();
39860 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39861 bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
39863 // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
39864 // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
39865 // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
39866 if (CondVT.isVector() && CondVT.isInteger() &&
39867 CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
39868 (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
39869 DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
39870 if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
39871 DL, DAG, Subtarget))
39874 // Convert vselects with constant condition into shuffles.
39875 if (CondConstantVector && DCI.isBeforeLegalizeOps()) {
39876 SmallVector<int, 64> Mask;
39877 if (createShuffleMaskFromVSELECT(Mask, Cond))
39878 return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
39881 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
39882 // instructions match the semantics of the common C idiom x<y?x:y but not
39883 // x<=y?x:y, because of how they handle negative zero (which can be
39884 // ignored in unsafe-math mode).
39885 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
39886 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
39887 VT != MVT::f80 && VT != MVT::f128 &&
39888 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
39889 (Subtarget.hasSSE2() ||
39890 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
39891 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
39893 unsigned Opcode = 0;
39894 // Check for x CC y ? x : y.
39895 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
39896 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
39900 // Converting this to a min would handle NaNs incorrectly, and swapping
39901 // the operands would cause it to handle comparisons between positive
39902 // and negative zero incorrectly.
39903 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
39904 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
39905 !(DAG.isKnownNeverZeroFloat(LHS) ||
39906 DAG.isKnownNeverZeroFloat(RHS)))
39908 std::swap(LHS, RHS);
39910 Opcode = X86ISD::FMIN;
39913 // Converting this to a min would handle comparisons between positive
39914 // and negative zero incorrectly.
39915 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
39916 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
39918 Opcode = X86ISD::FMIN;
39921 // Converting this to a min would handle both negative zeros and NaNs
39922 // incorrectly, but we can swap the operands to fix both.
39923 std::swap(LHS, RHS);
39928 Opcode = X86ISD::FMIN;
39932 // Converting this to a max would handle comparisons between positive
39933 // and negative zero incorrectly.
39934 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
39935 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
39937 Opcode = X86ISD::FMAX;
39940 // Converting this to a max would handle NaNs incorrectly, and swapping
39941 // the operands would cause it to handle comparisons between positive
39942 // and negative zero incorrectly.
39943 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
39944 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
39945 !(DAG.isKnownNeverZeroFloat(LHS) ||
39946 DAG.isKnownNeverZeroFloat(RHS)))
39948 std::swap(LHS, RHS);
39950 Opcode = X86ISD::FMAX;
39953 // Converting this to a max would handle both negative zeros and NaNs
39954 // incorrectly, but we can swap the operands to fix both.
39955 std::swap(LHS, RHS);
39960 Opcode = X86ISD::FMAX;
39963 // Check for x CC y ? y : x -- a min/max with reversed arms.
39964 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
39965 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
39969 // Converting this to a min would handle comparisons between positive
39970 // and negative zero incorrectly, and swapping the operands would
39971 // cause it to handle NaNs incorrectly.
39972 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
39973 !(DAG.isKnownNeverZeroFloat(LHS) ||
39974 DAG.isKnownNeverZeroFloat(RHS))) {
39975 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
39977 std::swap(LHS, RHS);
39979 Opcode = X86ISD::FMIN;
39982 // Converting this to a min would handle NaNs incorrectly.
39983 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
39985 Opcode = X86ISD::FMIN;
39988 // Converting this to a min would handle both negative zeros and NaNs
39989 // incorrectly, but we can swap the operands to fix both.
39990 std::swap(LHS, RHS);
39995 Opcode = X86ISD::FMIN;
39999 // Converting this to a max would handle NaNs incorrectly.
40000 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
40002 Opcode = X86ISD::FMAX;
40005 // Converting this to a max would handle comparisons between positive
40006 // and negative zero incorrectly, and swapping the operands would
40007 // cause it to handle NaNs incorrectly.
40008 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
40009 !DAG.isKnownNeverZeroFloat(LHS) &&
40010 !DAG.isKnownNeverZeroFloat(RHS)) {
40011 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
40013 std::swap(LHS, RHS);
40015 Opcode = X86ISD::FMAX;
40018 // Converting this to a max would handle both negative zeros and NaNs
40019 // incorrectly, but we can swap the operands to fix both.
40020 std::swap(LHS, RHS);
40025 Opcode = X86ISD::FMAX;
40031 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
40034 // Some mask scalar intrinsics rely on checking if only one bit is set
40035 // and implement it in C code like this:
40036 // A[0] = (U & 1) ? A[0] : W[0];
40037 // This creates some redundant instructions that break pattern matching.
40038 // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
40039 if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
40040 Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
40041 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
40042 SDValue AndNode = Cond.getOperand(0);
40043 if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
40044 isNullConstant(Cond.getOperand(1)) &&
40045 isOneConstant(AndNode.getOperand(1))) {
40046 // LHS and RHS swapped due to
40047 // setcc outputting 1 when AND resulted in 0 and vice versa.
40048 AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
40049 return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
40053 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
40054 // lowering on KNL. In this case we convert it to
40055 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
40056 // The same applies to all vectors of i8 and i16 elements without BWI.
40057 // Make sure we extend these even before type legalization gets a chance to
40058 // split wide vectors.
40059 // Since SKX these selects have a proper lowering.
40060 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
40061 CondVT.getVectorElementType() == MVT::i1 &&
40062 (VT.getVectorElementType() == MVT::i8 ||
40063 VT.getVectorElementType() == MVT::i16)) {
40064 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
40065 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
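// For example (illustrative): (v16i8 (vselect (v16i1 K), X, Y)) becomes
// (v16i8 (vselect (v16i8 (sign_extend K)), X, Y)), which can then be matched
// to a byte blend such as (V)PBLENDVB instead of being split on KNL-class
// targets once the v16i1 mask is legalized.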
40068 // AVX512 - Extend select with zero to merge with target shuffle.
40069 // select(mask, extract_subvector(shuffle(x)), zero) -->
40070 // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
40071 // TODO - support non target shuffles as well.
40072 if (Subtarget.hasAVX512() && CondVT.isVector() &&
40073 CondVT.getVectorElementType() == MVT::i1) {
40074 auto SelectableOp = [&TLI](SDValue Op) {
40075 return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
40076 isTargetShuffle(Op.getOperand(0).getOpcode()) &&
40077 isNullConstant(Op.getOperand(1)) &&
40078 TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
40079 Op.hasOneUse() && Op.getOperand(0).hasOneUse();
40082 bool SelectableLHS = SelectableOp(LHS);
40083 bool SelectableRHS = SelectableOp(RHS);
40084 bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
40085 bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
40087 if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
40088 EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
40089 : RHS.getOperand(0).getValueType();
40090 unsigned NumSrcElts = SrcVT.getVectorNumElements();
40091 EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
40092 LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
40093 VT.getSizeInBits());
40094 RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
40095 VT.getSizeInBits());
40096 Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
40097 DAG.getUNDEF(SrcCondVT), Cond,
40098 DAG.getIntPtrConstant(0, DL));
40099 SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
40100 return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
40104 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
40107 // Canonicalize max and min:
40108 // (x > y) ? x : y -> (x >= y) ? x : y
40109 // (x < y) ? x : y -> (x <= y) ? x : y
40110 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
40111 // the need for an extra compare
40112 // against zero. e.g.
40113 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
40115 // testl %edi, %edi
40117 // cmovgl %edi, %eax
40121 // cmovsl %eax, %edi
40122 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
40123 Cond.hasOneUse() &&
40124 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
40125 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
40126 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
40131 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
40132 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
40133 Cond.getOperand(0), Cond.getOperand(1), NewCC);
40134 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
40139 // Match VSELECTs into subs with unsigned saturation.
40140 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
40141 // psubus is available in SSE2 for i8 and i16 vectors.
40142 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
40143 isPowerOf2_32(VT.getVectorNumElements()) &&
40144 (VT.getVectorElementType() == MVT::i8 ||
40145 VT.getVectorElementType() == MVT::i16)) {
40146 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
40148 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
40149 // left side invert the predicate to simplify logic below.
40151 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
40153 CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
40154 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
40158 if (Other.getNode() && Other->getNumOperands() == 2 &&
40159 Other->getOperand(0) == Cond.getOperand(0)) {
40160 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
40161 SDValue CondRHS = Cond->getOperand(1);
40163 // Look for a general sub with unsigned saturation first.
40164 // x >= y ? x-y : 0 --> subus x, y
40165 // x > y ? x-y : 0 --> subus x, y
40166 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
40167 Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
40168 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
40170 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
40171 if (isa<BuildVectorSDNode>(CondRHS)) {
40172 // If the RHS is a constant we have to reverse the const
40173 // canonicalization.
40174 // x > C-1 ? x+-C : 0 --> subus x, C
40175 auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
40176 return (!Op && !Cond) ||
40177 (Op && Cond &&
40178 Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
40180 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
40181 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
40182 /*AllowUndefs*/ true)) {
40183 OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
40185 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
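// For example (illustrative): "x > 9 ? x + (-10) : 0" matches with C = 10
// (the condition constant 9 equals -(-10) - 1); the add constant is negated
// back to 10 and the whole pattern becomes (usubsat x, 10).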
40188 // Another special case: If C was a sign bit, the sub has been
40189 // canonicalized into a xor.
40190 // FIXME: Would it be better to use computeKnownBits to determine
40191 // whether it's safe to decanonicalize the xor?
40192 // x s< 0 ? x^C : 0 --> subus x, C
40193 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
40194 if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
40195 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
40196 OpRHSConst->getAPIntValue().isSignMask()) {
40197 // Note that we have to rebuild the RHS constant here to ensure we
40198 // don't rely on particular values of undef lanes.
40199 OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
40200 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
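// For i16 elements the only such C is 0x8000, e.g.
// x s< 0 ? x ^ 0x8000 : 0 --> usubsat x, 0x8000
// (x ^ 0x8000 equals x - 0x8000 exactly when the sign bit of x is set, and the
// saturating sub yields 0 in all other cases, matching the select).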
40208 // Match VSELECTs into add with unsigned saturation.
40209 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
40210 // paddus is available in SSE2 for i8 and i16 vectors.
40211 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
40212 isPowerOf2_32(VT.getVectorNumElements()) &&
40213 (VT.getVectorElementType() == MVT::i8 ||
40214 VT.getVectorElementType() == MVT::i16)) {
40215 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
40217 SDValue CondLHS = Cond->getOperand(0);
40218 SDValue CondRHS = Cond->getOperand(1);
40220 // Check if one of the arms of the VSELECT is a vector with all bits set.
40221 // If it's on the left side invert the predicate to simplify logic below.
40223 if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
40225 CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
40226 } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
40230 if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
40231 SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
40233 // Canonicalize condition operands.
40234 if (CC == ISD::SETUGE) {
40235 std::swap(CondLHS, CondRHS);
40239 // We can test against either of the addition operands.
40240 // x <= x+y ? x+y : ~0 --> addus x, y
40241 // x+y >= x ? x+y : ~0 --> addus x, y
40242 if (CC == ISD::SETULE && Other == CondRHS &&
40243 (OpLHS == CondLHS || OpRHS == CondLHS))
40244 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
40246 if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
40247 CondLHS == OpLHS) {
40248 // If the RHS is a constant we have to reverse the const
40249 // canonicalization.
40250 // x > ~C ? x+C : ~0 --> addus x, C
40251 auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
40252 return Cond->getAPIntValue() == ~Op->getAPIntValue();
40254 if (CC == ISD::SETULE &&
40255 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
40256 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
40261 // Check if the first operand is all zeros and Cond type is vXi1.
40262 // If this is an avx512 target we can improve the use of zero masking by
40263 // swapping the operands and inverting the condition.
40264 if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
40265 Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
40266 ISD::isBuildVectorAllZeros(LHS.getNode()) &&
40267 !ISD::isBuildVectorAllZeros(RHS.getNode())) {
40268 // Invert the cond to not(cond) : xor(op,allones)=not(op)
40269 SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
40270 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
40271 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
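// For example: (vselect K, zero, X) becomes (vselect (not K), X, zero); with
// the zero moved into the false operand this matches the zero-masking ({z})
// forms of AVX512 instructions directly.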
40274 // Early exit check
40275 if (!TLI.isTypeLegal(VT))
40278 if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
40281 if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
40284 if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
40287 // select(~Cond, X, Y) -> select(Cond, Y, X)
40288 if (CondVT.getScalarType() != MVT::i1)
40289 if (SDValue CondNot = IsNOT(Cond, DAG))
40290 return DAG.getNode(N->getOpcode(), DL, VT,
40291 DAG.getBitcast(CondVT, CondNot), RHS, LHS);
40293 // Try to optimize vXi1 selects if both operands are either all constants or
40294 // bitcasts from scalar integer type. In that case we can convert the operands
40295 // to integer and use an integer select which will be converted to a CMOV.
40296 // We need to take a little bit of care to avoid creating an i64 type after
40297 // type legalization.
40298 if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
40299 VT.getVectorElementType() == MVT::i1 &&
40300 (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
40301 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
40302 bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
40303 bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());
40306 (LHS.getOpcode() == ISD::BITCAST &&
40307 LHS.getOperand(0).getValueType() == IntVT)) &&
40309 (RHS.getOpcode() == ISD::BITCAST &&
40310 RHS.getOperand(0).getValueType() == IntVT))) {
40312 LHS = combinevXi1ConstantToInteger(LHS, DAG);
40314 LHS = LHS.getOperand(0);
40317 RHS = combinevXi1ConstantToInteger(RHS, DAG);
40319 RHS = RHS.getOperand(0);
40321 SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
40322 return DAG.getBitcast(VT, Select);
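// For example (illustrative):
// (select i1 %c, (v8i1 bitcast(i8 %x)), (v8i1 bitcast(i8 %y)))
// becomes (v8i1 bitcast (select i1 %c, i8 %x, i8 %y)), and the scalar i8
// select is then lowered as a CMOV.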
40326 // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
40327 // single bits, then invert the predicate and swap the select operands.
40328 // This can lower using a vector shift bit-hack rather than mask and compare.
40329 if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
40330 N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
40331 Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
40332 Cond.getOperand(0).getOpcode() == ISD::AND &&
40333 isNullOrNullSplat(Cond.getOperand(1)) &&
40334 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
40335 Cond.getOperand(0).getValueType() == VT) {
40336 // The 'and' mask must be composed of power-of-2 constants.
40337 SDValue And = Cond.getOperand(0);
40338 auto *C = isConstOrConstSplat(And.getOperand(1));
40339 if (C && C->getAPIntValue().isPowerOf2()) {
40340 // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
40342 DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
40343 return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
40346 // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
40347 // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
40348 // 16-bit lacks a proper blendv.
40349 unsigned EltBitWidth = VT.getScalarSizeInBits();
40350 bool CanShiftBlend =
40351 TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
40352 (Subtarget.hasAVX2() && EltBitWidth == 64) ||
40353 (Subtarget.hasXOP()));
40354 if (CanShiftBlend &&
40355 ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
40356 return C->getAPIntValue().isPowerOf2();
40358 // Create a left-shift constant to get the mask bits over to the sign-bit.
40359 SDValue Mask = And.getOperand(1);
40360 SmallVector<int, 32> ShlVals;
40361 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
40362 auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
40363 ShlVals.push_back(EltBitWidth - 1 -
40364 MaskVal->getAPIntValue().exactLogBase2());
40366 // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
40367 SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
40368 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
40370 DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
40371 return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
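// For example: with a v4i32 'and' mask of <1, 2, 4, 8>, ShlVals becomes
// <31, 30, 29, 28>; the shifts move each tested bit into the sign bit, so the
// original "masked bit == 0" test turns into a simple "shifted value < 0" test
// with the select arms swapped.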
40379 /// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
40380 /// to:
40381 /// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
40382 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
40383 /// Note that this is only legal for some op/cc combinations.
40384 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
40386 const X86Subtarget &Subtarget) {
40387 // This combine only operates on CMP-like nodes.
40388 if (!(Cmp.getOpcode() == X86ISD::CMP ||
40389 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
40392 // Can't replace the cmp if it has more uses than the one we're looking at.
40393 // FIXME: We would like to be able to handle this, but would need to make sure
40394 // all uses were updated.
40395 if (!Cmp.hasOneUse())
40398 // This only applies to variations of the common case:
40399 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
40400 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
40401 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
40402 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
40403 // Using the proper condcodes (see below), overflow is checked for.
40405 // FIXME: We can generalize both constraints:
40406 // - XOR/OR/AND (if they were made to survive AtomicExpand)
40408 // if the result is compared.
40410 SDValue CmpLHS = Cmp.getOperand(0);
40411 SDValue CmpRHS = Cmp.getOperand(1);
40413 if (!CmpLHS.hasOneUse())
40416 unsigned Opc = CmpLHS.getOpcode();
40417 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
40420 SDValue OpRHS = CmpLHS.getOperand(2);
40421 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
40425 APInt Addend = OpRHSC->getAPIntValue();
40426 if (Opc == ISD::ATOMIC_LOAD_SUB)
40429 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
40433 APInt Comparison = CmpRHSC->getAPIntValue();
40435 // If the addend is the negation of the comparison value, then we can do
40436 // a full comparison by emitting the atomic arithmetic as a locked sub.
40437 if (Comparison == -Addend) {
40438 // The CC is fine, but we need to rewrite the LHS of the comparison as an
40440 auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
40441 auto AtomicSub = DAG.getAtomic(
40442 ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
40443 /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
40444 /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
40445 AN->getMemOperand());
40446 auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
40447 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
40448 DAG.getUNDEF(CmpLHS.getValueType()));
40449 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
40453 // We can handle comparisons with zero in a number of cases by manipulating
40455 if (!Comparison.isNullValue())
40458 if (CC == X86::COND_S && Addend == 1)
40459 CC = X86::COND_LE;
40460 else if (CC == X86::COND_NS && Addend == 1)
40461 CC = X86::COND_G;
40462 else if (CC == X86::COND_G && Addend == -1)
40463 CC = X86::COND_GE;
40464 else if (CC == X86::COND_LE && Addend == -1)
40465 CC = X86::COND_L;
40466 else
40467 return SDValue();
40469 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
40470 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
40471 DAG.getUNDEF(CmpLHS.getValueType()));
40472 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
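// For example (illustrative): testing (cmp (atomic_load_add %p, 1), 0) with
// COND_S instead reuses the EFLAGS of the LOCK'ed add itself with COND_LE,
// following the (icmp slt x, 0) -> (icmp sle (add x, 1), 0) rewrite listed
// above, so the separate CMP disappears.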
40476 // Check whether a boolean test is testing a boolean value generated by
40477 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition flag.
40480 // Simplify the following patterns:
40481 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
40482 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
40483 // to (Op EFLAGS Cond)
40485 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
40486 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
40487 // to (Op EFLAGS !Cond)
40489 // where Op could be BRCOND or CMOV.
40491 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
40492 // This combine only operates on CMP-like nodes.
40493 if (!(Cmp.getOpcode() == X86ISD::CMP ||
40494 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
40497 // Quit if not used as a boolean value.
40498 if (CC != X86::COND_E && CC != X86::COND_NE)
40501 // Check CMP operands. One of them should be 0 or 1 and the other should be
40502 // an SetCC or extended from it.
40503 SDValue Op1 = Cmp.getOperand(0);
40504 SDValue Op2 = Cmp.getOperand(1);
40507 const ConstantSDNode* C = nullptr;
40508 bool needOppositeCond = (CC == X86::COND_E);
40509 bool checkAgainstTrue = false; // Is it a comparison against 1?
40511 if ((C = dyn_cast<ConstantSDNode>(Op1)))
40513 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
40515 else // Quit if all operands are not constants.
40518 if (C->getZExtValue() == 1) {
40519 needOppositeCond = !needOppositeCond;
40520 checkAgainstTrue = true;
40521 } else if (C->getZExtValue() != 0)
40522 // Quit if the constant is neither 0 or 1.
40525 bool truncatedToBoolWithAnd = false;
40526 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
40527 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
40528 SetCC.getOpcode() == ISD::TRUNCATE ||
40529 SetCC.getOpcode() == ISD::AND) {
40530 if (SetCC.getOpcode() == ISD::AND) {
40532 if (isOneConstant(SetCC.getOperand(0)))
40534 if (isOneConstant(SetCC.getOperand(1)))
40538 SetCC = SetCC.getOperand(OpIdx);
40539 truncatedToBoolWithAnd = true;
40541 SetCC = SetCC.getOperand(0);
40544 switch (SetCC.getOpcode()) {
40545 case X86ISD::SETCC_CARRY:
40546 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
40547 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
40548 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
40549 // truncated to i1 using 'and'.
40550 if (checkAgainstTrue && !truncatedToBoolWithAnd)
40552 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
40553 "Invalid use of SETCC_CARRY!");
40555 case X86ISD::SETCC:
40556 // Set the condition code or opposite one if necessary.
40557 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
40558 if (needOppositeCond)
40559 CC = X86::GetOppositeBranchCondition(CC);
40560 return SetCC.getOperand(1);
40561 case X86ISD::CMOV: {
40562 // Check whether the false/true values are canonical, i.e. 0 or 1.
40563 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
40564 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
40565 // Quit if true value is not a constant.
40568 // Quit if false value is not a constant.
40570 SDValue Op = SetCC.getOperand(0);
40571 // Skip 'zext' or 'trunc' node.
40572 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
40573 Op.getOpcode() == ISD::TRUNCATE)
40574 Op = Op.getOperand(0);
40575 // A special case for rdrand/rdseed, where 0 is set if false cond is found.
40577 if ((Op.getOpcode() != X86ISD::RDRAND &&
40578 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
40581 // Quit if false value is not the constant 0 or 1.
40582 bool FValIsFalse = true;
40583 if (FVal && FVal->getZExtValue() != 0) {
40584 if (FVal->getZExtValue() != 1)
40586 // If FVal is 1, opposite cond is needed.
40587 needOppositeCond = !needOppositeCond;
40588 FValIsFalse = false;
40590 // Quit if TVal is not the constant opposite of FVal.
40591 if (FValIsFalse && TVal->getZExtValue() != 1)
40593 if (!FValIsFalse && TVal->getZExtValue() != 0)
40595 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
40596 if (needOppositeCond)
40597 CC = X86::GetOppositeBranchCondition(CC);
40598 return SetCC.getOperand(3);
40605 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
40606 /// Match:
40607 /// (X86or (X86setcc) (X86setcc))
40608 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
40609 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
40610 X86::CondCode &CC1, SDValue &Flags,
40612 if (Cond->getOpcode() == X86ISD::CMP) {
40613 if (!isNullConstant(Cond->getOperand(1)))
40616 Cond = Cond->getOperand(0);
40621 SDValue SetCC0, SetCC1;
40622 switch (Cond->getOpcode()) {
40623 default: return false;
40630 SetCC0 = Cond->getOperand(0);
40631 SetCC1 = Cond->getOperand(1);
40635 // Make sure we have SETCC nodes, using the same flags value.
40636 if (SetCC0.getOpcode() != X86ISD::SETCC ||
40637 SetCC1.getOpcode() != X86ISD::SETCC ||
40638 SetCC0->getOperand(1) != SetCC1->getOperand(1))
40641 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
40642 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
40643 Flags = SetCC0->getOperand(1);
40647 // When legalizing carry, we create carries via add X, -1.
40648 // If that comes from an actual carry, via setcc, we reuse its EFLAGS directly.
40650 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
40651 if (EFLAGS.getOpcode() == X86ISD::ADD) {
40652 if (isAllOnesConstant(EFLAGS.getOperand(1))) {
40653 SDValue Carry = EFLAGS.getOperand(0);
40654 while (Carry.getOpcode() == ISD::TRUNCATE ||
40655 Carry.getOpcode() == ISD::ZERO_EXTEND ||
40656 Carry.getOpcode() == ISD::SIGN_EXTEND ||
40657 Carry.getOpcode() == ISD::ANY_EXTEND ||
40658 (Carry.getOpcode() == ISD::AND &&
40659 isOneConstant(Carry.getOperand(1))))
40660 Carry = Carry.getOperand(0);
40661 if (Carry.getOpcode() == X86ISD::SETCC ||
40662 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
40663 // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
40664 uint64_t CarryCC = Carry.getConstantOperandVal(0);
40665 SDValue CarryOp1 = Carry.getOperand(1);
40666 if (CarryCC == X86::COND_B)
40668 if (CarryCC == X86::COND_A) {
40669 // Try to convert COND_A into COND_B in an attempt to facilitate
40670 // materializing "setb reg".
40672 // Do not flip "e > c", where "c" is a constant, because Cmp
40673 // instruction cannot take an immediate as its first operand.
40675 if (CarryOp1.getOpcode() == X86ISD::SUB &&
40676 CarryOp1.getNode()->hasOneUse() &&
40677 CarryOp1.getValueType().isInteger() &&
40678 !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
40679 SDValue SubCommute =
40680 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
40681 CarryOp1.getOperand(1), CarryOp1.getOperand(0));
40682 return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
40685 // If this is a check of the z flag of an add with 1, switch to the
40687 if (CarryCC == X86::COND_E &&
40688 CarryOp1.getOpcode() == X86ISD::ADD &&
40689 isOneConstant(CarryOp1.getOperand(1)))
40698 /// If we are inverting a PTEST/TESTP operand, attempt to adjust the CC
40699 /// to avoid the inversion.
40700 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
40702 const X86Subtarget &Subtarget) {
40703 // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
40704 if (EFLAGS.getOpcode() != X86ISD::PTEST &&
40705 EFLAGS.getOpcode() != X86ISD::TESTP)
40708 // PTEST/TESTP sets EFLAGS as:
40709 // TESTZ: ZF = (Op0 & Op1) == 0
40710 // TESTC: CF = (~Op0 & Op1) == 0
40711 // TESTNZC: ZF == 0 && CF == 0
40712 EVT VT = EFLAGS.getValueType();
40713 SDValue Op0 = EFLAGS.getOperand(0);
40714 SDValue Op1 = EFLAGS.getOperand(1);
40715 EVT OpVT = Op0.getValueType();
40717 // TEST*(~X,Y) == TEST*(X,Y)
40718 if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
40719 X86::CondCode InvCC;
40723 InvCC = X86::COND_E;
40726 // !testc -> !testz.
40727 InvCC = X86::COND_NE;
40731 InvCC = X86::COND_B;
40734 // !testz -> !testc.
40735 InvCC = X86::COND_AE;
40739 // testnzc -> testnzc (no change).
40743 InvCC = X86::COND_INVALID;
40747 if (InvCC != X86::COND_INVALID) {
40749 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
40750 DAG.getBitcast(OpVT, NotOp0), Op1);
40754 if (CC == X86::COND_E || CC == X86::COND_NE) {
40755 // TESTZ(X,~Y) == TESTC(Y,X)
40756 if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
40757 CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
40758 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
40759 DAG.getBitcast(OpVT, NotOp1), Op0);
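// (This is valid because ZF for TESTZ(X, ~Y) is ((X & ~Y) == 0), which is the
// same predicate as CF for the rebuilt node with operands (Y, X), i.e.
// ((~Y & X) == 0); hence E maps to B and NE maps to AE.)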
40763 SDValue BC = peekThroughBitcasts(Op0);
40764 EVT BCVT = BC.getValueType();
40765 assert(BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
40766 "Unexpected vector type");
40768 // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
40769 if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
40770 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
40771 DAG.getBitcast(OpVT, BC.getOperand(0)),
40772 DAG.getBitcast(OpVT, BC.getOperand(1)));
40775 // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
40776 if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
40777 CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
40778 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
40779 DAG.getBitcast(OpVT, BC.getOperand(0)),
40780 DAG.getBitcast(OpVT, BC.getOperand(1)));
40783 // If every element is an all-sign value, see if we can use MOVMSK to
40784 // more efficiently extract the sign bits and compare that.
40785 // TODO: Handle TESTC with comparison inversion.
40786 // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
40787 // MOVMSK combines to make sure it's never worse than PTEST?
40788 unsigned EltBits = BCVT.getScalarSizeInBits();
40789 if (DAG.ComputeNumSignBits(BC) == EltBits) {
40790 assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
40791 APInt SignMask = APInt::getSignMask(EltBits);
40792 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40794 TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
40795 // For vXi16 cases we need to use pmovmskb and keep only every other (high-byte) sign bit.
40798 if (EltBits == 16) {
40799 MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
40800 Res = DAG.getBitcast(MovmskVT, Res);
40801 Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
40802 Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
40803 DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
40805 Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
40807 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
40808 DAG.getConstant(0, DL, MVT::i32));
40813 // TESTZ(-1,X) == TESTZ(X,X)
40814 if (ISD::isBuildVectorAllOnes(Op0.getNode()))
40815 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);
40817 // TESTZ(X,-1) == TESTZ(X,X)
40818 if (ISD::isBuildVectorAllOnes(Op1.getNode()))
40819 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
40825 // Attempt to simplify the MOVMSK input based on the comparison type.
40826 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
40828 const X86Subtarget &Subtarget) {
40829 // Handle eq/ne against zero (any_of).
40830 // Handle eq/ne against -1 (all_of).
40831 if (!(CC == X86::COND_E || CC == X86::COND_NE))
40833 if (EFLAGS.getValueType() != MVT::i32)
40835 unsigned CmpOpcode = EFLAGS.getOpcode();
40836 if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
40838 auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
40841 const APInt &CmpVal = CmpConstant->getAPIntValue();
40843 SDValue CmpOp = EFLAGS.getOperand(0);
40844 unsigned CmpBits = CmpOp.getValueSizeInBits();
40845 assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");
40847 // Peek through any truncate.
40848 if (CmpOp.getOpcode() == ISD::TRUNCATE)
40849 CmpOp = CmpOp.getOperand(0);
40851 // Bail if we don't find a MOVMSK.
40852 if (CmpOp.getOpcode() != X86ISD::MOVMSK)
40855 SDValue Vec = CmpOp.getOperand(0);
40856 MVT VecVT = Vec.getSimpleValueType();
40857 assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
40858 "Unexpected MOVMSK operand");
40859 unsigned NumElts = VecVT.getVectorNumElements();
40860 unsigned NumEltBits = VecVT.getScalarSizeInBits();
40862 bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isNullValue();
40863 bool IsAllOf = CmpOpcode == X86ISD::SUB && NumElts <= CmpBits &&
40864 CmpVal.isMask(NumElts);
40865 if (!IsAnyOf && !IsAllOf)
40868 // See if we can peek through to a vector with a wider element type, if the
40869 // signbits extend down to all the sub-elements as well.
40870 // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
40871 // potential SimplifyDemandedBits/Elts cases.
40872 if (Vec.getOpcode() == ISD::BITCAST) {
40873 SDValue BC = peekThroughBitcasts(Vec);
40874 MVT BCVT = BC.getSimpleValueType();
40875 unsigned BCNumElts = BCVT.getVectorNumElements();
40876 unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
40877 if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
40878 BCNumEltBits > NumEltBits &&
40879 DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
40881 unsigned CmpMask = IsAnyOf ? 0 : ((1 << BCNumElts) - 1);
40882 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
40883 DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
40884 DAG.getConstant(CmpMask, DL, MVT::i32));
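// For example (illustrative): MOVMSK of a v4i32 bitcast of a v2i64 value,
// compared against 0xF (all_of), can instead MOVMSK the v2i64 value and
// compare against 0x3, provided each i64 element has at least 33 sign bits so
// both i32 halves of every element carry the same sign.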
40888 // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
40889 // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
40890 if (IsAllOf && Subtarget.hasSSE41()) {
40891 SDValue BC = peekThroughBitcasts(Vec);
40892 if (BC.getOpcode() == X86ISD::PCMPEQ &&
40893 ISD::isBuildVectorAllZeros(BC.getOperand(1).getNode())) {
40894 MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
40895 SDValue V = DAG.getBitcast(TestVT, BC.getOperand(0));
40896 return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
40900 // See if we can avoid a PACKSS by calling MOVMSK on the sources.
40901 // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
40902 // sign bits prior to the comparison with zero unless we know that
40903 // the vXi16 splats the sign bit down to the lower i8 half.
40904 // TODO: Handle all_of patterns.
40905 if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
40906 SDValue VecOp0 = Vec.getOperand(0);
40907 SDValue VecOp1 = Vec.getOperand(1);
40908 bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
40909 bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
40910 // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
40911 if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
40913 SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
40914 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
40915 Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
40917 Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
40918 DAG.getConstant(0xAAAA, DL, MVT::i16));
40920 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
40921 DAG.getConstant(0, DL, MVT::i16));
40923 // PMOVMSKB(PACKSSBW(LO(X), HI(X)))
40924 // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
40925 if (CmpBits == 16 && Subtarget.hasInt256() &&
40926 VecOp0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
40927 VecOp1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
40928 VecOp0.getOperand(0) == VecOp1.getOperand(0) &&
40929 VecOp0.getConstantOperandAPInt(1) == 0 &&
40930 VecOp1.getConstantOperandAPInt(1) == 8 &&
40931 (IsAnyOf || (SignExt0 && SignExt1))) {
40933 SDValue Result = DAG.getBitcast(MVT::v32i8, VecOp0.getOperand(0));
40934 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
40935 unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
40936 if (!SignExt0 || !SignExt1) {
40937 assert(IsAnyOf && "Only perform v16i16 signmasks for any_of patterns");
40938 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
40939 DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
40941 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
40942 DAG.getConstant(CmpMask, DL, MVT::i32));
40946 // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
40947 SmallVector<int, 32> ShuffleMask;
40948 SmallVector<SDValue, 2> ShuffleInputs;
40949 if (NumElts == CmpBits &&
40950 getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
40951 ShuffleMask, DAG) &&
40952 ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
40953 ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits()) {
40954 unsigned NumShuffleElts = ShuffleMask.size();
40955 APInt DemandedElts = APInt::getNullValue(NumShuffleElts);
40956 for (int M : ShuffleMask) {
40957 assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
40958 DemandedElts.setBit(M);
40960 if (DemandedElts.isAllOnesValue()) {
40962 SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
40963 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
40965 DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
40966 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
40967 EFLAGS.getOperand(1));
40974 /// Optimize an EFLAGS definition used according to the condition code \p CC
40975 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
40976 /// uses of chain values.
40977 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
40979 const X86Subtarget &Subtarget) {
40980 if (CC == X86::COND_B)
40981 if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
40984 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
40987 if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
40990 if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
40993 return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
40996 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
40997 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
40998 TargetLowering::DAGCombinerInfo &DCI,
40999 const X86Subtarget &Subtarget) {
41002 SDValue FalseOp = N->getOperand(0);
41003 SDValue TrueOp = N->getOperand(1);
41004 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
41005 SDValue Cond = N->getOperand(3);
41007 // cmov X, X, ?, ? --> X
41008 if (TrueOp == FalseOp)
41011 // Try to simplify the EFLAGS and condition code operands.
41012 // We can't always do this as FCMOV only supports a subset of X86 cond.
41013 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
41014 if (!(FalseOp.getValueType() == MVT::f80 ||
41015 (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
41016 (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
41017 !Subtarget.hasCMov() || hasFPCMov(CC)) {
41018 SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
41020 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
41024 // If this is a select between two integer constants, try to do some
41025 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
41027 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
41028 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
41029 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
41030 // larger than FalseC (the false value).
41031 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
41032 CC = X86::GetOppositeBranchCondition(CC);
41033 std::swap(TrueC, FalseC);
41034 std::swap(TrueOp, FalseOp);
41037 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
41038 // This is efficient for any integer data type (including i8/i16) and shift amount.
41040 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
41041 Cond = getSETCC(CC, Cond, DL, DAG);
41043 // Zero extend the condition if needed.
41044 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
41046 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
41047 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
41048 DAG.getConstant(ShAmt, DL, MVT::i8));
41052 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
41053 // for any integer data type, including i8/i16.
41054 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
41055 Cond = getSETCC(CC, Cond, DL, DAG);
41057 // Zero extend the condition if needed.
41058 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
41059 FalseC->getValueType(0), Cond);
41060 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
41061 SDValue(FalseC, 0));
41065 // Optimize cases that will turn into an LEA instruction. This requires
41066 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
41067 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
41068 APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
41069 assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
41070 "Implicit constant truncation");
41072 bool isFastMultiplier = false;
41073 if (Diff.ult(10)) {
41074 switch (Diff.getZExtValue()) {
41076 case 1: // result = add base, cond
41077 case 2: // result = lea base( , cond*2)
41078 case 3: // result = lea base(cond, cond*2)
41079 case 4: // result = lea base( , cond*4)
41080 case 5: // result = lea base(cond, cond*4)
41081 case 8: // result = lea base( , cond*8)
41082 case 9: // result = lea base(cond, cond*8)
41083 isFastMultiplier = true;
41088 if (isFastMultiplier) {
41089 Cond = getSETCC(CC, Cond, DL ,DAG);
41090 // Zero extend the condition if needed.
41091 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
41093 // Scale the condition by the difference.
41095 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
41096 DAG.getConstant(Diff, DL, Cond.getValueType()));
41098 // Add the base if non-zero.
41099 if (FalseC->getAPIntValue() != 0)
41100 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
41101 SDValue(FalseC, 0));
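// For example (illustrative): a cmov selecting 13 when the condition holds and
// 4 otherwise has Diff == 9, so it becomes zext(setcc) * 9 + 4, i.e. LEA-style
// math instead of materializing both constants and issuing a cmov.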
41108 // Handle these cases:
41109 // (select (x != c), e, c) -> (select (x != c), e, x),
41110 // (select (x == c), c, e) -> (select (x == c), x, e)
41111 // where the c is an integer constant, and the "select" is the combination
41112 // of CMOV and CMP.
41114 // The rationale for this change is that the conditional-move from a constant
41115 // needs two instructions, whereas conditional-move from a register needs
41116 // only one instruction.
41118 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
41119 // some instruction-combining opportunities. This opt needs to be
41120 // postponed as late as possible.
41122 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
41123 // the DCI.xxxx conditions are provided to postpone the optimization as
41124 // late as possible.
41126 ConstantSDNode *CmpAgainst = nullptr;
41127 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
41128 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
41129 !isa<ConstantSDNode>(Cond.getOperand(0))) {
41131 if (CC == X86::COND_NE &&
41132 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
41133 CC = X86::GetOppositeBranchCondition(CC);
41134 std::swap(TrueOp, FalseOp);
41137 if (CC == X86::COND_E &&
41138 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
41139 SDValue Ops[] = {FalseOp, Cond.getOperand(0),
41140 DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
41141 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
41146 // Fold and/or of setcc's to double CMOV:
41147 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
41148 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
41150 // This combine lets us generate:
41151 // cmovcc1 (jcc1 if we don't have CMOV)
41157 // cmovne (jne if we don't have CMOV)
41158 // When we can't use the CMOV instruction, it might increase branch mispredicts.
41160 // When we can use CMOV, or when there is no mispredict, this improves
41161 // throughput and reduces register pressure.
41163 if (CC == X86::COND_NE) {
41165 X86::CondCode CC0, CC1;
41167 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
41169 std::swap(FalseOp, TrueOp);
41170 CC0 = X86::GetOppositeBranchCondition(CC0);
41171 CC1 = X86::GetOppositeBranchCondition(CC1);
41174 SDValue LOps[] = {FalseOp, TrueOp,
41175 DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
41176 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
41177 SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
41179 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
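// (The rewrite is equivalent: the outer CMOV yields TrueOp when cc2 holds and
// otherwise falls back to the inner CMOV, which yields TrueOp exactly when cc1
// holds, so the combined result is TrueOp iff cc1 | cc2; the AND case works the
// same way with the operands swapped and both conditions inverted.)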
41184 // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
41185 // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
41186 // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
41187 // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
41188 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
41189 Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
41190 SDValue Add = TrueOp;
41191 SDValue Const = FalseOp;
41192 // Canonicalize the condition code for easier matching and output.
41193 if (CC == X86::COND_E)
41194 std::swap(Add, Const);
41196 // We might have replaced the constant in the cmov with the LHS of the
41197 // compare. If so change it to the RHS of the compare.
41198 if (Const == Cond.getOperand(0))
41199 Const = Cond.getOperand(1);
41201 // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
41202 if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
41203 Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
41204 (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
41205 Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
41206 Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
41207 EVT VT = N->getValueType(0);
41208 // This should constant fold.
41209 SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
41211 DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
41212 DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
41213 return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
41220 /// Different mul shrinking modes.
41221 enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
41223 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
41224 EVT VT = N->getOperand(0).getValueType();
41225 if (VT.getScalarSizeInBits() != 32)
41228 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
41229 unsigned SignBits[2] = {1, 1};
41230 bool IsPositive[2] = {false, false};
41231 for (unsigned i = 0; i < 2; i++) {
41232 SDValue Opd = N->getOperand(i);
41234 SignBits[i] = DAG.ComputeNumSignBits(Opd);
41235 IsPositive[i] = DAG.SignBitIsZero(Opd);
41238 bool AllPositive = IsPositive[0] && IsPositive[1];
41239 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
41240 // When ranges are from -128 ~ 127, use MULS8 mode.
41241 if (MinSignBits >= 25)
41242 Mode = ShrinkMode::MULS8;
41243 // When ranges are from 0 ~ 255, use MULU8 mode.
41244 else if (AllPositive && MinSignBits >= 24)
41245 Mode = ShrinkMode::MULU8;
41246 // When ranges are from -32768 ~ 32767, use MULS16 mode.
41247 else if (MinSignBits >= 17)
41248 Mode = ShrinkMode::MULS16;
41249 // When ranges are from 0 ~ 65535, use MULU16 mode.
41250 else if (AllPositive && MinSignBits >= 16)
41251 Mode = ShrinkMode::MULU16;
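// For example: if both operands were sign-extended from i8 to i32,
// ComputeNumSignBits reports at least 25 sign bits, selecting MULS8; if both
// were zero-extended from i8, there are 24 known sign bits and the sign bit is
// known zero, selecting MULU8.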
41257 /// When the operands of a vector mul are extended from smaller-sized values,
41258 /// like i8 and i16, the type of the mul may be shrunk to generate more
41259 /// efficient code. Two typical patterns are handled:
41261 /// %2 = sext/zext <N x i8> %1 to <N x i32>
41262 /// %4 = sext/zext <N x i8> %3 to <N x i32>
41263 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
41264 /// %5 = mul <N x i32> %2, %4
41267 /// %2 = zext/sext <N x i16> %1 to <N x i32>
41268 /// %4 = zext/sext <N x i16> %3 to <N x i32>
41269 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
41270 /// %5 = mul <N x i32> %2, %4
41272 /// There are four mul shrinking modes:
41273 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
41274 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
41275 /// generate pmullw+sext32 for it (MULS8 mode).
41276 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
41277 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
41278 /// generate pmullw+zext32 for it (MULU8 mode).
41279 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
41280 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
41281 /// generate pmullw+pmulhw for it (MULS16 mode).
41282 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
41283 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
41284 /// generate pmullw+pmulhuw for it (MULU16 mode).
41285 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
41286 const X86Subtarget &Subtarget) {
41287 // Check for legality
41288 // pmullw/pmulhw are not supported by SSE.
41289 if (!Subtarget.hasSSE2())
41292 // Check for profitability
41293 // pmulld is supported since SSE41. It is better to use pmulld
41294 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than the pmullw/pmulhw expansion.
41296 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
41297 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
41301 if (!canReduceVMulWidth(N, DAG, Mode))
41305 SDValue N0 = N->getOperand(0);
41306 SDValue N1 = N->getOperand(1);
41307 EVT VT = N->getOperand(0).getValueType();
41308 unsigned NumElts = VT.getVectorNumElements();
41309 if ((NumElts % 2) != 0)
41312 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
41314 // Shrink the operands of mul.
41315 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
41316 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
41318 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
41319 // lower part is needed.
41320 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
41321 if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
41322 return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
41323 : ISD::SIGN_EXTEND,
41326 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
41327 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
41328 // the higher part is also needed.
41330 DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
41331 ReducedVT, NewN0, NewN1);
41333 // Repack the lower and higher part results of the mul into a wider result.
41335 // Generate shuffle functioning as punpcklwd.
41336 SmallVector<int, 16> ShuffleMask(NumElts);
41337 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
41338 ShuffleMask[2 * i] = i;
41339 ShuffleMask[2 * i + 1] = i + NumElts;
41342 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
41343 ResLo = DAG.getBitcast(ResVT, ResLo);
41344 // Generate shuffle functioning as punpckhwd.
41345 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
41346 ShuffleMask[2 * i] = i + NumElts / 2;
41347 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
41350 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
41351 ResHi = DAG.getBitcast(ResVT, ResHi);
41352 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
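// For example: with NumElts == 8 the two masks are <0,8,1,9,2,10,3,11> and
// <4,12,5,13,6,14,7,15>, interleaving the low/high halves of the 16-bit
// products exactly as punpcklwd/punpckhwd would, before the bitcast back to
// i32 lanes.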
41355 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
41356 EVT VT, const SDLoc &DL) {
41358 auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
41359 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
41360 DAG.getConstant(Mult, DL, VT));
41361 Result = DAG.getNode(ISD::SHL, DL, VT, Result,
41362 DAG.getConstant(Shift, DL, MVT::i8));
41363 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
41368 auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
41369 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
41370 DAG.getConstant(Mul1, DL, VT));
41371 Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
41372 DAG.getConstant(Mul2, DL, VT));
41373 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
41382 // mul x, 11 => add ((shl (mul x, 5), 1), x)
41383 return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
41385 // mul x, 21 => add ((shl (mul x, 5), 2), x)
41386 return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
41388 // mul x, 41 => add ((shl (mul x, 5), 3), x)
41389 return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
41391 // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
41392 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
41393 combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
41395 // mul x, 19 => add ((shl (mul x, 9), 1), x)
41396 return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
41398 // mul x, 37 => add ((shl (mul x, 9), 2), x)
41399 return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
41401 // mul x, 73 => add ((shl (mul x, 9), 3), x)
41402 return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
41404 // mul x, 13 => add ((shl (mul x, 3), 2), x)
41405 return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
41407 // mul x, 23 => sub ((shl (mul x, 3), 3), x)
41408 return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
41410 // mul x, 26 => add ((mul (mul x, 5), 5), x)
41411 return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
41413 // mul x, 28 => add ((mul (mul x, 9), 3), x)
41414 return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
41416 // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
41417 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
41418 combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
41421 // Another trick. If this is a power of 2 + 2/4/8, we can use a shift followed
41422 // by a single LEA.
41423 // First check if this is a sum of two powers of 2 because that's easy. Then
41424 // count the trailing zeros up to the first set bit.
41425 // TODO: We can do this even without LEA at a cost of two shifts and an add.
41426 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
41427 unsigned ScaleShift = countTrailingZeros(MulAmt);
41428 if (ScaleShift >= 1 && ScaleShift < 4) {
41429 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
41430 SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
41431 DAG.getConstant(ShiftAmt, DL, MVT::i8));
41432 SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
41433 DAG.getConstant(ScaleShift, DL, MVT::i8));
41434 return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
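// For example: MulAmt == 20 (0b10100) passes the check with ShiftAmt == 4 and
// ScaleShift == 2, so the multiply becomes (x << 4) + (x << 2) = 20 * x, where
// the smaller shift can typically be folded into an LEA scale.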
41441 // If the upper 17 bits of each element are zero then we can use PMADDWD,
41442 // which is always at least as quick as PMULLD, except on KNL.
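// (17 rather than 16 because PMADDWD multiplies signed i16 lanes: clearing the
// top 17 bits guarantees the low 16-bit lane of each i32 element is
// non-negative and the high lane is zero, so the widening multiply-add
// reproduces a plain 32-bit multiply.)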
41443 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
41444 const X86Subtarget &Subtarget) {
41445 if (!Subtarget.hasSSE2())
41448 if (Subtarget.isPMADDWDSlow())
41451 EVT VT = N->getValueType(0);
41453 // Only support vXi32 vectors.
41454 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
41457 // Make sure the type is legal or will be widened to a legal type.
41458 if (VT != MVT::v2i32 && !DAG.getTargetLoweringInfo().isTypeLegal(VT))
41461 MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
41463 // Without BWI, we would need to split v32i16.
41464 if (WVT == MVT::v32i16 && !Subtarget.hasBWI())
41467 SDValue N0 = N->getOperand(0);
41468 SDValue N1 = N->getOperand(1);
41470 // If we are zero extending two steps without SSE4.1, it's better to reduce
41471 // the vmul width instead.
41472 if (!Subtarget.hasSSE41() &&
41473 (N0.getOpcode() == ISD::ZERO_EXTEND &&
41474 N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
41475 (N1.getOpcode() == ISD::ZERO_EXTEND &&
41476 N1.getOperand(0).getScalarValueSizeInBits() <= 8))
41479 APInt Mask17 = APInt::getHighBitsSet(32, 17);
41480 if (!DAG.MaskedValueIsZero(N1, Mask17) ||
41481 !DAG.MaskedValueIsZero(N0, Mask17))
41484 // Use SplitOpsAndApply to handle AVX splitting.
41485 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
41486 ArrayRef<SDValue> Ops) {
41487 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
41488 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
41490 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
41491 { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
41495 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
41496 const X86Subtarget &Subtarget) {
41497 if (!Subtarget.hasSSE2())
41500 EVT VT = N->getValueType(0);
41502 // Only support vXi64 vectors.
41503 if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
41504 VT.getVectorNumElements() < 2 ||
41505 !isPowerOf2_32(VT.getVectorNumElements()))
41508 SDValue N0 = N->getOperand(0);
41509 SDValue N1 = N->getOperand(1);
41511 // PMULDQ returns the 64-bit result of the signed multiplication of the lower
41512 // 32-bits. We can lower with this if the sign bits stretch that far.
41513 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
41514 DAG.ComputeNumSignBits(N1) > 32) {
41515 auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
41516 ArrayRef<SDValue> Ops) {
41517 return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
41519 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
41520 PMULDQBuilder, /*CheckBWI*/false);
41523 // If the upper bits are zero we can use a single pmuludq.
41524 APInt Mask = APInt::getHighBitsSet(64, 32);
41525 if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
41526 auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
41527 ArrayRef<SDValue> Ops) {
41528 return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
41530 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
41531 PMULUDQBuilder, /*CheckBWI*/false);
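// e.g. for (mul (zext v2i32 X to v2i64), (zext v2i32 Y to v2i64)) both
// operands have their upper 32 bits known zero, so a single PMULUDQ produces
// the full 64-bit product.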
41537 /// Optimize a single multiply with constant into two operations in order to
41538 /// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
41539 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
41540 TargetLowering::DAGCombinerInfo &DCI,
41541 const X86Subtarget &Subtarget) {
41542 EVT VT = N->getValueType(0);
41544 if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
41547 if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
41550 if (DCI.isBeforeLegalize() && VT.isVector())
41551 return reduceVMULWidth(N, DAG, Subtarget);
41553 if (!MulConstantOptimization)
41555 // An imul is usually smaller than the alternative sequence.
41556 if (DAG.getMachineFunction().getFunction().hasMinSize())
41559 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
41562 if (VT != MVT::i64 && VT != MVT::i32)
41565 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
41568 if (isPowerOf2_64(C->getZExtValue()))
41571 int64_t SignMulAmt = C->getSExtValue();
41572 assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
41573 uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
41576 if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
41577 SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
41578 DAG.getConstant(AbsMulAmt, DL, VT));
41579 if (SignMulAmt < 0)
41580 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
41586 uint64_t MulAmt1 = 0;
41587 uint64_t MulAmt2 = 0;
41588 if ((AbsMulAmt % 9) == 0) {
41590 MulAmt2 = AbsMulAmt / 9;
41591 } else if ((AbsMulAmt % 5) == 0) {
41593 MulAmt2 = AbsMulAmt / 5;
41594 } else if ((AbsMulAmt % 3) == 0) {
41596 MulAmt2 = AbsMulAmt / 3;
41600 // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
41602 (isPowerOf2_64(MulAmt2) ||
41603 (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
41605 if (isPowerOf2_64(MulAmt2) &&
41606 !(SignMulAmt >= 0 && N->hasOneUse() &&
41607 N->use_begin()->getOpcode() == ISD::ADD))
41608 // If the second multiplier is pow2, issue it first. We want the multiply by
41609 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
41610 // is an add. Only do this for positive multiply amounts since the
41611 // negate would prevent it from being used as an address mode anyway.
41612 std::swap(MulAmt1, MulAmt2);
41614 if (isPowerOf2_64(MulAmt1))
41615 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
41616 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
41618 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
41619 DAG.getConstant(MulAmt1, DL, VT));
41621 if (isPowerOf2_64(MulAmt2))
41622 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
41623 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
41625 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
41626 DAG.getConstant(MulAmt2, DL, VT));
41628 // Negate the result.
41629 if (SignMulAmt < 0)
41630 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
41632 } else if (!Subtarget.slowLEA())
41633 NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
41636 assert(C->getZExtValue() != 0 &&
41637 C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
41638 "Both cases that could cause potential overflows should have "
41639 "already been handled.");
41640 if (isPowerOf2_64(AbsMulAmt - 1)) {
41641 // (mul x, 2^N + 1) => (add (shl x, N), x)
41642 NewMul = DAG.getNode(
41643 ISD::ADD, DL, VT, N->getOperand(0),
41644 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
41645 DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
41647 // To negate, subtract the number from zero
41648 if (SignMulAmt < 0)
41649 NewMul = DAG.getNode(ISD::SUB, DL, VT,
41650 DAG.getConstant(0, DL, VT), NewMul);
41651 } else if (isPowerOf2_64(AbsMulAmt + 1)) {
41652 // (mul x, 2^N - 1) => (sub (shl x, N), x)
41653 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
41654 DAG.getConstant(Log2_64(AbsMulAmt + 1),
41656 // To negate, reverse the operands of the subtract.
41657 if (SignMulAmt < 0)
41658 NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
41660 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
41661 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
41662 // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
41663 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
41664 DAG.getConstant(Log2_64(AbsMulAmt - 2),
41666 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
41667 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
41668 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
41669 // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
41670 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
41671 DAG.getConstant(Log2_64(AbsMulAmt + 2),
41673 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
41674 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
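// Worked example: AbsMulAmt == 30 with SignMulAmt >= 0 takes the branch above,
// since 30 == 32 - 2, and emits (sub (sub (shl x, 5), x), x) == 32*x - 2*x.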
41681 // Try to form a MULHU or MULHS node by looking for
41682 // (srl (mul ext, ext), 16)
41683 // TODO: This is X86 specific because we want to be able to handle wide types
41684 // before type legalization. But we can only do it if the vector will be
41685 // legalized via widening/splitting. Type legalization can't handle promotion
41686 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG combiner.
41688 static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
41689 const X86Subtarget &Subtarget) {
41690 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
41691 "SRL or SRA node is required here!");
41694 // Only do this with SSE4.1. On earlier targets reduceVMULWidth will expand the multiply.
41696 if (!Subtarget.hasSSE41())
41699 // The operation feeding into the shift must be a multiply.
41700 SDValue ShiftOperand = N->getOperand(0);
41701 if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
41704 // Input type should be at least vXi32.
41705 EVT VT = N->getValueType(0);
41706 if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
41709 // Need a shift by 16.
41711 if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
41715 SDValue LHS = ShiftOperand.getOperand(0);
41716 SDValue RHS = ShiftOperand.getOperand(1);
41718 unsigned ExtOpc = LHS.getOpcode();
41719 if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
41720 RHS.getOpcode() != ExtOpc)
41723 // Peek through the extends.
41724 LHS = LHS.getOperand(0);
41725 RHS = RHS.getOperand(0);
41727 // Ensure the input types match.
41728 EVT MulVT = LHS.getValueType();
41729 if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
41732 unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
41733 SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);
41735 ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
41736 return DAG.getNode(ExtOpc, DL, VT, Mulh);
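// e.g. (srl (mul (zext v8i16 X to v8i32), (zext v8i16 Y to v8i32)), 16)
// becomes (zext (mulhu v8i16 X, Y) to v8i32).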
41739 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
41740 SDValue N0 = N->getOperand(0);
41741 SDValue N1 = N->getOperand(1);
41742 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
41743 EVT VT = N0.getValueType();
41745 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
41746 // since the result of setcc_c is all zero's or all ones.
41747 if (VT.isInteger() && !VT.isVector() &&
41748 N1C && N0.getOpcode() == ISD::AND &&
41749 N0.getOperand(1).getOpcode() == ISD::Constant) {
41750 SDValue N00 = N0.getOperand(0);
41751 APInt Mask = N0.getConstantOperandAPInt(1);
41752 Mask <<= N1C->getAPIntValue();
41753 bool MaskOK = false;
41754 // We can handle cases concerning bit-widening nodes containing setcc_c if
41755 // we carefully interrogate the mask to make sure we are semantics preserving.
41757 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
41758 // of the underlying setcc_c operation when the setcc_c was zero extended.
41759 // Consider the following example:
41760 // zext(setcc_c) -> i32 0x0000FFFF
41761 // c1 -> i32 0x0000FFFF
41762 // c2 -> i32 0x00000001
41763 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
41764 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
41765 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
41767 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
41768 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
41770 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
41771 N00.getOpcode() == ISD::ANY_EXTEND) &&
41772 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
41773 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
41775 if (MaskOK && Mask != 0) {
41777 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
41781 // Hardware support for vector shifts is sparse which makes us scalarize the
41782 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL.
41784 // (shl V, 1) -> add V,V
41785 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
41786 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
41787 assert(N0.getValueType().isVector() && "Invalid vector shift type");
41788 // We shift all of the values by one. In many cases we do not have
41789 // hardware support for this operation. This is better expressed as an ADD of two values.
41791 if (N1SplatC->isOne())
41792 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
41798 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
41799 const X86Subtarget &Subtarget) {
41800 SDValue N0 = N->getOperand(0);
41801 SDValue N1 = N->getOperand(1);
41802 EVT VT = N0.getValueType();
41803 unsigned Size = VT.getSizeInBits();
41805 if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
41808 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
41809 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
41810 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
41811 // depending on sign of (SarConst - [56,48,32,24,16])
41813 // sexts in X86 are MOVs. The MOVs have the same code size
41814 // as above SHIFTs (only a SHIFT by 1 has lower code size).
41815 // However the MOVs have 2 advantages to a SHIFT:
41816 // 1. MOVs can write to a register that differs from source
41817 // 2. MOVs accept memory operands
41819 if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
41820 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
41821 N0.getOperand(1).getOpcode() != ISD::Constant)
41824 SDValue N00 = N0.getOperand(0);
41825 SDValue N01 = N0.getOperand(1);
41826 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
41827 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
41828 EVT CVT = N1.getValueType();
41830 if (SarConst.isNegative())
41833 for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
41834 unsigned ShiftSize = SVT.getSizeInBits();
41835 // Skip types without a corresponding sext/zext and
41836 // ShlConst values that are not one of [56,48,32,24,16].
41837 if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
41841 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
41842 SarConst = SarConst - (Size - ShiftSize);
41845 else if (SarConst.isNegative())
41846 return DAG.getNode(ISD::SHL, DL, VT, NN,
41847 DAG.getConstant(-SarConst, DL, CVT));
41849 return DAG.getNode(ISD::SRA, DL, VT, NN,
41850 DAG.getConstant(SarConst, DL, CVT));
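// e.g. for i32: (sra (shl X, 24), 25) becomes (sra (sext_inreg X, i8), 1),
// replacing the first shift with a sign extension.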
41855 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
41856 TargetLowering::DAGCombinerInfo &DCI,
41857 const X86Subtarget &Subtarget) {
41858 SDValue N0 = N->getOperand(0);
41859 SDValue N1 = N->getOperand(1);
41860 EVT VT = N0.getValueType();
41862 if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
41865 // Only do this on the last DAG combine as it can interfere with other combines.
41867 if (!DCI.isAfterLegalizeDAG())
41870 // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
41871 // TODO: This is a generic DAG combine that became an x86-only combine to
41872 // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
41873 // and-not ('andn').
41874 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
41877 auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
41878 auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
41879 if (!ShiftC || !AndC)
41882 // If we can shrink the constant mask below 8 bits or 32 bits, then this
41883 // transform should reduce code size. It may also enable secondary transforms
41884 // from improved known-bits analysis or instruction selection.
41885 APInt MaskVal = AndC->getAPIntValue();
41887 // If this can be matched by a zero extend, don't optimize.
41888 if (MaskVal.isMask()) {
41889 unsigned TO = MaskVal.countTrailingOnes();
41890 if (TO >= 8 && isPowerOf2_32(TO))
41894 APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
41895 unsigned OldMaskSize = MaskVal.getMinSignedBits();
41896 unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
41897 if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
41898 (OldMaskSize > 32 && NewMaskSize <= 32)) {
41899 // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
41901 SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
41902 SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
41903 return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
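// e.g. (srl (and X, 0x3F8), 3) becomes (and (srl X, 3), 0x7F); the shrunken
// mask now fits in a sign-extended 8-bit immediate.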
41908 static SDValue combineVectorPackWithShuffle(SDNode *N, SelectionDAG &DAG) {
41909 unsigned Opcode = N->getOpcode();
41910 assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
41911 "Unexpected pack opcode");
41913 EVT VT = N->getValueType(0);
41914 SDValue N0 = N->getOperand(0);
41915 SDValue N1 = N->getOperand(1);
41916 unsigned NumDstElts = VT.getVectorNumElements();
41918 // Attempt to fold PACK(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
41919 // to SHUFFLE(PACK(LOSUBVECTOR(X),HISUBVECTOR(X))), this is mainly for
41920 // truncation trees that help us avoid lane crossing shuffles.
41921 // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
41922 if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
41923 N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
41924 N0.getConstantOperandAPInt(1) == 0 &&
41925 N1.getConstantOperandAPInt(1) == (NumDstElts / 2) &&
41926 N0.getOperand(0) == N1.getOperand(0) && VT.is128BitVector() &&
41927 N0.getOperand(0).getValueType().is256BitVector()) {
41928 // TODO - support target/faux shuffles.
41929 SDValue Vec = peekThroughBitcasts(N0.getOperand(0));
41930 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Vec)) {
41931 // To keep the PACK LHS/RHS coherency, we must be able to scale the unary
41932 // shuffle to a vXi64 width - we can probably relax this in the future.
41933 SmallVector<int, 4> ShuffleMask;
41934 if (SVN->getOperand(1).isUndef() &&
41935 scaleShuffleElements(SVN->getMask(), 4, ShuffleMask)) {
41938 std::tie(Lo, Hi) = DAG.SplitVector(SVN->getOperand(0), DL);
41939 Lo = DAG.getBitcast(N0.getValueType(), Lo);
41940 Hi = DAG.getBitcast(N1.getValueType(), Hi);
41941 SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
41942 Res = DAG.getBitcast(MVT::v4i32, Res);
41943 Res = DAG.getVectorShuffle(MVT::v4i32, DL, Res, Res, ShuffleMask);
41944 return DAG.getBitcast(VT, Res);
41949 // Attempt to fold PACK(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(PACK(X,Y)).
41950 // TODO: Relax shuffle scaling to support sub-128-bit subvector shuffles.
41951 if (VT.is256BitVector()) {
41952 if (auto *SVN0 = dyn_cast<ShuffleVectorSDNode>(N0)) {
41953 if (auto *SVN1 = dyn_cast<ShuffleVectorSDNode>(N1)) {
41954 SmallVector<int, 2> ShuffleMask0, ShuffleMask1;
41955 if (scaleShuffleElements(SVN0->getMask(), 2, ShuffleMask0) &&
41956 scaleShuffleElements(SVN1->getMask(), 2, ShuffleMask1)) {
41957 SDValue Op00 = SVN0->getOperand(0);
41958 SDValue Op01 = SVN0->getOperand(1);
41959 SDValue Op10 = SVN1->getOperand(0);
41960 SDValue Op11 = SVN1->getOperand(1);
41961 if ((Op00 == Op11) && (Op01 == Op10)) {
41962 std::swap(Op10, Op11);
41963 ShuffleVectorSDNode::commuteMask(ShuffleMask1);
41965 if ((Op00 == Op10) && (Op01 == Op11)) {
41966 SmallVector<int, 4> ShuffleMask;
41967 ShuffleMask.append(ShuffleMask0.begin(), ShuffleMask0.end());
41968 ShuffleMask.append(ShuffleMask1.begin(), ShuffleMask1.end());
41970 SDValue Res = DAG.getNode(Opcode, DL, VT, Op00, Op01);
41971 Res = DAG.getBitcast(MVT::v4i64, Res);
41972 Res = DAG.getVectorShuffle(MVT::v4i64, DL, Res, Res, ShuffleMask);
41973 return DAG.getBitcast(VT, Res);
41983 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
41984 TargetLowering::DAGCombinerInfo &DCI,
41985 const X86Subtarget &Subtarget) {
41986 unsigned Opcode = N->getOpcode();
41987 assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
41988 "Unexpected pack opcode");
41990 EVT VT = N->getValueType(0);
41991 SDValue N0 = N->getOperand(0);
41992 SDValue N1 = N->getOperand(1);
41993 unsigned NumDstElts = VT.getVectorNumElements();
41994 unsigned DstBitsPerElt = VT.getScalarSizeInBits();
41995 unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
41996 assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
41997 N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
41998 "Unexpected PACKSS/PACKUS input type");
42000 bool IsSigned = (X86ISD::PACKSS == Opcode);
42002 // Constant Folding.
42003 APInt UndefElts0, UndefElts1;
42004 SmallVector<APInt, 32> EltBits0, EltBits1;
42005 if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
42006 (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
42007 getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
42008 getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
42009 unsigned NumLanes = VT.getSizeInBits() / 128;
42010 unsigned NumSrcElts = NumDstElts / 2;
42011 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
42012 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
42014 APInt Undefs(NumDstElts, 0);
42015 SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
42016 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
42017 for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
42018 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
42019 auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
42020 auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
42022 if (UndefElts[SrcIdx]) {
42023 Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
42027 APInt &Val = EltBits[SrcIdx];
42029 // PACKSS: Truncate signed value with signed saturation.
42030 // Source values less than dst minint are saturated to minint.
42031 // Source values greater than dst maxint are saturated to maxint.
42032 if (Val.isSignedIntN(DstBitsPerElt))
42033 Val = Val.trunc(DstBitsPerElt);
42034 else if (Val.isNegative())
42035 Val = APInt::getSignedMinValue(DstBitsPerElt);
42037 Val = APInt::getSignedMaxValue(DstBitsPerElt);
42039 // PACKUS: Truncate signed value with unsigned saturation.
42040 // Source values less than zero are saturated to zero.
42041 // Source values greater than dst maxuint are saturated to maxuint.
42042 if (Val.isIntN(DstBitsPerElt))
42043 Val = Val.trunc(DstBitsPerElt);
42044 else if (Val.isNegative())
42045 Val = APInt::getNullValue(DstBitsPerElt);
42047 Val = APInt::getAllOnesValue(DstBitsPerElt);
42049 Bits[Lane * NumDstEltsPerLane + Elt] = Val;
42053 return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
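// e.g. a PACKSSDW lane with constant input 70000 folds to 32767 here, and one
// with input -70000 folds to -32768.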
42056 // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
42057 if (SDValue V = combineVectorPackWithShuffle(N, DAG))
42060 // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
42061 // truncate to create a larger truncate.
42062 if (Subtarget.hasAVX512() &&
42063 N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
42064 N0.getOperand(0).getValueType() == MVT::v8i32) {
42065 if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
42067 DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
42068 if (Subtarget.hasVLX())
42069 return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
42071 // Widen input to v16i32 so we can truncate that.
42073 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
42074 N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
42075 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
42079 // Attempt to combine as shuffle.
42081 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
42087 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
42088 TargetLowering::DAGCombinerInfo &DCI,
42089 const X86Subtarget &Subtarget) {
42090 assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
42091 X86ISD::VSRL == N->getOpcode()) &&
42092 "Unexpected shift opcode");
42093 EVT VT = N->getValueType(0);
42094 SDValue N0 = N->getOperand(0);
42095 SDValue N1 = N->getOperand(1);
42097 // Shift zero -> zero.
42098 if (ISD::isBuildVectorAllZeros(N0.getNode()))
42099 return DAG.getConstant(0, SDLoc(N), VT);
42101 // Detect constant shift amounts.
42103 SmallVector<APInt, 32> EltBits;
42104 if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
42105 unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
42106 return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
42107 EltBits[0].getZExtValue(), DAG);
42110 APInt KnownUndef, KnownZero;
42111 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42112 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
42113 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
42115 return SDValue(N, 0);
42120 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
42121 TargetLowering::DAGCombinerInfo &DCI,
42122 const X86Subtarget &Subtarget) {
42123 unsigned Opcode = N->getOpcode();
42124 assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
42125 X86ISD::VSRLI == Opcode) &&
42126 "Unexpected shift opcode");
42127 bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
42128 EVT VT = N->getValueType(0);
42129 SDValue N0 = N->getOperand(0);
42130 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
42131 assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
42132 "Unexpected value type");
42133 assert(N->getOperand(1).getValueType() == MVT::i8 &&
42134 "Unexpected shift amount type");
42136 // Out of range logical bit shifts are guaranteed to be zero.
42137 // Out of range arithmetic bit shifts splat the sign bit.
42138 unsigned ShiftVal = N->getConstantOperandVal(1);
42139 if (ShiftVal >= NumBitsPerElt) {
42141 return DAG.getConstant(0, SDLoc(N), VT);
42142 ShiftVal = NumBitsPerElt - 1;
42145 // (shift X, 0) -> X
42149 // (shift 0, C) -> 0
42150 if (ISD::isBuildVectorAllZeros(N0.getNode()))
42151 // N0 is all zeros or undef. We guarantee that the bits shifted into the
42152 // result are all zeros, not undef.
42153 return DAG.getConstant(0, SDLoc(N), VT);
42155 // (VSRAI -1, C) -> -1
42156 if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
42157 // N0 is all ones or undef. We guarantee that the bits shifted into the
42158 // result are all ones, not undef.
42159 return DAG.getConstant(-1, SDLoc(N), VT);
42161 // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
42162 if (Opcode == N0.getOpcode()) {
42163 unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
42164 unsigned NewShiftVal = ShiftVal + ShiftVal2;
42165 if (NewShiftVal >= NumBitsPerElt) {
42166 // Out of range logical bit shifts are guaranteed to be zero.
42167 // Out of range arithmetic bit shifts splat the sign bit.
42169 return DAG.getConstant(0, SDLoc(N), VT);
42170 NewShiftVal = NumBitsPerElt - 1;
42172 return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
42173 DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
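// e.g. (VSRLI (VSRLI X, 3), 2) becomes (VSRLI X, 5).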
42176 // We can decode 'whole byte' logical bit shifts as shuffles.
42177 if (LogicalShift && (ShiftVal % 8) == 0) {
42179 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
42183 // Constant Folding.
42185 SmallVector<APInt, 32> EltBits;
42186 if (N->isOnlyUserOf(N0.getNode()) &&
42187 getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
42188 assert(EltBits.size() == VT.getVectorNumElements() &&
42189 "Unexpected shift value type");
42190 // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
42191 // created an undef input due to no input bits being demanded, but user
42192 // still expects 0 in other bits.
42193 for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
42194 APInt &Elt = EltBits[i];
42197 else if (X86ISD::VSHLI == Opcode)
42199 else if (X86ISD::VSRAI == Opcode)
42200 Elt.ashrInPlace(ShiftVal);
42202 Elt.lshrInPlace(ShiftVal);
42204 // Reset undef elements since they were zeroed above.
42206 return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
42209 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42210 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
42211 APInt::getAllOnesValue(NumBitsPerElt), DCI))
42212 return SDValue(N, 0);
42217 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
42218 TargetLowering::DAGCombinerInfo &DCI,
42219 const X86Subtarget &Subtarget) {
42220 EVT VT = N->getValueType(0);
42221 assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
42222 (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16) ||
42223 N->getOpcode() == ISD::INSERT_VECTOR_ELT) &&
42224 "Unexpected vector insertion");
42226 if (N->getOpcode() == X86ISD::PINSRB || N->getOpcode() == X86ISD::PINSRW) {
42227 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
42228 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42229 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
42230 APInt::getAllOnesValue(NumBitsPerElt), DCI))
42231 return SDValue(N, 0);
42234 // Attempt to combine insertion patterns to a shuffle.
42235 if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
42237 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
42244 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
42245 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
42246 /// OR -> CMPNEQSS.
42247 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
42248 TargetLowering::DAGCombinerInfo &DCI,
42249 const X86Subtarget &Subtarget) {
42252 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
42253 // we're requiring SSE2 for both.
42254 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
42255 SDValue N0 = N->getOperand(0);
42256 SDValue N1 = N->getOperand(1);
42257 SDValue CMP0 = N0.getOperand(1);
42258 SDValue CMP1 = N1.getOperand(1);
42261 // The SETCCs should both refer to the same CMP.
42262 if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
42265 SDValue CMP00 = CMP0->getOperand(0);
42266 SDValue CMP01 = CMP0->getOperand(1);
42267 EVT VT = CMP00.getValueType();
42269 if (VT == MVT::f32 || VT == MVT::f64) {
42270 bool ExpectingFlags = false;
42271 // Check for any users that want flags:
42272 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
42273 !ExpectingFlags && UI != UE; ++UI)
42274 switch (UI->getOpcode()) {
42279 ExpectingFlags = true;
42281 case ISD::CopyToReg:
42282 case ISD::SIGN_EXTEND:
42283 case ISD::ZERO_EXTEND:
42284 case ISD::ANY_EXTEND:
42288 if (!ExpectingFlags) {
42289 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
42290 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
42292 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
42293 X86::CondCode tmp = cc0;
42298 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
42299 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
42300 // FIXME: need symbolic constants for these magic numbers.
42301 // See X86ATTInstPrinter.cpp:printSSECC().
42302 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
42303 if (Subtarget.hasAVX512()) {
42305 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
42306 DAG.getTargetConstant(x86cc, DL, MVT::i8));
42307 // Need to fill with zeros to ensure the bitcast will produce zeroes
42308 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
42309 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
42310 DAG.getConstant(0, DL, MVT::v16i1),
42311 FSetCC, DAG.getIntPtrConstant(0, DL));
42312 return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
42313 N->getSimpleValueType(0));
42315 SDValue OnesOrZeroesF =
42316 DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
42317 CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
42319 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
42320 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
42322 if (is64BitFP && !Subtarget.is64Bit()) {
42323 // On a 32-bit target, we cannot bitcast the 64-bit float to a
42324 // 64-bit integer, since that's not a legal type. Since
42325 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
42326 // bits, but can do this little dance to extract the lowest 32 bits
42327 // and work with those going forward.
42328 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
42330 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
42331 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
42332 Vector32, DAG.getIntPtrConstant(0, DL));
42336 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
42337 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
42338 DAG.getConstant(1, DL, IntVT));
42339 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
42341 return OneBitOfTruth;
42349 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
42350 static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
42351 assert(N->getOpcode() == ISD::AND);
42353 MVT VT = N->getSimpleValueType(0);
42354 if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
42358 SDValue N0 = N->getOperand(0);
42359 SDValue N1 = N->getOperand(1);
42361 auto GetNot = [&VT, &DAG](SDValue V) {
42362 // Basic X = NOT(Y) detection.
42363 if (SDValue Not = IsNOT(V, DAG))
42365 // Fold BROADCAST(NOT(Y)) -> BROADCAST(Y).
42366 if (V.getOpcode() == X86ISD::VBROADCAST) {
42367 SDValue Src = V.getOperand(0);
42368 EVT SrcVT = Src.getValueType();
42369 if (!SrcVT.isVector())
42371 if (SDValue Not = IsNOT(Src, DAG))
42372 return DAG.getNode(X86ISD::VBROADCAST, SDLoc(V), VT,
42373 DAG.getBitcast(SrcVT, Not));
42378 if (SDValue Not = GetNot(N0)) {
42381 } else if (SDValue Not = GetNot(N1)) {
42387 X = DAG.getBitcast(VT, X);
42388 Y = DAG.getBitcast(VT, Y);
42389 return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
42392 // Try to widen AND, OR and XOR nodes to VT in order to remove casts around
42393 // logical operations, like in the example below.
42394 // or (and (truncate x, truncate y)),
42395 // (xor (truncate z, build_vector (constants)))
42396 // Given a target type \p VT, we generate
42397 // or (and x, y), (xor z, zext(build_vector (constants)))
42398 // given x, y and z are of type \p VT. We can do so, if operands are either
42399 // truncates from VT types, the second operand is a vector of constants or can
42400 // be recursively promoted.
42401 static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
42403 // Limit recursion to avoid excessive compile times.
42404 if (Depth >= SelectionDAG::MaxRecursionDepth)
42407 if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
42408 N->getOpcode() != ISD::OR)
42411 SDValue N0 = N->getOperand(0);
42412 SDValue N1 = N->getOperand(1);
42415 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42416 if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
42419 if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
42422 // The Left side has to be a trunc.
42423 if (N0.getOpcode() != ISD::TRUNCATE)
42426 // The type of the truncated inputs.
42427 if (N0.getOperand(0).getValueType() != VT)
42430 N0 = N0.getOperand(0);
42433 if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
42436 // The right side has to be a 'trunc' or a constant vector.
42437 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
42438 N1.getOperand(0).getValueType() == VT;
42439 if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
42443 N1 = N1.getOperand(0);
42445 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
42448 return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
42451 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
42452 // register. In most cases we actually compare or select YMM-sized registers
42453 // and mixing the two types creates horrible code. This method optimizes
42454 // some of the transition sequences.
42455 // Even with AVX-512 this is still useful for removing casts around logical
42456 // operations on vXi1 mask types.
42457 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
42458 const X86Subtarget &Subtarget) {
42459 EVT VT = N->getValueType(0);
42460 assert(VT.isVector() && "Expected vector type");
42463 assert((N->getOpcode() == ISD::ANY_EXTEND ||
42464 N->getOpcode() == ISD::ZERO_EXTEND ||
42465 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
42467 SDValue Narrow = N->getOperand(0);
42468 EVT NarrowVT = Narrow.getValueType();
42470 // Generate the wide operation.
42471 SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
42474 switch (N->getOpcode()) {
42475 default: llvm_unreachable("Unexpected opcode");
42476 case ISD::ANY_EXTEND:
42478 case ISD::ZERO_EXTEND:
42479 return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
42480 case ISD::SIGN_EXTEND:
42481 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
42482 Op, DAG.getValueType(NarrowVT));
42486 static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
42489 default: llvm_unreachable("Unexpected input node for FP logic conversion");
42490 case ISD::AND: FPOpcode = X86ISD::FAND; break;
42491 case ISD::OR: FPOpcode = X86ISD::FOR; break;
42492 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
42497 /// If both input operands of a logic op are being cast from floating point
42498 /// types, try to convert this into a floating point logic node to avoid
42499 /// unnecessary moves from SSE to integer registers.
42500 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
42501 const X86Subtarget &Subtarget) {
42502 EVT VT = N->getValueType(0);
42503 SDValue N0 = N->getOperand(0);
42504 SDValue N1 = N->getOperand(1);
42507 if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
42510 SDValue N00 = N0.getOperand(0);
42511 SDValue N10 = N1.getOperand(0);
42512 EVT N00Type = N00.getValueType();
42513 EVT N10Type = N10.getValueType();
42515 // Ensure that both types are the same and are legal scalar fp types.
42516 if (N00Type != N10Type ||
42517 !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
42518 (Subtarget.hasSSE2() && N00Type == MVT::f64)))
42521 unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
42522 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
42523 return DAG.getBitcast(VT, FPLogic);
42526 // Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
42527 // to reduce XMM->GPR traffic.
42528 static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
42529 unsigned Opc = N->getOpcode();
42530 assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
42531 "Unexpected bit opcode");
42533 SDValue N0 = N->getOperand(0);
42534 SDValue N1 = N->getOperand(1);
42536 // Both operands must be single use MOVMSK.
42537 if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
42538 N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
42541 SDValue Vec0 = N0.getOperand(0);
42542 SDValue Vec1 = N1.getOperand(0);
42543 EVT VecVT0 = Vec0.getValueType();
42544 EVT VecVT1 = Vec1.getValueType();
42546 // Both MOVMSK operands must be from vectors of the same size and same element
42547 // size, but it's OK for an fp/int diff.
42548 if (VecVT0.getSizeInBits() != VecVT1.getSizeInBits() ||
42549 VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
42554 VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
42556 DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
42557 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
42560 /// If this is a zero/all-bits result that is bitwise-anded with a low-bits
42561 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
42562 /// with a shift-right to eliminate loading the vector constant mask value.
42563 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
42564 const X86Subtarget &Subtarget) {
42565 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
42566 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
42567 EVT VT0 = Op0.getValueType();
42568 EVT VT1 = Op1.getValueType();
42570 if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
42574 if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
42575 !SplatVal.isMask())
42578 // Don't prevent creation of ANDN.
42579 if (isBitwiseNot(Op0))
42582 if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
42585 unsigned EltBitWidth = VT0.getScalarSizeInBits();
42586 if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
42590 unsigned ShiftVal = SplatVal.countTrailingOnes();
42591 SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
42592 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
42593 return DAG.getBitcast(N->getValueType(0), Shift);
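// e.g. for v4i32: (and (setgt A, B), splat(1)) becomes a VSRLI of the setcc
// result by 31, avoiding a constant-pool load for the splat(1) mask.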
42596 // Get the index node from the lowered DAG of a GEP IR instruction with one
42597 // indexing dimension.
42598 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
42599 if (Ld->isIndexed())
42602 SDValue Base = Ld->getBasePtr();
42604 if (Base.getOpcode() != ISD::ADD)
42607 SDValue ShiftedIndex = Base.getOperand(0);
42609 if (ShiftedIndex.getOpcode() != ISD::SHL)
42612 return ShiftedIndex.getOperand(0);
42616 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
42617 if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
42618 switch (VT.getSizeInBits()) {
42619 default: return false;
42620 case 64: return Subtarget.is64Bit();
42621 case 32: return true;
42627 // This function recognizes cases where the X86 BZHI instruction can replace an
42628 // 'and-load' sequence:
42629 // an integer value is loaded from an array of constants which is defined as
42632 //     int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
42634 // and a bitwise AND is then applied between the loaded value and another input.
42635 // This is equivalent to performing bzhi (zero high bits) on the input, using the
42636 // same index as the load.
42637 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
42638 const X86Subtarget &Subtarget) {
42639 MVT VT = Node->getSimpleValueType(0);
42642 // Check if subtarget has BZHI instruction for the node's type
42643 if (!hasBZHI(Subtarget, VT))
42646 // Try matching the pattern for both operands.
42647 for (unsigned i = 0; i < 2; i++) {
42648 SDValue N = Node->getOperand(i);
42649 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
42651 // continue if the operand is not a load instruction
42655 const Value *MemOp = Ld->getMemOperand()->getValue();
42660 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
42661 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
42662 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
42664 Constant *Init = GV->getInitializer();
42665 Type *Ty = Init->getType();
42666 if (!isa<ConstantDataArray>(Init) ||
42667 !Ty->getArrayElementType()->isIntegerTy() ||
42668 Ty->getArrayElementType()->getScalarSizeInBits() !=
42669 VT.getSizeInBits() ||
42670 Ty->getArrayNumElements() >
42671 Ty->getArrayElementType()->getScalarSizeInBits())
42674 // Check if the array's constant elements are suitable to our case.
42675 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
42676 bool ConstantsMatch = true;
42677 for (uint64_t j = 0; j < ArrayElementCount; j++) {
42678 ConstantInt *Elem =
42679 dyn_cast<ConstantInt>(Init->getAggregateElement(j));
42680 if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
42681 ConstantsMatch = false;
42685 if (!ConstantsMatch)
42688 // Do the transformation (For 32-bit type):
42689 // -> (and (load arr[idx]), inp)
42690 // <- (and inp, (srl 0xFFFFFFFF, (sub 32, idx)))
42691 // that will be replaced with one bzhi instruction.
42692 SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
42693 SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
42695 // Get the Node which indexes into the array.
42696 SDValue Index = getIndexFromUnindexedLoad(Ld);
42699 Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
42701 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
42702 Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
42704 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
42705 SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
42707 return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
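// Since arr[idx] == (1 << idx) - 1, the original (and (load arr[idx]), inp)
// keeps exactly the low 'idx' bits of 'inp'; the srl-of-all-ones form built
// here expresses the same value without the load.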
42715 // Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
42716 // Turn it into series of XORs and a setnp.
42717 static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
42718 const X86Subtarget &Subtarget) {
42719 EVT VT = N->getValueType(0);
42721 // We only support 64-bit and 32-bit. 64-bit requires special handling
42722 // unless the 64-bit popcnt instruction is legal.
42723 if (VT != MVT::i32 && VT != MVT::i64)
42726 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42727 if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
42730 SDValue N0 = N->getOperand(0);
42731 SDValue N1 = N->getOperand(1);
42733 // LHS needs to be a single use CTPOP.
42734 if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
42737 // RHS needs to be 1.
42738 if (!isOneConstant(N1))
42742 SDValue X = N0.getOperand(0);
42744 // If this is 64-bit, it's always best to xor the two 32-bit pieces together
42745 // even if we have popcnt.
42746 if (VT == MVT::i64) {
42747 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
42748 DAG.getNode(ISD::SRL, DL, VT, X,
42749 DAG.getConstant(32, DL, MVT::i8)));
42750 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
42751 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
42752 // Generate a 32-bit parity idiom. This will bring us back here if we need
42753 // to expand it too.
42754 SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
42755 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
42756 DAG.getConstant(1, DL, MVT::i32));
42757 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
42759 assert(VT == MVT::i32 && "Unexpected VT!");
42761 // Xor the high and low 16-bits together using a 32-bit operation.
42762 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
42763 DAG.getConstant(16, DL, MVT::i8));
42764 X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
42766 // Finally xor the low 2 bytes together and use an 8-bit flag-setting xor.
42767 // This should allow an h-reg to be used to save a shift.
42768 // FIXME: We only get an h-reg in 32-bit mode.
42769 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
42770 DAG.getNode(ISD::SRL, DL, VT, X,
42771 DAG.getConstant(8, DL, MVT::i8)));
42772 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
42773 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
42774 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
42776 // Copy the inverse of the parity flag into a register with setcc.
42777 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
42778 // Zero extend to original type.
42779 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
42783 // Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef,))), C)
42784 // where C is a mask containing the same number of bits as the setcc and
42785 // where the setcc will freely zero the upper bits of the k-register. We can replace the
42786 // undef in the concat with 0s and remove the AND. This mainly helps with
42787 // v2i1/v4i1 setccs being cast to scalar.
42788 static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
42789 const X86Subtarget &Subtarget) {
42790 assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
42792 EVT VT = N->getValueType(0);
42794 // Make sure this is an AND with a constant. We will check the value of the constant later.
42796 if (!isa<ConstantSDNode>(N->getOperand(1)))
42799 // This is implied by the ConstantSDNode.
42800 assert(!VT.isVector() && "Expected scalar VT!");
42802 if (N->getOperand(0).getOpcode() != ISD::BITCAST ||
42803 !N->getOperand(0).hasOneUse() ||
42804 !N->getOperand(0).getOperand(0).hasOneUse())
42807 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42808 SDValue Src = N->getOperand(0).getOperand(0);
42809 EVT SrcVT = Src.getValueType();
42810 if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
42811 !TLI.isTypeLegal(SrcVT))
42814 if (Src.getOpcode() != ISD::CONCAT_VECTORS)
42817 // We only care about the first subvector of the concat; we expect the
42818 // other subvectors to be ignored due to the AND if we make the change.
42819 SDValue SubVec = Src.getOperand(0);
42820 EVT SubVecVT = SubVec.getValueType();
42822 // First subvector should be a setcc with a legal result type. The RHS of the
42823 // AND should be a mask with this many bits.
42824 if (SubVec.getOpcode() != ISD::SETCC || !TLI.isTypeLegal(SubVecVT) ||
42825 !N->getConstantOperandAPInt(1).isMask(SubVecVT.getVectorNumElements()))
42828 EVT SetccVT = SubVec.getOperand(0).getValueType();
42829 if (!TLI.isTypeLegal(SetccVT) ||
42830 !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
42833 if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
42836 // We passed all the checks. Rebuild the concat_vectors with zeroes
42837 // and cast it back to VT.
42839 SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
42840 DAG.getConstant(0, dl, SubVecVT));
42842 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
42844 return DAG.getBitcast(VT, Concat);
42847 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
42848 TargetLowering::DAGCombinerInfo &DCI,
42849 const X86Subtarget &Subtarget) {
42850 EVT VT = N->getValueType(0);
42852 // If this is SSE1-only, convert to FAND to avoid scalarization.
42853 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
42854 return DAG.getBitcast(
42855 MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
42856 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
42857 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
42860 // Use a 32-bit and+zext if upper bits known zero.
42861 if (VT == MVT::i64 && Subtarget.is64Bit() &&
42862 !isa<ConstantSDNode>(N->getOperand(1))) {
42863 APInt HiMask = APInt::getHighBitsSet(64, 32);
42864 if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
42865 DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
42867 SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
42868 SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
42869 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
42870 DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
42874 // This must be done before legalization has expanded the ctpop.
42875 if (SDValue V = combineParity(N, DAG, Subtarget))
42878 // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
42879 // TODO: Support multiple SrcOps.
42880 if (VT == MVT::i1) {
42881 SmallVector<SDValue, 2> SrcOps;
42882 SmallVector<APInt, 2> SrcPartials;
42883 if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
42884 SrcOps.size() == 1) {
42886 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42887 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
42888 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
42889 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
42890 if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
42891 Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
42893 assert(SrcPartials[0].getBitWidth() == NumElts &&
42894 "Unexpected partial reduction mask");
42895 SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
42896 Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
42897 return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
42902 if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
42905 if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
42908 if (DCI.isBeforeLegalizeOps())
42911 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
42914 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
42917 if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
42920 if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
42923 if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
42926 // Attempt to recursively combine a bitmask AND with shuffles.
42927 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
42929 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
42933 // Attempt to combine a scalar bitmask AND with an extracted shuffle.
42934 if ((VT.getScalarSizeInBits() % 8) == 0 &&
42935 N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
42936 isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
42937 SDValue BitMask = N->getOperand(1);
42938 SDValue SrcVec = N->getOperand(0).getOperand(0);
42939 EVT SrcVecVT = SrcVec.getValueType();
42941 // Check that the constant bitmask masks whole bytes.
42943 SmallVector<APInt, 64> EltBits;
42944 if (VT == SrcVecVT.getScalarType() &&
42945 N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
42946 getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
42947 llvm::all_of(EltBits, [](APInt M) {
42948 return M.isNullValue() || M.isAllOnesValue();
42950 unsigned NumElts = SrcVecVT.getVectorNumElements();
42951 unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
42952 unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
42954 // Create a root shuffle mask from the byte mask and the extracted index.
42955 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
42956 for (unsigned i = 0; i != Scale; ++i) {
42959 int VecIdx = Scale * Idx + i;
42960 ShuffleMask[VecIdx] =
42961 EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
42964 if (SDValue Shuffle = combineX86ShufflesRecursively(
42965 {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
42966 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
42967 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
42968 N->getOperand(0).getOperand(1));
42975 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
42976 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
42977 const X86Subtarget &Subtarget) {
42978 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
42980 MVT VT = N->getSimpleValueType(0);
42981 if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
42984 SDValue N0 = peekThroughBitcasts(N->getOperand(0));
42985 SDValue N1 = peekThroughBitcasts(N->getOperand(1));
42986 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
42989 // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
42990 // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
42991 bool UseVPTERNLOG = (Subtarget.hasAVX512() && VT.is512BitVector()) ||
42992 Subtarget.hasVLX();
42993 if (!(Subtarget.hasXOP() || UseVPTERNLOG ||
42994 !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
42997 // Attempt to extract constant byte masks.
42998 APInt UndefElts0, UndefElts1;
42999 SmallVector<APInt, 32> EltBits0, EltBits1;
43000 if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
43003 if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
43007 for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
43008 // TODO - add UNDEF elts support.
43009 if (UndefElts0[i] || UndefElts1[i])
43011 if (EltBits0[i] != ~EltBits1[i])
43017 if (UseVPTERNLOG) {
43018 // Emit a VPTERNLOG node directly.
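// Immediate 0xCA encodes the ternary function (A & B) | (~A & C), i.e. a
// bitwise select controlled by the mask operand A.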
43019 SDValue A = DAG.getBitcast(VT, N0.getOperand(1));
43020 SDValue B = DAG.getBitcast(VT, N0.getOperand(0));
43021 SDValue C = DAG.getBitcast(VT, N1.getOperand(0));
43022 SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
43023 return DAG.getNode(X86ISD::VPTERNLOG, DL, VT, A, B, C, Imm);
43026 SDValue X = N->getOperand(0);
43028 DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
43029 DAG.getBitcast(VT, N1.getOperand(0)));
43030 return DAG.getNode(ISD::OR, DL, VT, X, Y);
43033 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
43034 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
43035 if (N->getOpcode() != ISD::OR)
43038 SDValue N0 = N->getOperand(0);
43039 SDValue N1 = N->getOperand(1);
43041 // Canonicalize AND to LHS.
43042 if (N1.getOpcode() == ISD::AND)
43045 // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
43046 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
43049 Mask = N1.getOperand(0);
43050 X = N1.getOperand(1);
43052 // Check to see if the mask appeared in both the AND and ANDNP.
43053 if (N0.getOperand(0) == Mask)
43054 Y = N0.getOperand(1);
43055 else if (N0.getOperand(1) == Mask)
43056 Y = N0.getOperand(0);
43060 // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
43061 // ANDNP combine allows other combines to happen that prevent matching.
43066 // Try to fold (or (and (m, y), (pandn m, x)))
43068 // into (vselect m, x, y).
43069 // As a special case, try to fold
43070 //   (or (and (m, (sub 0, x)), (pandn m, x)))
43072 // into (sub (xor X, M), M).
43073 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
43074 const X86Subtarget &Subtarget) {
43075 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
43077 EVT VT = N->getValueType(0);
43078 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
43079 (VT.is256BitVector() && Subtarget.hasInt256())))
43082 SDValue X, Y, Mask;
43083 if (!matchLogicBlend(N, X, Y, Mask))
43086 // Validate that X, Y, and Mask are bitcasts, and see through them.
43087 Mask = peekThroughBitcasts(Mask);
43088 X = peekThroughBitcasts(X);
43089 Y = peekThroughBitcasts(Y);
43091 EVT MaskVT = Mask.getValueType();
43092 unsigned EltBits = MaskVT.getScalarSizeInBits();
43094 // TODO: Attempt to handle floating point cases as well?
43095 if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
43100 // Attempt to combine to conditional negate: (sub (xor X, M), M)
43101 if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
43105 // PBLENDVB is only available on SSE 4.1.
43106 if (!Subtarget.hasSSE41())
43109 // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
43110 if (Subtarget.hasVLX())
43113 MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
43115 X = DAG.getBitcast(BlendVT, X);
43116 Y = DAG.getBitcast(BlendVT, Y);
43117 Mask = DAG.getBitcast(BlendVT, Mask);
43118 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
43119 return DAG.getBitcast(VT, Mask);
43122 // Helper function for combineOrCmpEqZeroToCtlzSrl
43126 // Transforms seteq(cmp x, 0) into srl(ctlz x), log2(bitsize(x)).
43127 // Input pattern is checked by caller.
43128 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
43129 SelectionDAG &DAG) {
43130 SDValue Cmp = Op.getOperand(1);
43131 EVT VT = Cmp.getOperand(0).getValueType();
43132 unsigned Log2b = Log2_32(VT.getSizeInBits());
43134 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
43135 // The result of the shift is true or false, and on X86, the 32-bit
43136 // encoding of shr and lzcnt is more desirable.
43137 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
43138 SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
43139 DAG.getConstant(Log2b, dl, MVT::i8));
43140 return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
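// e.g. for i32 x: ctlz(x) == 32 iff x == 0, so (srl (ctlz x), 5) is 1 exactly
// when x == 0 and 0 otherwise, matching zext(x == 0).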
43143 // Try to transform:
43144 // zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
43146 // into srl(or(ctlz(x), ctlz(y)), log2(bitsize(x))).
43147 // Will also attempt to match more generic cases, eg:
43148 // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
43149 // Only applies if the target supports the FastLZCNT feature.
43150 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
43151 TargetLowering::DAGCombinerInfo &DCI,
43152 const X86Subtarget &Subtarget) {
43153 if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
43156 auto isORCandidate = [](SDValue N) {
43157 return (N->getOpcode() == ISD::OR && N->hasOneUse());
43160 // Check the zero extend is extending to 32-bit or more. The code generated by
43161 // srl(ctlz) for 16-bit or less variants of the pattern would require extra
43162 // instructions to clear the upper bits.
43163 if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
43164 !isORCandidate(N->getOperand(0)))
43167 // Check the node matches: setcc(eq, cmp 0)
43168 auto isSetCCCandidate = [](SDValue N) {
43169 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
43170 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
43171 N->getOperand(1).getOpcode() == X86ISD::CMP &&
43172 isNullConstant(N->getOperand(1).getOperand(1)) &&
43173 N->getOperand(1).getValueType().bitsGE(MVT::i32);
43176 SDNode *OR = N->getOperand(0).getNode();
43177 SDValue LHS = OR->getOperand(0);
43178 SDValue RHS = OR->getOperand(1);
43180 // Save nodes matching or(or, setcc(eq, cmp 0)).
43181 SmallVector<SDNode *, 2> ORNodes;
43182 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
43183 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
43184 ORNodes.push_back(OR);
43185 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
43186 LHS = OR->getOperand(0);
43187 RHS = OR->getOperand(1);
43190 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
43191 if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
43192 !isORCandidate(SDValue(OR, 0)))
43195 // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
43197 // to or(srl(ctlz),srl(ctlz)).
43198 // The dag combiner can then fold it into:
43199 // srl(or(ctlz, ctlz)).
43200 EVT VT = OR->getValueType(0);
43201 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
43202 SDValue Ret, NewRHS;
43203 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
43204 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
43209 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
43210 while (ORNodes.size() > 0) {
43211 OR = ORNodes.pop_back_val();
43212 LHS = OR->getOperand(0);
43213 RHS = OR->getOperand(1);
43214 // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
43215 if (RHS->getOpcode() == ISD::OR)
43216 std::swap(LHS, RHS);
43217 NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
43220 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
43224 Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
43229 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
43230 TargetLowering::DAGCombinerInfo &DCI,
43231 const X86Subtarget &Subtarget) {
43232 SDValue N0 = N->getOperand(0);
43233 SDValue N1 = N->getOperand(1);
43234 EVT VT = N->getValueType(0);
43236 // If this is SSE1-only, convert to FOR to avoid scalarization.
43237 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
43238 return DAG.getBitcast(MVT::v4i32,
43239 DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
43240 DAG.getBitcast(MVT::v4f32, N0),
43241 DAG.getBitcast(MVT::v4f32, N1)));
43244 // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
43245 // TODO: Support multiple SrcOps.
43246 if (VT == MVT::i1) {
43247 SmallVector<SDValue, 2> SrcOps;
43248 SmallVector<APInt, 2> SrcPartials;
43249 if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
43250 SrcOps.size() == 1) {
43252 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43253 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
43254 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
43255 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
43256 if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
43257 Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
43259 assert(SrcPartials[0].getBitWidth() == NumElts &&
43260 "Unexpected partial reduction mask");
43261 SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
43262 SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
43263 Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
43264 return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
43269 if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
43272 if (DCI.isBeforeLegalizeOps())
43275 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
43278 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
43281 if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
43284 if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
43287 // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
43288 // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
43289 // iff the upper elements of the non-shifted arg are zero.
43290 // KUNPCK requires 16+ bool vector elements.
43291 if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
43292 unsigned NumElts = VT.getVectorNumElements();
43293 unsigned HalfElts = NumElts / 2;
43294 APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
43295 if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
43296 N1.getConstantOperandAPInt(1) == HalfElts &&
43297 DAG.MaskedValueIsZero(N0, APInt(1, 1), UpperElts)) {
43299 return DAG.getNode(
43300 ISD::CONCAT_VECTORS, dl, VT,
43301 extractSubVector(N0, 0, DAG, dl, HalfElts),
43302 extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
43304 if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
43305 N0.getConstantOperandAPInt(1) == HalfElts &&
43306 DAG.MaskedValueIsZero(N1, APInt(1, 1), UpperElts)) {
43308 return DAG.getNode(
43309 ISD::CONCAT_VECTORS, dl, VT,
43310 extractSubVector(N1, 0, DAG, dl, HalfElts),
43311 extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
43315 // Attempt to recursively combine an OR of shuffles.
43316 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
43318 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
43325 /// Try to turn tests against the signbit in the form of:
43326 /// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
/// into SETGT(X, -1).
43329 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
43330 // This is only worth doing if the output type is i8 or i1.
43331 EVT ResultType = N->getValueType(0);
43332 if (ResultType != MVT::i8 && ResultType != MVT::i1)
43335 SDValue N0 = N->getOperand(0);
43336 SDValue N1 = N->getOperand(1);
43338 // We should be performing an xor against a truncated shift.
43339 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
43342 // Make sure we are performing an xor against one.
43343 if (!isOneConstant(N1))
43346 // SetCC on x86 zero extends so only act on this if it's a logical shift.
43347 SDValue Shift = N0.getOperand(0);
43348 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
43351 // Make sure we are truncating from one of i16, i32 or i64.
43352 EVT ShiftTy = Shift.getValueType();
43353 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
43356 // Make sure the shift amount extracts the sign bit.
43357 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
43358 Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
43361 // Create a greater-than comparison against -1.
43362 // N.B. Using SETGE against 0 works but we want a canonical-looking
43363 // comparison, and using SETGT matches what TranslateX86CC expects.
SDLoc DL(N);
43365 SDValue ShiftOp = Shift.getOperand(0);
43366 EVT ShiftOpTy = ShiftOp.getValueType();
43367 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43368 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
43369 *DAG.getContext(), ResultType);
43370 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
43371 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
43372 if (SetCCResultType != ResultType)
43373 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
43377 /// Turn vector tests of the signbit in the form of:
43378 /// xor (sra X, elt_size(X)-1), -1
/// into pcmpgt X, -1.
43382 /// This should be called before type legalization because the pattern may not
43383 /// persist after that.
43384 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
43385 const X86Subtarget &Subtarget) {
43386 EVT VT = N->getValueType(0);
43387 if (!VT.isSimple())
43390 switch (VT.getSimpleVT().SimpleTy) {
43391 default: return SDValue();
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
43395 case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
43399 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
}
43402 // There must be a shift right algebraic before the xor, and the xor must be a
43403 // 'not' operation.
43404 SDValue Shift = N->getOperand(0);
43405 SDValue Ones = N->getOperand(1);
43406 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
43407 !ISD::isBuildVectorAllOnes(Ones.getNode()))
43410 // The shift should be smearing the sign bit across each vector element.
auto *ShiftAmt =
43412 isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
if (!ShiftAmt ||
43414 ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
return SDValue();
43417 // Create a greater-than comparison against -1. We don't use the more obvious
43418 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
43419 return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
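// Illustrative example (not from the original source): for v4i32,
//   xor (sra X, 31), (build_vector -1, -1, -1, -1)
// computes "element is non-negative", and the SETGT above lets this select
// as a single PCMPGTD X, <all-ones> instead of a shift followed by a NOT.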
43422 /// Detect patterns of truncation with unsigned saturation:
43424 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
43425 /// Return the source value x to be truncated or SDValue() if the pattern was
/// not matched.
43428 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
43429 /// where C1 >= 0 and C2 is unsigned max of destination type.
/// or:
43431 /// (truncate (smax (smin (x, C2), C1)) to dest_type)
43432 /// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
43434 /// These two patterns are equivalent to:
43435 /// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
43436 /// So return the smax(x, C1) value to be truncated or SDValue() if the
43437 /// pattern was not matched.
43438 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
43440 EVT InVT = In.getValueType();
43442 // Saturation with truncation. We truncate from InVT to VT.
43443 assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
43444 "Unexpected types for truncate operation");
43446 // Match min/max and return limit value as a parameter.
43447 auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
43448 if (V.getOpcode() == Opcode &&
43449 ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
43450 return V.getOperand(0);
return SDValue();
};

APInt C1, C2;
43455 if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
43456 // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
43457 // the element size of the destination type.
43458 if (C2.isMask(VT.getScalarSizeInBits()))
return UMin;

43461 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
43462 if (MatchMinMax(SMin, ISD::SMAX, C1))
43463 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
return SMin;

43466 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
43467 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
43468 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
C1.ule(C2))
43470 return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));

return SDValue();
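// Worked example (illustrative): truncating v8i32 to v8i16, the node
//   trunc (umin x, (splat 65535))
// matches pattern 1 above because 65535 is a 16-bit mask, so 'x' is returned
// and the caller can emit an unsigned-saturating pack or VPMOVUS truncate.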
43476 /// Detect patterns of truncation with signed saturation:
43477 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
43478 /// signed_max_of_dest_type)) to dest_type)
43480 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
43481 /// signed_min_of_dest_type)) to dest_type).
43482 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
43483 /// Return the source value to be truncated or SDValue() if the pattern was not matched.
43485 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
43486 unsigned NumDstBits = VT.getScalarSizeInBits();
43487 unsigned NumSrcBits = In.getScalarValueSizeInBits();
43488 assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
43490 auto MatchMinMax = [](SDValue V, unsigned Opcode,
43491 const APInt &Limit) -> SDValue {
APInt C;
43493 if (V.getOpcode() == Opcode &&
43494 ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
43495 return V.getOperand(0);
return SDValue();
};

43499 APInt SignedMax, SignedMin;
if (MatchPackUS) {
43501 SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
43502 SignedMin = APInt(NumSrcBits, 0);
} else {
43504 SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
43505 SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
}

43508 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
43509 if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
return SMax;

43512 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
43513 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
return SMin;

return SDValue();
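// Worked example (illustrative): for a v8i32 -> v8i16 truncate, the node
//   trunc (smin (smax x, (splat -32768)), (splat 32767))
// clamps each element to the signed i16 range, so 'x' is returned and the
// caller can emit a signed-saturating PACKSSDW.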
43519 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
43521 const X86Subtarget &Subtarget) {
43522 if (!Subtarget.hasSSE2() || !VT.isVector())
43525 EVT SVT = VT.getVectorElementType();
43526 EVT InVT = In.getValueType();
43527 EVT InSVT = InVT.getVectorElementType();
43529 // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
43530 // split across two registers, we can use a packusdw+perm to clamp to 0-65535
43531 // and concatenate at the same time. Then we can use a final vpmovuswb to
// clip to 0-255.
43533 if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
43534 InVT == MVT::v16i32 && VT == MVT::v16i8) {
43535 if (auto USatVal = detectSSatPattern(In, VT, true)) {
43536 // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
43537 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
43538 DL, DAG, Subtarget);
43539 assert(Mid && "Failed to pack!");
43540 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
43544 // vXi32 truncate instructions are available with AVX512F.
43545 // vXi16 truncate instructions are only available with AVX512BW.
43546 // For 256-bit or smaller vectors, we require VLX.
43547 // FIXME: We could widen truncates to 512 to remove the VLX restriction.
43548 // If the result type is 256 bits or larger and we have disabled 512-bit
43549 // registers, we should go ahead and use the pack instructions if possible.
43550 bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
43551 (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
43552 (InVT.getSizeInBits() > 128) &&
43553 (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
43554 !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
43556 if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
43557 VT.getSizeInBits() >= 64 &&
43558 (SVT == MVT::i8 || SVT == MVT::i16) &&
43559 (InSVT == MVT::i16 || InSVT == MVT::i32)) {
43560 if (auto USatVal = detectSSatPattern(In, VT, true)) {
43561 // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
43562 // Only do this when the result is at least 64 bits or we'll be leaving
43563 // dangling PACKSSDW nodes.
43564 if (SVT == MVT::i8 && InSVT == MVT::i32) {
43565 EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
43566 VT.getVectorNumElements());
43567 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
43569 assert(Mid && "Failed to pack!");
43570 SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
43572 assert(V && "Failed to pack!");
43574 } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
43575 return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
43578 if (auto SSatVal = detectSSatPattern(In, VT))
43579 return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
43583 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43584 if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
43585 Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
43586 unsigned TruncOpc = 0;
43588 if (auto SSatVal = detectSSatPattern(In, VT)) {
43590 TruncOpc = X86ISD::VTRUNCS;
43591 } else if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) {
43593 TruncOpc = X86ISD::VTRUNCUS;
43596 unsigned ResElts = VT.getVectorNumElements();
43597 // If the input type is less than 512 bits and we don't have VLX, we need
43598 // to widen to 512 bits.
43599 if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
43600 unsigned NumConcats = 512 / InVT.getSizeInBits();
43601 ResElts *= NumConcats;
43602 SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
43603 ConcatOps[0] = SatVal;
43604 InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
43605 NumConcats * InVT.getVectorNumElements());
43606 SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
43608 // Widen the result if it's narrower than 128 bits.
43609 if (ResElts * SVT.getSizeInBits() < 128)
43610 ResElts = 128 / SVT.getSizeInBits();
43611 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
43612 SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
43613 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
43614 DAG.getIntPtrConstant(0, DL));
43621 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
43622 /// which is c = (a + b + 1) / 2, and replaces this operation with the
43623 /// efficient X86ISD::AVG instruction.
43624 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
43625 const X86Subtarget &Subtarget,
43627 if (!VT.isVector())
43629 EVT InVT = In.getValueType();
43630 unsigned NumElems = VT.getVectorNumElements();
43632 EVT ScalarVT = VT.getVectorElementType();
43633 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
43634 NumElems >= 2 && isPowerOf2_32(NumElems)))
43637 // InScalarVT is the intermediate type in the AVG pattern, and it should be
43638 // wider than the original input type (i8/i16).
43639 EVT InScalarVT = InVT.getVectorElementType();
43640 if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
43643 if (!Subtarget.hasSSE2())
43646 // Detect the following pattern:
43648 // %1 = zext <N x i8> %a to <N x i32>
43649 // %2 = zext <N x i8> %b to <N x i32>
43650 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
43651 // %4 = add nuw nsw <N x i32> %3, %2
43652 // %5 = lshr <N x i32> %4, <i32 1 x N>
43653 // %6 = trunc <N x i32> %5 to <N x i8>
43655 // In AVX512, the last instruction can also be a trunc store.
43656 if (In.getOpcode() != ISD::SRL)
43659 // A lambda checking whether the given SDValue is a constant vector and each
43660 // element is in the range [Min, Max].
43661 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
43662 return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
43663 return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
43667 // Check if each element of the vector is right-shifted by one.
43668 auto LHS = In.getOperand(0);
43669 auto RHS = In.getOperand(1);
43670 if (!IsConstVectorInRange(RHS, 1, 1))
43672 if (LHS.getOpcode() != ISD::ADD)
43675 // Detect a pattern of a + b + 1 where the order doesn't matter.
43676 SDValue Operands[3];
43677 Operands[0] = LHS.getOperand(0);
43678 Operands[1] = LHS.getOperand(1);
43680 auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43681 ArrayRef<SDValue> Ops) {
43682 return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
43685 // Take care of the case when one of the operands is a constant vector whose
43686 // element is in the range [1, 256].
43687 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
43688 Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
43689 Operands[0].getOperand(0).getValueType() == VT) {
43690 // The pattern is detected. Subtract one from the constant vector, then
43691 // demote it and emit X86ISD::AVG instruction.
43692 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
43693 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
43694 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
43695 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
43696 { Operands[0].getOperand(0), Operands[1] },
43700 // Matches 'add like' patterns: add(Op0,Op1) + zext(or(Op0,Op1)).
43701 // Match the or case only if it's 'add-like', i.e. it can be replaced by an add.
43702 auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
43703 if (ISD::ADD == V.getOpcode()) {
43704 Op0 = V.getOperand(0);
43705 Op1 = V.getOperand(1);
43708 if (ISD::ZERO_EXTEND != V.getOpcode())
43710 V = V.getOperand(0);
43711 if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
43712 !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
43714 Op0 = V.getOperand(0);
43715 Op1 = V.getOperand(1);
43720 if (FindAddLike(Operands[0], Op0, Op1))
43721 std::swap(Operands[0], Operands[1]);
43722 else if (!FindAddLike(Operands[1], Op0, Op1))
43727 // Now we have three operands of two additions. Check that one of them is a
43728 // constant vector with ones, and the other two can be promoted from i8/i16.
43729 for (int i = 0; i < 3; ++i) {
43730 if (!IsConstVectorInRange(Operands[i], 1, 1))
43732 std::swap(Operands[i], Operands[2]);
43734 // Check if Operands[0] and Operands[1] are results of type promotion.
43735 for (int j = 0; j < 2; ++j)
43736 if (Operands[j].getValueType() != VT) {
43737 if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
43738 Operands[j].getOperand(0).getValueType() != VT)
43740 Operands[j] = Operands[j].getOperand(0);
43743 // The pattern is detected, emit X86ISD::AVG instruction(s).
43744 return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
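// Arithmetic sanity check (illustrative): with i8 lanes a = 200 and b = 100,
// the widened computation gives (200 + 100 + 1) >> 1 = 150, which is the
// rounded average PAVGB/PAVGW produce; doing the add directly in i8 would
// have wrapped, which is why the pattern is only matched on zero-extended
// operands.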
43751 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
43752 TargetLowering::DAGCombinerInfo &DCI,
43753 const X86Subtarget &Subtarget) {
43754 LoadSDNode *Ld = cast<LoadSDNode>(N);
43755 EVT RegVT = Ld->getValueType(0);
43756 EVT MemVT = Ld->getMemoryVT();
43758 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43760 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
43761 // into two 16-byte operations. Also split non-temporal aligned loads on
43762 // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
43763 ISD::LoadExtType Ext = Ld->getExtensionType();
43765 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
43766 Ext == ISD::NON_EXTLOAD &&
43767 ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
43768 Ld->getAlignment() >= 16) ||
43769 (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
43770 *Ld->getMemOperand(), &Fast) &&
43772 unsigned NumElems = RegVT.getVectorNumElements();
43776 unsigned HalfOffset = 16;
43777 SDValue Ptr1 = Ld->getBasePtr();
43778 SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfOffset, dl);
43779 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
43782 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
43783 Ld->getOriginalAlign(),
43784 Ld->getMemOperand()->getFlags());
43785 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
43786 Ld->getPointerInfo().getWithOffset(HalfOffset),
43787 Ld->getOriginalAlign(),
43788 Ld->getMemOperand()->getFlags());
43789 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
43790 Load1.getValue(1), Load2.getValue(1));
43792 SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
43793 return DCI.CombineTo(N, NewVec, TF, true);
43796 // Bool vector load - attempt to cast to an integer, as we have good
43797 // (vXiY *ext(vXi1 bitcast(iX))) handling.
43798 if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
43799 RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
43800 unsigned NumElts = RegVT.getVectorNumElements();
43801 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
43802 if (TLI.isTypeLegal(IntVT)) {
43803 SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
43804 Ld->getPointerInfo(),
43805 Ld->getOriginalAlign(),
43806 Ld->getMemOperand()->getFlags());
43807 SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
43808 return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
43812 // Cast ptr32 and ptr64 pointers to the default address space before a load.
43813 unsigned AddrSpace = Ld->getAddressSpace();
43814 if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
43815 AddrSpace == X86AS::PTR32_UPTR) {
43816 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
43817 if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
43819 DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
43820 return DAG.getLoad(RegVT, dl, Ld->getChain(), Cast, Ld->getPointerInfo(),
43821 Ld->getOriginalAlign(),
43822 Ld->getMemOperand()->getFlags());
43829 /// If V is a build vector of boolean constants and exactly one of those
43830 /// constants is true, return the operand index of that true element.
43831 /// Otherwise, return -1.
43832 static int getOneTrueElt(SDValue V) {
43833 // This needs to be a build vector of booleans.
43834 // TODO: Checking for the i1 type matches the IR definition for the mask,
43835 // but the mask check could be loosened to i8 or other types. That might
43836 // also require checking more than 'allOnesValue'; e.g., the x86 HW
43837 // instructions only require that the MSB is set for each mask element.
43838 // The ISD::MSTORE comments/definition do not specify how the mask operand is formatted.
43840 auto *BV = dyn_cast<BuildVectorSDNode>(V);
43841 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
43844 int TrueIndex = -1;
43845 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
43846 for (unsigned i = 0; i < NumElts; ++i) {
43847 const SDValue &Op = BV->getOperand(i);
43850 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
43853 if (ConstNode->getAPIntValue().isAllOnesValue()) {
43854 // If we already found a one, this is too many.
43855 if (TrueIndex >= 0)
43863 /// Given a masked memory load/store operation, return true if it has one mask
43864 /// bit set. If it has one mask bit set, then also return the memory address of
43865 /// the scalar element to load/store, the vector index to insert/extract that
43866 /// scalar element, and the alignment for the scalar memory access.
43867 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
43868 SelectionDAG &DAG, SDValue &Addr,
43869 SDValue &Index, unsigned &Alignment) {
43870 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
43871 if (TrueMaskElt < 0)
43874 // Get the address of the one scalar element that is specified by the mask
43875 // using the appropriate offset from the base pointer.
43876 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
43877 Addr = MaskedOp->getBasePtr();
43878 if (TrueMaskElt != 0) {
43879 unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
43880 Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
43883 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
43884 Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
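// Worked example (illustrative): for a v4f32 masked load with constant mask
// <i1 0, i1 0, i1 1, i1 0>, TrueMaskElt is 2, so Addr becomes BasePtr + 8
// (element 2 of 4-byte elements), Index is the constant 2, and Alignment is
// MinAlign(original alignment, 4).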
43888 /// If exactly one element of the mask is set for a non-extending masked load,
43889 /// it is a scalar load and vector insert.
43890 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
43891 /// mask have already been optimized in IR, so we don't bother with those here.
43893 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
43894 TargetLowering::DAGCombinerInfo &DCI) {
43895 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
43896 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
43897 // However, some target hooks may need to be added to know when the transform
43898 // is profitable. Endianness would also have to be considered.
43900 SDValue Addr, VecIndex;
43901 unsigned Alignment;
43902 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
43905 // Load the one scalar element that is specified by the mask using the
43906 // appropriate offset from the base pointer.
43908 EVT VT = ML->getValueType(0);
43909 EVT EltVT = VT.getVectorElementType();
43911 DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
43912 Alignment, ML->getMemOperand()->getFlags());
43914 // Insert the loaded element into the appropriate place in the vector.
43915 SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
43916 ML->getPassThru(), Load, VecIndex);
43917 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
43921 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
43922 TargetLowering::DAGCombinerInfo &DCI) {
43923 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
43924 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
43928 EVT VT = ML->getValueType(0);
43930 // If we are loading the first and last elements of a vector, it is safe and
43931 // always faster to load the whole vector. Replace the masked load with a
43932 // vector load and select.
43933 unsigned NumElts = VT.getVectorNumElements();
43934 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
43935 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
43936 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
43937 if (LoadFirstElt && LoadLastElt) {
43938 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
43939 ML->getMemOperand());
43940 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
43941 ML->getPassThru());
43942 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
43945 // Convert a masked load with a constant mask into a masked load and a select.
43946 // This allows the select operation to use a faster kind of select instruction
43947 // (for example, vblendvps -> vblendps).
43949 // Don't try this if the pass-through operand is already undefined. That would
43950 // cause an infinite loop because that's what we're about to create.
43951 if (ML->getPassThru().isUndef())
43954 if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
43957 // The new masked load has an undef pass-through operand. The select uses the
43958 // original pass-through operand.
43959 SDValue NewML = DAG.getMaskedLoad(
43960 VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
43961 DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
43962 ML->getAddressingMode(), ML->getExtensionType());
43963 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
43964 ML->getPassThru());
43966 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
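// Illustrative effect of the transform above (not from the original source):
// a masked load with a constant mask and a live pass-through value becomes a
// masked load with an undef pass-through followed by a vselect on the same
// constant mask, so the select can lower to an immediate blend (e.g.
// VBLENDPS) rather than the variable blend VBLENDVPS.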
43969 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
43970 TargetLowering::DAGCombinerInfo &DCI,
43971 const X86Subtarget &Subtarget) {
43972 auto *Mld = cast<MaskedLoadSDNode>(N);
43974 // TODO: Expanding load with constant mask may be optimized as well.
43975 if (Mld->isExpandingLoad())
43978 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
43979 if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
43982 // TODO: Do some AVX512 subsets benefit from this transform?
43983 if (!Subtarget.hasAVX512())
43984 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
43988 // If the mask value has been legalized to a non-boolean vector, try to
43989 // simplify ops leading up to it. We only demand the MSB of each lane.
43990 SDValue Mask = Mld->getMask();
43991 if (Mask.getScalarValueSizeInBits() != 1) {
43992 EVT VT = Mld->getValueType(0);
43993 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43994 APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
43995 if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
43996 if (N->getOpcode() != ISD::DELETED_NODE)
43997 DCI.AddToWorklist(N);
43998 return SDValue(N, 0);
44000 if (SDValue NewMask =
44001 TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
44002 return DAG.getMaskedLoad(
44003 VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
44004 NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
44005 Mld->getAddressingMode(), Mld->getExtensionType());
44011 /// If exactly one element of the mask is set for a non-truncating masked store,
44012 /// it is a vector extract and scalar store.
44013 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
44014 /// mask have already been optimized in IR, so we don't bother with those here.
44015 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
44016 SelectionDAG &DAG) {
44017 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
44018 // However, some target hooks may need to be added to know when the transform
44019 // is profitable. Endianness would also have to be considered.
44021 SDValue Addr, VecIndex;
44022 unsigned Alignment;
44023 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
44026 // Extract the one scalar element that is actually being stored.
44028 EVT VT = MS->getValue().getValueType();
44029 EVT EltVT = VT.getVectorElementType();
44030 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
44031 MS->getValue(), VecIndex);
44033 // Store that element at the appropriate offset from the base pointer.
44034 return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
44035 Alignment, MS->getMemOperand()->getFlags());
44038 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
44039 TargetLowering::DAGCombinerInfo &DCI,
44040 const X86Subtarget &Subtarget) {
44041 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
44042 if (Mst->isCompressingStore())
44045 EVT VT = Mst->getValue().getValueType();
44047 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44049 if (Mst->isTruncatingStore())
44052 if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
44053 return ScalarStore;
44055 // If the mask value has been legalized to a non-boolean vector, try to
44056 // simplify ops leading up to it. We only demand the MSB of each lane.
44057 SDValue Mask = Mst->getMask();
44058 if (Mask.getScalarValueSizeInBits() != 1) {
44059 APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
44060 if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
44061 if (N->getOpcode() != ISD::DELETED_NODE)
44062 DCI.AddToWorklist(N);
44063 return SDValue(N, 0);
44065 if (SDValue NewMask =
44066 TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
44067 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
44068 Mst->getBasePtr(), Mst->getOffset(), NewMask,
44069 Mst->getMemoryVT(), Mst->getMemOperand(),
44070 Mst->getAddressingMode());
44073 SDValue Value = Mst->getValue();
44074 if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
44075 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
44076 Mst->getMemoryVT())) {
44077 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
44078 Mst->getBasePtr(), Mst->getOffset(), Mask,
44079 Mst->getMemoryVT(), Mst->getMemOperand(),
44080 Mst->getAddressingMode(), true);
44086 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
44087 TargetLowering::DAGCombinerInfo &DCI,
44088 const X86Subtarget &Subtarget) {
44089 StoreSDNode *St = cast<StoreSDNode>(N);
44090 EVT StVT = St->getMemoryVT();
44092 SDValue StoredVal = St->getValue();
44093 EVT VT = StoredVal.getValueType();
44094 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44096 // Convert a store of vXi1 into a store of iX and a bitcast.
44097 if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
44098 VT.getVectorElementType() == MVT::i1) {
44100 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
44101 StoredVal = DAG.getBitcast(NewVT, StoredVal);
44103 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
44104 St->getPointerInfo(), St->getOriginalAlign(),
44105 St->getMemOperand()->getFlags());
44108 // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
44109 // This will avoid a copy to k-register.
44110 if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
44111 StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
44112 StoredVal.getOperand(0).getValueType() == MVT::i8) {
44113 return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
44114 St->getBasePtr(), St->getPointerInfo(),
44115 St->getOriginalAlign(),
44116 St->getMemOperand()->getFlags());
44119 // Widen v2i1/v4i1 stores to v8i1.
44120 if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
44121 Subtarget.hasAVX512()) {
44122 unsigned NumConcats = 8 / VT.getVectorNumElements();
44123 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
44124 Ops[0] = StoredVal;
44125 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
44126 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
44127 St->getPointerInfo(), St->getOriginalAlign(),
44128 St->getMemOperand()->getFlags());
44131 // Turn vXi1 stores of constants into a scalar store.
44132 if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
44133 VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
44134 ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
44135 // If it's a v64i1 store without 64-bit support, we need two stores.
44136 if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
44137 SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
44138 StoredVal->ops().slice(0, 32));
44139 Lo = combinevXi1ConstantToInteger(Lo, DAG);
44140 SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
44141 StoredVal->ops().slice(32, 32));
44142 Hi = combinevXi1ConstantToInteger(Hi, DAG);
44144 SDValue Ptr0 = St->getBasePtr();
44145 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
44148 DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
44149 St->getOriginalAlign(),
44150 St->getMemOperand()->getFlags());
44152 DAG.getStore(St->getChain(), dl, Hi, Ptr1,
44153 St->getPointerInfo().getWithOffset(4),
44154 St->getOriginalAlign(),
44155 St->getMemOperand()->getFlags());
44156 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
44159 StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
44160 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
44161 St->getPointerInfo(), St->getOriginalAlign(),
44162 St->getMemOperand()->getFlags());
44165 // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
44166 // Sandy Bridge, perform two 16-byte stores.
44168 if (VT.is256BitVector() && StVT == VT &&
44169 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
44170 *St->getMemOperand(), &Fast) &&
44172 unsigned NumElems = VT.getVectorNumElements();
44176 return splitVectorStore(St, DAG);
44179 // Split under-aligned vector non-temporal stores.
44180 if (St->isNonTemporal() && StVT == VT &&
44181 St->getAlignment() < VT.getStoreSize()) {
44182 // ZMM/YMM nt-stores - either it can be stored as a series of shorter
44183 // vectors or the legalizer can scalarize it to use MOVNTI.
44184 if (VT.is256BitVector() || VT.is512BitVector()) {
44185 unsigned NumElems = VT.getVectorNumElements();
44188 return splitVectorStore(St, DAG);
44191 // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64 stores.
44193 if (VT.is128BitVector() && Subtarget.hasSSE2()) {
44194 MVT NTVT = Subtarget.hasSSE4A()
44196 : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
44197 return scalarizeVectorStore(St, NTVT, DAG);
44201 // Try to optimize v16i16->v16i8 truncating stores when BWI is not
44202 // supported, but AVX512F is, by extending to v16i32 and truncating.
44203 if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
44204 St->getValue().getOpcode() == ISD::TRUNCATE &&
44205 St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
44206 TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
44207 St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
44208 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
44209 return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
44210 MVT::v16i8, St->getMemOperand());
44213 // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
44214 if (!St->isTruncatingStore() && StoredVal.hasOneUse() &&
44215 (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
44216 StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
44217 TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
44218 bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
44219 return EmitTruncSStore(IsSigned, St->getChain(),
44220 dl, StoredVal.getOperand(0), St->getBasePtr(),
44221 VT, St->getMemOperand(), DAG);
44224 // Optimize trunc store (of multiple scalars) to shuffle and store.
44225 // First, pack all of the elements in one place. Next, store to memory
44226 // in fewer chunks.
44227 if (St->isTruncatingStore() && VT.isVector()) {
44228 // Check if we can detect an AVG pattern from the truncation. If yes,
44229 // replace the trunc store with a normal store of the X86ISD::AVG result.
44231 if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
44232 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
44234 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
44235 St->getPointerInfo(), St->getOriginalAlign(),
44236 St->getMemOperand()->getFlags());
44238 if (TLI.isTruncStoreLegal(VT, StVT)) {
44239 if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
44240 return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
44241 dl, Val, St->getBasePtr(),
44242 St->getMemoryVT(), St->getMemOperand(), DAG);
44243 if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
44245 return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
44246 dl, Val, St->getBasePtr(),
44247 St->getMemoryVT(), St->getMemOperand(), DAG);
44253 // Cast ptr32 and ptr64 pointers to the default address space before a store.
44254 unsigned AddrSpace = St->getAddressSpace();
44255 if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
44256 AddrSpace == X86AS::PTR32_UPTR) {
44257 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
44258 if (PtrVT != St->getBasePtr().getSimpleValueType()) {
44260 DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
44261 return DAG.getStore(St->getChain(), dl, StoredVal, Cast,
44262 St->getPointerInfo(), St->getOriginalAlign(),
44263 St->getMemOperand()->getFlags(), St->getAAInfo());
44267 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
44268 // the FP state in cases where an emms may be missing.
44269 // A preferable solution to the general problem is to figure out the right
44270 // places to insert EMMS. This qualifies as a quick hack.
44272 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
44273 if (VT.getSizeInBits() != 64)
44276 const Function &F = DAG.getMachineFunction().getFunction();
44277 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
44279 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
44280 if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
44281 isa<LoadSDNode>(St->getValue()) &&
44282 cast<LoadSDNode>(St->getValue())->isSimple() &&
44283 St->getChain().hasOneUse() && St->isSimple()) {
44284 LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
44286 if (!ISD::isNormalLoad(Ld))
44289 // Avoid the transformation if there are multiple uses of the loaded value.
44290 if (!Ld->hasNUsesOfValue(1, 0))
44295 // Lower to a single movq load/store pair.
44296 SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
44297 Ld->getBasePtr(), Ld->getMemOperand());
44299 // Make sure new load is placed in same chain order.
44300 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
44301 return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
44302 St->getMemOperand());
44305 // This is similar to the above case, but here we handle a scalar 64-bit
44306 // integer store that is extracted from a vector on a 32-bit target.
44307 // If we have SSE2, then we can treat it like a floating-point double
44308 // to get past legalization. The execution dependencies fixup pass will
44309 // choose the optimal machine instruction for the store if this really is
44310 // an integer or v2f32 rather than an f64.
44311 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
44312 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
44313 SDValue OldExtract = St->getOperand(1);
44314 SDValue ExtOp0 = OldExtract.getOperand(0);
44315 unsigned VecSize = ExtOp0.getValueSizeInBits();
44316 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
44317 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
44318 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
44319 BitCast, OldExtract.getOperand(1));
44320 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
44321 St->getPointerInfo(), St->getOriginalAlign(),
44322 St->getMemOperand()->getFlags());
44328 static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
44329 TargetLowering::DAGCombinerInfo &DCI,
44330 const X86Subtarget &Subtarget) {
44331 auto *St = cast<MemIntrinsicSDNode>(N);
44333 SDValue StoredVal = N->getOperand(1);
44334 MVT VT = StoredVal.getSimpleValueType();
44335 EVT MemVT = St->getMemoryVT();
44337 // Figure out which elements we demand.
44338 unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
44339 APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);
44341 APInt KnownUndef, KnownZero;
44342 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44343 if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, KnownUndef,
44345 if (N->getOpcode() != ISD::DELETED_NODE)
44346 DCI.AddToWorklist(N);
44347 return SDValue(N, 0);
44353 /// Return 'true' if this vector operation is "horizontal"
44354 /// and return the operands for the horizontal operation in LHS and RHS. A
44355 /// horizontal operation performs the binary operation on successive elements
44356 /// of its first operand, then on successive elements of its second operand,
44357 /// returning the resulting values in a vector. For example, if
44358 /// A = < float a0, float a1, float a2, float a3 >
44360 /// B = < float b0, float b1, float b2, float b3 >
44361 /// then the result of doing a horizontal operation on A and B is
44362 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
44363 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
44364 /// A horizontal-op B, for some already available A and B, and if so then LHS is
44365 /// set to A, RHS to B, and the routine returns 'true'.
44366 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
44367 const X86Subtarget &Subtarget, bool IsCommutative,
44368 SmallVectorImpl<int> &PostShuffleMask) {
44369 // If either operand is undef, bail out. The binop should be simplified.
44370 if (LHS.isUndef() || RHS.isUndef())
44373 // Look for the following pattern:
44374 // A = < float a0, float a1, float a2, float a3 >
44375 // B = < float b0, float b1, float b2, float b3 >
44377 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
44378 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
44379 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
44380 // which is A horizontal-op B.
44382 MVT VT = LHS.getSimpleValueType();
44383 assert((VT.is128BitVector() || VT.is256BitVector()) &&
44384 "Unsupported vector type for horizontal add/sub");
44385 unsigned NumElts = VT.getVectorNumElements();
44387 // TODO - can we make a general helper method that does all of this for us?
44388 auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
44389 SmallVectorImpl<int> &ShuffleMask) {
44390 if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
44391 if (!Op.getOperand(0).isUndef())
44392 N0 = Op.getOperand(0);
44393 if (!Op.getOperand(1).isUndef())
44394 N1 = Op.getOperand(1);
44395 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
44396 ShuffleMask.append(Mask.begin(), Mask.end());
44399 bool UseSubVector = false;
44400 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
44401 Op.getOperand(0).getValueType().is256BitVector() &&
44402 llvm::isNullConstant(Op.getOperand(1))) {
44403 Op = Op.getOperand(0);
44404 UseSubVector = true;
44407 SmallVector<SDValue, 2> SrcOps;
44408 SmallVector<int, 16> SrcShuffleMask;
44409 SDValue BC = peekThroughBitcasts(Op);
44410 if (isTargetShuffle(BC.getOpcode()) &&
44411 getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
44412 SrcOps, SrcShuffleMask, IsUnary)) {
44413 if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
44414 SrcOps.size() <= 2) {
44415 N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
44416 N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
44417 ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
44419 if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
44420 SrcOps.size() == 1) {
44421 N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
44422 N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
44423 ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
44424 ShuffleMask.append(Mask.begin(), Mask.end());
44429 // View LHS in the form
44430 // LHS = VECTOR_SHUFFLE A, B, LMask
44431 // If LHS is not a shuffle, then pretend it is the identity shuffle:
44432 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
44433 // NOTE: A default initialized SDValue represents an UNDEF of type VT.
44435 SmallVector<int, 16> LMask;
44436 GetShuffle(LHS, A, B, LMask);
44438 // Likewise, view RHS in the form
44439 // RHS = VECTOR_SHUFFLE C, D, RMask
44441 SmallVector<int, 16> RMask;
44442 GetShuffle(RHS, C, D, RMask);
44444 // At least one of the operands should be a vector shuffle.
44445 unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
44446 if (NumShuffles == 0)
44449 if (LMask.empty()) {
44451 for (unsigned i = 0; i != NumElts; ++i)
44452 LMask.push_back(i);
44455 if (RMask.empty()) {
44457 for (unsigned i = 0; i != NumElts; ++i)
44458 RMask.push_back(i);
44461 // Avoid 128-bit lane crossing if pre-AVX2 and FP (integer will split).
44462 if (!Subtarget.hasAVX2() && VT.isFloatingPoint() &&
44463 (isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), LMask) ||
44464 isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), RMask)))
44467 // If A and B occur in reverse order in RHS, then canonicalize by commuting
44468 // RHS operands and shuffle mask.
if (A != C) {
std::swap(C, D);
44471 ShuffleVectorSDNode::commuteMask(RMask);
}
44473 // Check that the shuffles are both shuffling the same vectors.
44474 if (!(A == C && B == D))
44477 PostShuffleMask.clear();
44478 PostShuffleMask.append(NumElts, SM_SentinelUndef);
44480 // LHS and RHS are now:
44481 // LHS = shuffle A, B, LMask
44482 // RHS = shuffle A, B, RMask
44483 // Check that the masks correspond to performing a horizontal operation.
44484 // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
44485 // so we just repeat the inner loop if this is a 256-bit op.
44486 unsigned Num128BitChunks = VT.getSizeInBits() / 128;
44487 unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
44488 unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
44489 assert((NumEltsPer128BitChunk % 2 == 0) &&
44490 "Vector type should have an even number of elements in each lane");
44491 for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
44492 for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
44493 // Ignore undefined components.
44494 int LIdx = LMask[i + j], RIdx = RMask[i + j];
44495 if (LIdx < 0 || RIdx < 0 ||
44496 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
44497 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
44500 // Check that successive odd/even elements are being operated on. If not,
44501 // this is not a horizontal operation.
44502 if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
44503 !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
44506 // Compute the post-shuffle mask index based on where the element
44507 // is stored in the HOP result, and where it needs to be moved to.
44508 int Base = LIdx & ~1u;
44509 int Index = ((Base % NumEltsPer128BitChunk) / 2) +
44510 ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));
44512 // The low half of the 128-bit result must choose from A.
44513 // The high half of the 128-bit result must choose from B,
44514 // unless B is undef. In that case, we are always choosing from A.
44515 if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
44516 Index += NumEltsPer64BitChunk;
44517 PostShuffleMask[i + j] = Index;
44521 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
44522 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
44524 bool IsIdentityPostShuffle =
44525 isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
44526 if (IsIdentityPostShuffle)
44527 PostShuffleMask.clear();
44529 // Assume a SingleSource HOP if we only shuffle one input and don't need to
44530 // shuffle the result.
44531 if (!shouldUseHorizontalOp(LHS == RHS &&
44532 (NumShuffles < 2 || !IsIdentityPostShuffle),
44536 LHS = DAG.getBitcast(VT, LHS);
44537 RHS = DAG.getBitcast(VT, RHS);
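// Worked example (illustrative): for v4f32 with
//   LHS = shuffle A, B, <0, 2, 4, 6> and RHS = shuffle A, B, <1, 3, 5, 7>
// every (LIdx, RIdx) pair is (even, even + 1), so the loop above accepts the
// match, the computed PostShuffleMask is the identity <0, 1, 2, 3>, and the
// caller can emit HADDPS A, B directly.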
44541 /// Do target-specific dag combines on floating-point adds/subs.
44542 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
44543 const X86Subtarget &Subtarget) {
44544 EVT VT = N->getValueType(0);
44545 SDValue LHS = N->getOperand(0);
44546 SDValue RHS = N->getOperand(1);
44547 bool IsFadd = N->getOpcode() == ISD::FADD;
44548 auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
44549 assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
44551 // Try to synthesize horizontal add/sub from adds/subs of shuffles.
44552 SmallVector<int, 8> PostShuffleMask;
44553 if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
44554 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
44555 isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd, PostShuffleMask)) {
44556 SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
44557 if (!PostShuffleMask.empty())
44558 HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
44559 DAG.getUNDEF(VT), PostShuffleMask);
44563 // NOTE: isHorizontalBinOp may have changed LHS/RHS variables.
44568 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
/// the resulting codegen.
44570 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
44571 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
44572 /// anything that is guaranteed to be transformed by DAGCombiner.
44573 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
44574 const X86Subtarget &Subtarget,
44576 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
44577 SDValue Src = N->getOperand(0);
44578 unsigned SrcOpcode = Src.getOpcode();
44579 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44581 EVT VT = N->getValueType(0);
44582 EVT SrcVT = Src.getValueType();
44584 auto IsFreeTruncation = [VT](SDValue Op) {
44585 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
44587 // See if this has been extended from a smaller/equal size to
44588 // the truncation size, allowing a truncation to combine with the extend.
44589 unsigned Opcode = Op.getOpcode();
44590 if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
44591 Opcode == ISD::ZERO_EXTEND) &&
44592 Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
44595 // See if this is a single use constant which can be constant folded.
44596 // NOTE: We don't peek through bitcasts here because there is currently
44597 // no support for constant folding truncate+bitcast+vector_of_constants. So
44598 // we'll just end up with a truncate on both operands which will
44599 // get turned back into (truncate (binop)) causing an infinite loop.
44600 return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
44603 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
44604 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
44605 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
44606 return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
44609 // Don't combine if the operation has other uses.
44610 if (!Src.hasOneUse())
44613 // Only support vector truncation for now.
44614 // TODO: i64 scalar math would benefit as well.
44615 if (!VT.isVector())
44618 // In most cases it's only worth pre-truncating if we're only facing the cost
44619 // of one truncation.
44620 // i.e. if one of the inputs will constant fold or the input is repeated.
44621 switch (SrcOpcode) {
44623 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
44624 // better to truncate if we have the chance.
44625 if (SrcVT.getScalarType() == MVT::i64 &&
44626 TLI.isOperationLegal(SrcOpcode, VT) &&
44627 !TLI.isOperationLegal(SrcOpcode, SrcVT))
44628 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
44635 SDValue Op0 = Src.getOperand(0);
44636 SDValue Op1 = Src.getOperand(1);
44637 if (TLI.isOperationLegal(SrcOpcode, VT) &&
44638 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
44639 return TruncateArithmetic(Op0, Op1);
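// Illustrative example (not from the original source):
//   trunc (mul v4i64 X, (splat 17)) to v4i32
// becomes mul v4i32 (trunc X), (splat 17): the constant operand truncates for
// free, so only one real truncate remains and the multiply runs in a type the
// target supports directly (64-bit vector multiplies need AVX512DQ).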
44647 /// Truncate using ISD::AND mask and X86ISD::PACKUS.
44648 /// e.g. trunc <8 x i32> X to <8 x i16> -->
44649 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
44650 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
44651 static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
44652 const X86Subtarget &Subtarget,
44653 SelectionDAG &DAG) {
44654 SDValue In = N->getOperand(0);
44655 EVT InVT = In.getValueType();
44656 EVT OutVT = N->getValueType(0);
44658 APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
44659 OutVT.getScalarSizeInBits());
44660 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
44661 return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
44664 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
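/// The SIGN_EXTEND_INREG below re-materializes the sign bits of the low i16 of
/// each i32 lane, so the signed saturation performed by PACKSS keeps the low
/// 16 bits exactly.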
44665 static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
44666 const X86Subtarget &Subtarget,
44667 SelectionDAG &DAG) {
44668 SDValue In = N->getOperand(0);
44669 EVT InVT = In.getValueType();
44670 EVT OutVT = N->getValueType(0);
44671 In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
44672 DAG.getValueType(OutVT));
44673 return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
44676 /// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
44677 /// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
44678 /// legalization the truncation will be translated into a BUILD_VECTOR whose
44679 /// elements are each extracted from a vector and then truncated, and it is
44680 /// difficult to do this optimization based on that form.
44681 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
44682 const X86Subtarget &Subtarget) {
44683 EVT OutVT = N->getValueType(0);
44684 if (!OutVT.isVector())
44687 SDValue In = N->getOperand(0);
44688 if (!In.getValueType().isSimple())
44691 EVT InVT = In.getValueType();
44692 unsigned NumElems = OutVT.getVectorNumElements();
44694 // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
44695 // SSE2, and we need to take care of it specially.
44696 // AVX512 provides vpmovdb.
44697 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
44700 EVT OutSVT = OutVT.getVectorElementType();
44701 EVT InSVT = InVT.getVectorElementType();
44702 if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
44703 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
44707 // SSSE3's pshufb results in fewer instructions in the cases below.
44708 if (Subtarget.hasSSSE3() && NumElems == 8 &&
44709 ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
44710 (InSVT == MVT::i32 && OutSVT == MVT::i16)))
44714 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
44715 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
44716 // truncate 2 x v4i32 to v8i16.
44717 if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
44718 return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
44719 if (InSVT == MVT::i32)
44720 return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
44725 /// This function transforms vector truncation of 'extended sign-bits' or
44726 /// 'extended zero-bits' values, i.e. vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32,
44727 /// into X86ISD::PACKSS/PACKUS operations.
44728 static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
44730 const X86Subtarget &Subtarget) {
44732 if (!Subtarget.hasSSE2())
44735 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
44738 SDValue In = N->getOperand(0);
44739 if (!In.getValueType().isSimple())
44742 MVT VT = N->getValueType(0).getSimpleVT();
44743 MVT SVT = VT.getScalarType();
44745 MVT InVT = In.getValueType().getSimpleVT();
44746 MVT InSVT = InVT.getScalarType();
44748 // Check we have a truncation suited for PACKSS/PACKUS.
44749 if (!isPowerOf2_32(VT.getVectorNumElements()))
44751 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
44753 if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
44756 // Truncation to sub-128bit vXi32 can be better handled with shuffles.
44757 if (SVT == MVT::i32 && VT.getSizeInBits() < 128)
44760 // AVX512 has fast truncate, but if the input is already going to be split,
44761 // there's no harm in trying pack.
44762 if (Subtarget.hasAVX512() &&
44763 !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
44764 InVT.is512BitVector()))
44767 unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
44768 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
44770 // Use PACKUS if the input has zero-bits that extend all the way to the
44771 // packed/truncated value. e.g. masks, zext_in_reg, etc.
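// For example, a vXi16 input that was masked with 0x00FF has at least 8 known
// leading zero bits per element, which is enough for PACKUSWB to produce the
// exact vXi8 truncation even without SSE4.1.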
44772 KnownBits Known = DAG.computeKnownBits(In);
44773 unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
44774 if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
44775 return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
44777 // Use PACKSS if the input has sign-bits that extend all the way to the
44778 // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
44779 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
44781 // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
44782 // a sign splat. ComputeNumSignBits struggles to see through BITCASTs later
44783 // on and combines/simplifications can't then use it.
44784 if (SVT == MVT::i32 && NumSignBits != InSVT.getSizeInBits())
44787 if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
44788 return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
44793 // Try to form a MULHU or MULHS node by looking for
44794 // (trunc (srl (mul ext, ext), 16))
44795 // TODO: This is X86 specific because we want to be able to handle wide types
44796 // before type legalization. But we can only do it if the vector will be
44797 // legalized via widening/splitting. Type legalization can't handle promotion
44798 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG combiner.
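// e.g. with a and b of type v8i16:
//   (v8i16 trunc (srl (mul (v8i32 zext a), (v8i32 zext b)), 16)) --> (mulhu a, b)
//   (v8i16 trunc (srl (mul (v8i32 sext a), (v8i32 sext b)), 16)) --> (mulhs a, b)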
44800 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
44801 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
44802 // First instruction should be a right shift of a multiply.
44803 if (Src.getOpcode() != ISD::SRL ||
44804 Src.getOperand(0).getOpcode() != ISD::MUL)
44807 if (!Subtarget.hasSSE2())
44810 // Only handle vXi16 types that are at least 128 bits unless they will be widened to 128 bits.
44812 if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
44815 // Input type should be at least vXi32.
44816 EVT InVT = Src.getValueType();
44817 if (InVT.getVectorElementType().getSizeInBits() < 32)
44820 // Need a shift by 16.
44822 if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
44826 SDValue LHS = Src.getOperand(0).getOperand(0);
44827 SDValue RHS = Src.getOperand(0).getOperand(1);
44829 unsigned ExtOpc = LHS.getOpcode();
44830 if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
44831 RHS.getOpcode() != ExtOpc)
44834 // Peek through the extends.
44835 LHS = LHS.getOperand(0);
44836 RHS = RHS.getOperand(0);
44838 // Ensure the input types match.
44839 if (LHS.getValueType() != VT || RHS.getValueType() != VT)
44842 unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
44843 return DAG.getNode(Opc, DL, VT, LHS, RHS);
44846 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
44847 // from one vector with signed bytes from another vector, adds together
44848 // adjacent pairs of 16-bit products, and saturates the result before
44849 // truncating to 16 bits.
44851 // Which looks something like this:
44852 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
44853 // (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
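// When the pattern matches, it is rewritten as X86ISD::VPMADDUBSW(A, B), with
// SplitOpsAndApply splitting wide vectors down to legal register widths.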
44854 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
44855 const X86Subtarget &Subtarget,
44857 if (!VT.isVector() || !Subtarget.hasSSSE3())
44860 unsigned NumElems = VT.getVectorNumElements();
44861 EVT ScalarVT = VT.getVectorElementType();
44862 if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
44865 SDValue SSatVal = detectSSatPattern(In, VT);
44866 if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
44869 // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
44870 // of multiplies from even/odd elements.
44871 SDValue N0 = SSatVal.getOperand(0);
44872 SDValue N1 = SSatVal.getOperand(1);
44874 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
44877 SDValue N00 = N0.getOperand(0);
44878 SDValue N01 = N0.getOperand(1);
44879 SDValue N10 = N1.getOperand(0);
44880 SDValue N11 = N1.getOperand(1);
44882 // TODO: Handle constant vectors and use knownbits/computenumsignbits?
44883 // Canonicalize zero_extend to LHS.
44884 if (N01.getOpcode() == ISD::ZERO_EXTEND)
44885 std::swap(N00, N01);
44886 if (N11.getOpcode() == ISD::ZERO_EXTEND)
44887 std::swap(N10, N11);
44889 // Ensure we have a zero_extend and a sign_extend.
44890 if (N00.getOpcode() != ISD::ZERO_EXTEND ||
44891 N01.getOpcode() != ISD::SIGN_EXTEND ||
44892 N10.getOpcode() != ISD::ZERO_EXTEND ||
44893 N11.getOpcode() != ISD::SIGN_EXTEND)
44896 // Peek through the extends.
44897 N00 = N00.getOperand(0);
44898 N01 = N01.getOperand(0);
44899 N10 = N10.getOperand(0);
44900 N11 = N11.getOperand(0);
44902 // Ensure the extend is from vXi8.
44903 if (N00.getValueType().getVectorElementType() != MVT::i8 ||
44904 N01.getValueType().getVectorElementType() != MVT::i8 ||
44905 N10.getValueType().getVectorElementType() != MVT::i8 ||
44906 N11.getValueType().getVectorElementType() != MVT::i8)
44909 // All inputs should be build_vectors.
44910 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
44911 N01.getOpcode() != ISD::BUILD_VECTOR ||
44912 N10.getOpcode() != ISD::BUILD_VECTOR ||
44913 N11.getOpcode() != ISD::BUILD_VECTOR)
44916 // N00/N10 are zero extended. N01/N11 are sign extended.
44918 // For each element, we need to ensure we have an odd element from one vector
44919 // multiplied by the odd element of another vector and the even element from
44920 // one of the same vectors being multiplied by the even element from the
44921 // other vector. So we need to make sure for each element i, this operator
44922 // is being performed:
44923 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
44924 SDValue ZExtIn, SExtIn;
44925 for (unsigned i = 0; i != NumElems; ++i) {
44926 SDValue N00Elt = N00.getOperand(i);
44927 SDValue N01Elt = N01.getOperand(i);
44928 SDValue N10Elt = N10.getOperand(i);
44929 SDValue N11Elt = N11.getOperand(i);
44930 // TODO: Be more tolerant to undefs.
44931 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44932 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44933 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44934 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
44936 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
44937 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
44938 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
44939 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
44940 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
44942 unsigned IdxN00 = ConstN00Elt->getZExtValue();
44943 unsigned IdxN01 = ConstN01Elt->getZExtValue();
44944 unsigned IdxN10 = ConstN10Elt->getZExtValue();
44945 unsigned IdxN11 = ConstN11Elt->getZExtValue();
44946 // Add is commutative so indices can be reordered.
44947 if (IdxN00 > IdxN10) {
44948 std::swap(IdxN00, IdxN10);
44949 std::swap(IdxN01, IdxN11);
44951 // N0 indices must be the even elements. N1 indices must be the next odd elements.
44952 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
44953 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
44955 SDValue N00In = N00Elt.getOperand(0);
44956 SDValue N01In = N01Elt.getOperand(0);
44957 SDValue N10In = N10Elt.getOperand(0);
44958 SDValue N11In = N11Elt.getOperand(0);
44959 // The first time we find an input, capture it.
44964 if (ZExtIn != N00In || SExtIn != N01In ||
44965 ZExtIn != N10In || SExtIn != N11In)
44969 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44970 ArrayRef<SDValue> Ops) {
44971 // Shrink by adding truncate nodes and let DAGCombine fold with the sources.
44973 EVT InVT = Ops[0].getValueType();
44974 assert(InVT.getScalarType() == MVT::i8 &&
44975 "Unexpected scalar element type");
44976 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
44977 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
44978 InVT.getVectorNumElements() / 2);
44979 return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
44981 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
44985 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
44986 const X86Subtarget &Subtarget) {
44987 EVT VT = N->getValueType(0);
44988 SDValue Src = N->getOperand(0);
44991 // Attempt to pre-truncate inputs to arithmetic ops instead.
44992 if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
44995 // Try to detect AVG pattern first.
44996 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
44999 // Try to detect PMADDUBSW.
45000 if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
45003 // Try to combine truncation with signed/unsigned saturation.
45004 if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
45007 // Try to combine PMULHUW/PMULHW for vXi16.
45008 if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
45011 // The bitcast source is a direct mmx result.
45012 // Detect bitcasts between i32 and x86mmx.
45013 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
45014 SDValue BCSrc = Src.getOperand(0);
45015 if (BCSrc.getValueType() == MVT::x86mmx)
45016 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
45019 // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
45020 if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
45023 return combineVectorTruncation(N, DAG, Subtarget);
45026 static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
45027 TargetLowering::DAGCombinerInfo &DCI) {
45028 EVT VT = N->getValueType(0);
45029 SDValue In = N->getOperand(0);
45032 if (auto SSatVal = detectSSatPattern(In, VT))
45033 return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
45034 if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
45035 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
45037 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45038 APInt DemandedMask(APInt::getAllOnesValue(VT.getScalarSizeInBits()));
45039 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
45040 return SDValue(N, 0);
45045 /// Returns the negated value if the node \p N flips the sign of an FP value.
45046 ///
45047 /// An FP-negation node may have different forms: FNEG(x), FXOR(x, 0x80000000)
45048 /// or FSUB(0, x).
45049 /// AVX512F does not have FXOR, so FNEG is lowered as
45050 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
45051 /// In this case we go through all bitcasts.
45052 /// This also recognizes splat of a negated value and returns the splat of that
45053 /// value.
45054 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
45055 if (N->getOpcode() == ISD::FNEG)
45056 return N->getOperand(0);
45058 // Don't recurse exponentially.
45059 if (Depth > SelectionDAG::MaxRecursionDepth)
45062 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
45064 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
45065 EVT VT = Op->getValueType(0);
45067 // Make sure the element size doesn't change.
45068 if (VT.getScalarSizeInBits() != ScalarSize)
45071 unsigned Opc = Op.getOpcode();
45073 case ISD::VECTOR_SHUFFLE: {
45074 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
45075 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
45076 if (!Op.getOperand(1).isUndef())
45078 if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
45079 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
45080 return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
45081 cast<ShuffleVectorSDNode>(Op)->getMask());
45084 case ISD::INSERT_VECTOR_ELT: {
45085 // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF, -V, INDEX).
45087 SDValue InsVector = Op.getOperand(0);
45088 SDValue InsVal = Op.getOperand(1);
45089 if (!InsVector.isUndef())
45091 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
45092 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
45093 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
45094 NegInsVal, Op.getOperand(2));
45099 case X86ISD::FXOR: {
45100 SDValue Op1 = Op.getOperand(1);
45101 SDValue Op0 = Op.getOperand(0);
45103 // For XOR and FXOR, we want to check if constant
45104 // bits of Op1 are sign bit masks. For FSUB, we
45105 // have to check if constant bits of Op0 are sign
45106 // bit masks and hence we swap the operands.
45107 if (Opc == ISD::FSUB)
45108 std::swap(Op0, Op1);
45111 SmallVector<APInt, 16> EltBits;
45112 // Extract constant bits and see if they are all
45113 // sign bit masks. Ignore the undef elements.
45114 if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
45115 /* AllowWholeUndefs */ true,
45116 /* AllowPartialUndefs */ false)) {
45117 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
45118 if (!UndefElts[I] && !EltBits[I].isSignMask())
45121 return peekThroughBitcasts(Op0);
45129 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
45133 default: llvm_unreachable("Unexpected opcode");
45134 case ISD::FMA: Opcode = X86ISD::FNMADD; break;
45135 case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FNMADD; break;
45136 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
45137 case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
45138 case X86ISD::STRICT_FMSUB: Opcode = X86ISD::STRICT_FNMSUB; break;
45139 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
45140 case X86ISD::FNMADD: Opcode = ISD::FMA; break;
45141 case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA; break;
45142 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
45143 case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
45144 case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB; break;
45145 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
45151 default: llvm_unreachable("Unexpected opcode");
45152 case ISD::FMA: Opcode = X86ISD::FMSUB; break;
45153 case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FMSUB; break;
45154 case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
45155 case X86ISD::FMSUB: Opcode = ISD::FMA; break;
45156 case X86ISD::STRICT_FMSUB: Opcode = ISD::STRICT_FMA; break;
45157 case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
45158 case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
45159 case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
45160 case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
45161 case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
45162 case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
45163 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
45164 case X86ISD::FMADDSUB: Opcode = X86ISD::FMSUBADD; break;
45165 case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
45166 case X86ISD::FMSUBADD: Opcode = X86ISD::FMADDSUB; break;
45167 case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
45173 // For accuracy reasons, we never combine fneg and fma under strict FP.
45174 default: llvm_unreachable("Unexpected opcode");
45175 case ISD::FMA: Opcode = X86ISD::FNMSUB; break;
45176 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
45177 case X86ISD::FMSUB: Opcode = X86ISD::FNMADD; break;
45178 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
45179 case X86ISD::FNMADD: Opcode = X86ISD::FMSUB; break;
45180 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
45181 case X86ISD::FNMSUB: Opcode = ISD::FMA; break;
45182 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
45189 /// Do target-specific dag combines on floating point negations.
45190 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
45191 TargetLowering::DAGCombinerInfo &DCI,
45192 const X86Subtarget &Subtarget) {
45193 EVT OrigVT = N->getValueType(0);
45194 SDValue Arg = isFNEG(DAG, N);
45198 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45199 EVT VT = Arg.getValueType();
45200 EVT SVT = VT.getScalarType();
45203 // Let legalize expand this if it isn't a legal type yet.
45204 if (!TLI.isTypeLegal(VT))
45207 // If we're negating an FMUL node on a target with FMA, then we can avoid the
45208 // use of a constant by performing (-0 - A*B) instead.
45209 // FIXME: Check rounding control flags as well once it becomes available.
45210 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
45211 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
45212 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
45213 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
45214 Arg.getOperand(1), Zero);
45215 return DAG.getBitcast(OrigVT, NewNode);
45218 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
45219 bool LegalOperations = !DCI.isBeforeLegalizeOps();
45220 if (SDValue NegArg =
45221 TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
45222 return DAG.getBitcast(OrigVT, NegArg);
45227 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
45228 bool LegalOperations,
45230 NegatibleCost &Cost,
45231 unsigned Depth) const {
45232 // fneg patterns are removable even if they have multiple uses.
45233 if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
45234 Cost = NegatibleCost::Cheaper;
45235 return DAG.getBitcast(Op.getValueType(), Arg);
45238 EVT VT = Op.getValueType();
45239 EVT SVT = VT.getScalarType();
45240 unsigned Opc = Op.getOpcode();
45243 case X86ISD::FMSUB:
45244 case X86ISD::FNMADD:
45245 case X86ISD::FNMSUB:
45246 case X86ISD::FMADD_RND:
45247 case X86ISD::FMSUB_RND:
45248 case X86ISD::FNMADD_RND:
45249 case X86ISD::FNMSUB_RND: {
45250 if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
45251 !(SVT == MVT::f32 || SVT == MVT::f64) ||
45252 !isOperationLegal(ISD::FMA, VT))
45255 // This is always negatible for free but we might be able to remove some
45256 // extra operand negations as well.
45257 SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
45258 for (int i = 0; i != 3; ++i)
45259 NewOps[i] = getCheaperNegatedExpression(
45260 Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);
45262 bool NegA = !!NewOps[0];
45263 bool NegB = !!NewOps[1];
45264 bool NegC = !!NewOps[2];
45265 unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
45267 Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
45268 : NegatibleCost::Neutral;
45270 // Fill in the non-negated ops with the original values.
45271 for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
45273 NewOps[i] = Op.getOperand(i);
45274 return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
45277 if (SDValue NegOp0 =
45278 getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
45279 ForCodeSize, Cost, Depth + 1))
45280 return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
45284 return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
45285 ForCodeSize, Cost, Depth);
45288 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
45289 const X86Subtarget &Subtarget) {
45290 MVT VT = N->getSimpleValueType(0);
45291 // If we have integer vector types available, use the integer opcodes.
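// e.g. (v4f32 FAND a, b) --> (bitcast (v4i32 and (bitcast a), (bitcast b)))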
45292 if (!VT.isVector() || !Subtarget.hasSSE2())
45297 unsigned IntBits = VT.getScalarSizeInBits();
45298 MVT IntSVT = MVT::getIntegerVT(IntBits);
45299 MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
45301 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
45302 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
45303 unsigned IntOpcode;
45304 switch (N->getOpcode()) {
45305 default: llvm_unreachable("Unexpected FP logic op");
45306 case X86ISD::FOR: IntOpcode = ISD::OR; break;
45307 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
45308 case X86ISD::FAND: IntOpcode = ISD::AND; break;
45309 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
45311 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
45312 return DAG.getBitcast(VT, IntOp);
45316 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
45317 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
45318 if (N->getOpcode() != ISD::XOR)
45321 SDValue LHS = N->getOperand(0);
45322 if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
45325 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
45326 X86::CondCode(LHS->getConstantOperandVal(0)));
45328 return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
45331 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
45332 TargetLowering::DAGCombinerInfo &DCI,
45333 const X86Subtarget &Subtarget) {
45334 // If this is SSE1-only, convert to FXOR to avoid scalarization.
45335 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
45336 N->getValueType(0) == MVT::v4i32) {
45337 return DAG.getBitcast(
45338 MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
45339 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
45340 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
45343 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
45346 if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
45349 if (DCI.isBeforeLegalizeOps())
45352 if (SDValue SetCC = foldXor1SetCC(N, DAG))
45355 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
45358 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
45361 return combineFneg(N, DAG, DCI, Subtarget);
45364 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
45365 TargetLowering::DAGCombinerInfo &DCI,
45366 const X86Subtarget &Subtarget) {
45367 EVT VT = N->getValueType(0);
45368 unsigned NumBits = VT.getSizeInBits();
45370 // TODO - Constant Folding.
45372 // Simplify the inputs.
45373 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45374 APInt DemandedMask(APInt::getAllOnesValue(NumBits));
45375 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
45376 return SDValue(N, 0);
45381 static bool isNullFPScalarOrVectorConst(SDValue V) {
45382 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
45385 /// If a value is a scalar FP zero or a vector FP zero (potentially including
45386 /// undefined elements), return a zero constant that may be used to fold away
45387 /// that value. In the case of a vector, the returned constant will not contain
45388 /// undefined elements even if the input parameter does. This makes it suitable
45389 /// to be used as a replacement operand with operations (e.g., bitwise-and) where
45390 /// an undef should not propagate.
45391 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
45392 const X86Subtarget &Subtarget) {
45393 if (!isNullFPScalarOrVectorConst(V))
45396 if (V.getValueType().isVector())
45397 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
45402 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
45403 const X86Subtarget &Subtarget) {
45404 SDValue N0 = N->getOperand(0);
45405 SDValue N1 = N->getOperand(1);
45406 EVT VT = N->getValueType(0);
45409 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
45410 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
45411 (VT == MVT::f64 && Subtarget.hasSSE2()) ||
45412 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
45415 auto isAllOnesConstantFP = [](SDValue V) {
45416 if (V.getSimpleValueType().isVector())
45417 return ISD::isBuildVectorAllOnes(V.getNode());
45418 auto *C = dyn_cast<ConstantFPSDNode>(V);
45419 return C && C->getConstantFPValue()->isAllOnesValue();
45422 // fand (fxor X, -1), Y --> fandn X, Y
45423 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
45424 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
45426 // fand X, (fxor Y, -1) --> fandn Y, X
45427 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
45428 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
45433 /// Do target-specific dag combines on X86ISD::FAND nodes.
45434 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
45435 const X86Subtarget &Subtarget) {
45436 // FAND(0.0, x) -> 0.0
45437 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
45440 // FAND(x, 0.0) -> 0.0
45441 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
45444 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
45447 return lowerX86FPLogicOp(N, DAG, Subtarget);
45450 /// Do target-specific dag combines on X86ISD::FANDN nodes.
45451 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
45452 const X86Subtarget &Subtarget) {
45453 // FANDN(0.0, x) -> x
45454 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
45455 return N->getOperand(1);
45457 // FANDN(x, 0.0) -> 0.0
45458 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
45461 return lowerX86FPLogicOp(N, DAG, Subtarget);
45464 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
45465 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
45466 TargetLowering::DAGCombinerInfo &DCI,
45467 const X86Subtarget &Subtarget) {
45468 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
45470 // F[X]OR(0.0, x) -> x
45471 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
45472 return N->getOperand(1);
45474 // F[X]OR(x, 0.0) -> x
45475 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
45476 return N->getOperand(0);
45478 if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
45481 return lowerX86FPLogicOp(N, DAG, Subtarget);
45484 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
45485 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
45486 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
45488 // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
45489 if (!DAG.getTarget().Options.NoNaNsFPMath ||
45490 !DAG.getTarget().Options.NoSignedZerosFPMath)
45493 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
45494 // into FMINC and FMAXC, which are commutative operations.
45495 unsigned NewOp = 0;
45496 switch (N->getOpcode()) {
45497 default: llvm_unreachable("unknown opcode");
45498 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
45499 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
45502 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
45503 N->getOperand(0), N->getOperand(1));
45506 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
45507 const X86Subtarget &Subtarget) {
45508 if (Subtarget.useSoftFloat())
45511 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45513 EVT VT = N->getValueType(0);
45514 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
45515 (Subtarget.hasSSE2() && VT == MVT::f64) ||
45516 (VT.isVector() && TLI.isTypeLegal(VT))))
45519 SDValue Op0 = N->getOperand(0);
45520 SDValue Op1 = N->getOperand(1);
45522 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
45524 // If we don't have to respect NaN inputs, this is a direct translation to x86
45525 // min/max instructions.
45526 if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
45527 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
45529 // If one of the operands is known to be non-NaN, use the native min/max
45530 // instructions with the non-NaN input as the second operand.
45531 if (DAG.isKnownNeverNaN(Op1))
45532 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
45533 if (DAG.isKnownNeverNaN(Op0))
45534 return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
45536 // If we have to respect NaN inputs, this takes at least 3 instructions.
45537 // Favor a library call when operating on a scalar and minimizing code size.
45538 if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
45541 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
45544 // There are 4 possibilities involving NaN inputs, and these are the required
45545 // results:
45546 //                   Op1
45547 //               Num     NaN
45548 //            ----------------
45549 //     Num    |  Max  |  Op0 |
45550 // Op0        ----------------
45551 //     NaN    |  Op1  |  NaN |
45552 //            ----------------
45553 //
45554 // The SSE FP max/min instructions were not designed for this case, but rather
45555 // to implement:
45556 //     Min = Op1 < Op0 ? Op1 : Op0
45557 //     Max = Op1 > Op0 ? Op1 : Op0
45558 //
45559 // So they always return Op0 if either input is a NaN. However, we can still
45560 // use those instructions for fmaxnum by selecting away a NaN input.
45562 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
45563 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
45564 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
45566 // If Op0 is a NaN, select Op1. Otherwise, select the min/max result. If both operands
45567 // are NaN, the NaN value of Op1 is the result.
45568 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
45571 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
45572 TargetLowering::DAGCombinerInfo &DCI) {
45573 EVT VT = N->getValueType(0);
45574 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45576 APInt KnownUndef, KnownZero;
45577 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
45578 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
45580 return SDValue(N, 0);
45582 // Convert a full vector load into vzload when not all bits are needed.
45583 SDValue In = N->getOperand(0);
45584 MVT InVT = In.getSimpleValueType();
45585 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
45586 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
45587 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
45588 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
45589 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
45590 MVT MemVT = MVT::getIntegerVT(NumBits);
45591 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
45592 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
45594 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
45595 DAG.getBitcast(InVT, VZLoad));
45596 DCI.CombineTo(N, Convert);
45597 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
45598 DCI.recursivelyDeleteUnusedNodes(LN);
45599 return SDValue(N, 0);
45606 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
45607 TargetLowering::DAGCombinerInfo &DCI) {
45608 bool IsStrict = N->isTargetStrictFPOpcode();
45609 EVT VT = N->getValueType(0);
45611 // Convert a full vector load into vzload when not all bits are needed.
45612 SDValue In = N->getOperand(IsStrict ? 1 : 0);
45613 MVT InVT = In.getSimpleValueType();
45614 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
45615 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
45616 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
45617 LoadSDNode *LN = cast<LoadSDNode>(In);
45618 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
45619 MVT MemVT = MVT::getFloatingPointVT(NumBits);
45620 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
45621 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
45625 DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
45626 {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
45627 DCI.CombineTo(N, Convert, Convert.getValue(1));
45630 DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
45631 DCI.CombineTo(N, Convert);
45633 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
45634 DCI.recursivelyDeleteUnusedNodes(LN);
45635 return SDValue(N, 0);
45642 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
45643 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
45644 TargetLowering::DAGCombinerInfo &DCI,
45645 const X86Subtarget &Subtarget) {
45646 MVT VT = N->getSimpleValueType(0);
45648 // ANDNP(0, x) -> x
45649 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
45650 return N->getOperand(1);
45652 // ANDNP(x, 0) -> 0
45653 if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
45654 return DAG.getConstant(0, SDLoc(N), VT);
45656 // Turn ANDNP back to AND if input is inverted.
45657 if (SDValue Not = IsNOT(N->getOperand(0), DAG))
45658 return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
45661 // Attempt to recursively combine a bitmask ANDNP with shuffles.
45662 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
45664 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
45671 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
45672 TargetLowering::DAGCombinerInfo &DCI) {
45673 SDValue N1 = N->getOperand(1);
45675 // BT ignores high bits in the bit index operand.
45676 unsigned BitWidth = N1.getValueSizeInBits();
45677 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
45678 if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
45679 if (N->getOpcode() != ISD::DELETED_NODE)
45680 DCI.AddToWorklist(N);
45681 return SDValue(N, 0);
45687 static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
45688 TargetLowering::DAGCombinerInfo &DCI) {
45689 bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
45690 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
45692 if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
45693 APInt KnownUndef, KnownZero;
45694 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45695 APInt DemandedElts = APInt::getLowBitsSet(8, 4);
45696 if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
45698 if (N->getOpcode() != ISD::DELETED_NODE)
45699 DCI.AddToWorklist(N);
45700 return SDValue(N, 0);
45703 // Convert a full vector load into vzload when not all bits are needed.
45704 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
45705 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
45706 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
45709 SDValue Convert = DAG.getNode(
45710 N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
45711 {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
45712 DCI.CombineTo(N, Convert, Convert.getValue(1));
45714 SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
45715 DAG.getBitcast(MVT::v8i16, VZLoad));
45716 DCI.CombineTo(N, Convert);
45719 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
45720 DCI.recursivelyDeleteUnusedNodes(LN);
45721 return SDValue(N, 0);
45729 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
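// e.g. (sext_in_reg (cmov C0, C1), i8)
//        --> (cmov (sext_in_reg C0, i8), (sext_in_reg C1, i8))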
45730 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
45731 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
45733 EVT DstVT = N->getValueType(0);
45735 SDValue N0 = N->getOperand(0);
45736 SDValue N1 = N->getOperand(1);
45737 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
45739 if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
45742 // Look through single-use any_extends / truncs.
45743 SDValue IntermediateBitwidthOp;
45744 if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
45746 IntermediateBitwidthOp = N0;
45747 N0 = N0.getOperand(0);
45750 // See if we have a single use cmov.
45751 if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
45754 SDValue CMovOp0 = N0.getOperand(0);
45755 SDValue CMovOp1 = N0.getOperand(1);
45757 // Make sure both operands are constants.
45758 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
45759 !isa<ConstantSDNode>(CMovOp1.getNode()))
45764 // If we looked through an any_extend/trunc above, apply the same operation to the constants.
45765 if (IntermediateBitwidthOp) {
45766 unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
45767 CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
45768 CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
45771 CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
45772 CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
45774 EVT CMovVT = DstVT;
45775 // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
45776 if (DstVT == MVT::i16) {
45778 CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
45779 CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
45782 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
45783 N0.getOperand(2), N0.getOperand(3));
45785 if (CMovVT != DstVT)
45786 CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
45791 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
45792 const X86Subtarget &Subtarget) {
45793 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
45795 if (SDValue V = combineSextInRegCmov(N, DAG))
45798 EVT VT = N->getValueType(0);
45799 SDValue N0 = N->getOperand(0);
45800 SDValue N1 = N->getOperand(1);
45801 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
45804 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on
45805 // both SSE and AVX2 since there is no sign-extended shift right
45806 // operation on a vector with 64-bit elements.
45807 // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
45808 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
45809 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
45810 N0.getOpcode() == ISD::SIGN_EXTEND)) {
45811 SDValue N00 = N0.getOperand(0);
45813 // EXTLOAD has a better solution on AVX2,
45814 // it may be replaced with an X86ISD::VSEXT node.
45815 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
45816 if (!ISD::isNormalLoad(N00.getNode()))
45819 // Attempt to promote any comparison mask ops before moving the
45820 // SIGN_EXTEND_INREG in the way.
45821 if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
45822 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);
45824 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
45826 DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
45827 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
45833 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
45834 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
45835 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
45836 /// opportunities to combine math ops, use an LEA, or use a complex addressing
45837 /// mode. This can eliminate extend, add, and shift instructions.
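/// For example, (i64 sext (add nsw (i32 x), 42)) becomes (i64 add (sext x), 42),
/// where the constant can later fold into the displacement of an LEA formed
/// with another 'add' or 'shl' user of the extend.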
45838 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
45839 const X86Subtarget &Subtarget) {
45840 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
45841 Ext->getOpcode() != ISD::ZERO_EXTEND)
45844 // TODO: This should be valid for other integer types.
45845 EVT VT = Ext->getValueType(0);
45846 if (VT != MVT::i64)
45849 SDValue Add = Ext->getOperand(0);
45850 if (Add.getOpcode() != ISD::ADD)
45853 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
45854 bool NSW = Add->getFlags().hasNoSignedWrap();
45855 bool NUW = Add->getFlags().hasNoUnsignedWrap();
45857 // We need an 'add nsw' feeding into the 'sext' or an 'add nuw' feeding into the 'zext'.
45859 if ((Sext && !NSW) || (!Sext && !NUW))
45862 // Having a constant operand to the 'add' ensures that we are not increasing
45863 // the instruction count because the constant is extended for free below.
45864 // A constant operand can also become the displacement field of an LEA.
45865 auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
45869 // Don't make the 'add' bigger if there's no hope of combining it with some
45870 // other 'add' or 'shl' instruction.
45871 // TODO: It may be profitable to generate simpler LEA instructions in place
45872 // of single 'add' instructions, but the cost model for selecting an LEA
45873 // currently has a high threshold.
45874 bool HasLEAPotential = false;
45875 for (auto *User : Ext->uses()) {
45876 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
45877 HasLEAPotential = true;
45881 if (!HasLEAPotential)
45884 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
45885 int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
45886 SDValue AddOp0 = Add.getOperand(0);
45887 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
45888 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
45890 // The wider add is guaranteed to not wrap because both operands are sign- or
45891 // zero-extended from a narrower type, so their sum cannot wrap in i64.
45893 Flags.setNoSignedWrap(NSW);
45894 Flags.setNoUnsignedWrap(NUW);
45895 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
45898 // If we encounter an {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
45899 // operands and the result of the CMOV is not used anywhere else, promote the CMOV
45900 // itself instead of promoting its result. This could be beneficial, because:
45901 // 1) X86TargetLowering::EmitLoweredSelect later can merge two
45902 // (or more) pseudo-CMOVs only when they go one after another, and
45903 // getting rid of the result-extension code after the CMOV will help with that.
45904 // 2) Promotion of constant CMOV arguments is free, hence the
45905 // {ANY,SIGN,ZERO}_EXTEND will just be deleted.
45906 // 3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this
45907 // promotion is also good in terms of code size.
45908 // (64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit promotion).
45910 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
45911 SDValue CMovN = Extend->getOperand(0);
45912 if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
45915 EVT TargetVT = Extend->getValueType(0);
45916 unsigned ExtendOpcode = Extend->getOpcode();
45919 EVT VT = CMovN.getValueType();
45920 SDValue CMovOp0 = CMovN.getOperand(0);
45921 SDValue CMovOp1 = CMovN.getOperand(1);
45923 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
45924 !isa<ConstantSDNode>(CMovOp1.getNode()))
45927 // Only extend to i32 or i64.
45928 if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
45931 // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32 are free.
45933 if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
45936 // If this is a zero extend to i64, we should only extend to i32 and use a free
45937 // zero extend to finish.
45938 EVT ExtendVT = TargetVT;
45939 if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
45940 ExtendVT = MVT::i32;
45942 CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
45943 CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
45945 SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
45946 CMovN.getOperand(2), CMovN.getOperand(3));
45948 // Finish extending if needed.
45949 if (ExtendVT != TargetVT)
45950 Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
45955 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
45956 // This is more or less the reverse of combineBitcastvxi1.
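// The scalar is broadcast into every vector element, each element is ANDed
// with a mask selecting its bit, the result is compared against that mask
// with SETEQ, and the compare result is then sign-extended (and shifted right
// by EltSizeInBits - 1 for the zero/any-extend cases).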
45958 combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
45959 TargetLowering::DAGCombinerInfo &DCI,
45960 const X86Subtarget &Subtarget) {
45961 unsigned Opcode = N->getOpcode();
45962 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
45963 Opcode != ISD::ANY_EXTEND)
45965 if (!DCI.isBeforeLegalizeOps())
45967 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
45970 SDValue N0 = N->getOperand(0);
45971 EVT VT = N->getValueType(0);
45972 EVT SVT = VT.getScalarType();
45973 EVT InSVT = N0.getValueType().getScalarType();
45974 unsigned EltSizeInBits = SVT.getSizeInBits();
45976 // Input type must be extending a bool vector (bit-casted from a scalar
45977 // integer) to legal integer types.
45978 if (!VT.isVector())
45980 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
45982 if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
45985 SDValue N00 = N0.getOperand(0);
45986 EVT SclVT = N0.getOperand(0).getValueType();
45987 if (!SclVT.isScalarInteger())
45992 SmallVector<int, 32> ShuffleMask;
45993 unsigned NumElts = VT.getVectorNumElements();
45994 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
45996 // Broadcast the scalar integer to the vector elements.
45997 if (NumElts > EltSizeInBits) {
45998 // If the scalar integer is greater than the vector element size, then we
45999 // must split it down into sub-sections for broadcasting. For example:
46000 // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
46001 // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
46002 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
46003 unsigned Scale = NumElts / EltSizeInBits;
46005 EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
46006 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
46007 Vec = DAG.getBitcast(VT, Vec);
46009 for (unsigned i = 0; i != Scale; ++i)
46010 ShuffleMask.append(EltSizeInBits, i);
46011 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
46012 } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
46013 (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
46014 // If we have register broadcast instructions, use the scalar size as the
46015 // element type for the shuffle. Then cast to the wider element type. The
46016 // widened bits won't be used, and this might allow the use of a broadcast load.
46018 assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
46019 unsigned Scale = EltSizeInBits / NumElts;
46021 EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
46022 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
46023 ShuffleMask.append(NumElts * Scale, 0);
46024 Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
46025 Vec = DAG.getBitcast(VT, Vec);
46027 // For smaller scalar integers, we can simply any-extend it to the vector
46028 // element size (we don't care about the upper bits) and broadcast it to all elements.
46030 SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
46031 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
46032 ShuffleMask.append(NumElts, 0);
46033 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
46036 // Now, mask the relevant bit in each element.
46037 SmallVector<SDValue, 32> Bits;
46038 for (unsigned i = 0; i != NumElts; ++i) {
46039 int BitIdx = (i % EltSizeInBits);
46040 APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
46041 Bits.push_back(DAG.getConstant(Bit, DL, SVT));
46043 SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
46044 Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
46046 // Compare against the bitmask and extend the result.
46047 EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
46048 Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
46049 Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
46051 // For SEXT, this is now done, otherwise shift the result down for zero-extension.
46053 if (Opcode == ISD::SIGN_EXTEND)
46055 return DAG.getNode(ISD::SRL, DL, VT, Vec,
46056 DAG.getConstant(EltSizeInBits - 1, DL, VT));
46059 // Attempt to combine a (sext/zext (setcc)) to a setcc with an xmm/ymm/zmm result type.
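// e.g. with AVX512: (v8i32 sext (v8i1 setcc (v8i32 a, v8i32 b, eq)))
//        --> (v8i32 setcc (a, b, eq))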
46061 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
46062 const X86Subtarget &Subtarget) {
46063 SDValue N0 = N->getOperand(0);
46064 EVT VT = N->getValueType(0);
46067 // Only do this combine with AVX512 for vector extends.
46068 if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
46071 // Only combine legal element types.
46072 EVT SVT = VT.getVectorElementType();
46073 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
46074 SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
46077 // We can only do this if the vector size is 256 bits or less.
46078 unsigned Size = VT.getSizeInBits();
46079 if (Size > 256 && Subtarget.useAVX512Regs())
46082 // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
46083 // those are the only integer compares we have.
46084 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
46085 if (ISD::isUnsignedIntSetCC(CC))
46088 // Only do this combine if the extension will be fully consumed by the setcc.
46089 EVT N00VT = N0.getOperand(0).getValueType();
46090 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
46091 if (Size != MatchingVecType.getSizeInBits())
46094 SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
46096 if (N->getOpcode() == ISD::ZERO_EXTEND)
46097 Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());
46102 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
46103 TargetLowering::DAGCombinerInfo &DCI,
46104 const X86Subtarget &Subtarget) {
46105 SDValue N0 = N->getOperand(0);
46106 EVT VT = N->getValueType(0);
46107 EVT InVT = N0.getValueType();
46110 // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
46111 if (!DCI.isBeforeLegalizeOps() &&
46112 N0.getOpcode() == X86ISD::SETCC_CARRY) {
46113 SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
46114 N0->getOperand(1));
46115 bool ReplaceOtherUses = !N0.hasOneUse();
46116 DCI.CombineTo(N, Setcc);
46117 // Replace other uses with a truncate of the widened setcc_carry.
46118 if (ReplaceOtherUses) {
46119 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
46120 N0.getValueType(), Setcc);
46121 DCI.CombineTo(N0.getNode(), Trunc);
46124 return SDValue(N, 0);
46127 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
46130 if (!DCI.isBeforeLegalizeOps())
46133 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
46136 if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
46137 isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
46138 // Inverting and sign-extending a boolean is the same as zero-extending and
46139 // subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract is efficiently
46140 // lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
46141 // sext (xor Bool, -1) --> sub (zext Bool), 1
46142 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
46143 return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
46146 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
46150 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
46153 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
46159 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
46160 TargetLowering::DAGCombinerInfo &DCI,
46161 const X86Subtarget &Subtarget) {
46163 EVT VT = N->getValueType(0);
46164 bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();
46166 // Let legalize expand this if it isn't a legal type yet.
46167 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46168 if (!TLI.isTypeLegal(VT))
46171 EVT ScalarVT = VT.getScalarType();
46172 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
46175 SDValue A = N->getOperand(IsStrict ? 1 : 0);
46176 SDValue B = N->getOperand(IsStrict ? 2 : 1);
46177 SDValue C = N->getOperand(IsStrict ? 3 : 2);
46179 auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
46180 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
46181 bool LegalOperations = !DCI.isBeforeLegalizeOps();
46182 if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
46187 // Look through extract_vector_elts. If it comes from an FNEG, create a
46188 // new extract from the FNEG input.
46189 if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
46190 isNullConstant(V.getOperand(1))) {
46191 SDValue Vec = V.getOperand(0);
46192 if (SDValue NegV = TLI.getCheaperNegatedExpression(
46193 Vec, DAG, LegalOperations, CodeSize)) {
46194 V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
46195 NegV, V.getOperand(1));
46203 // Do not convert the passthru input of scalar intrinsics.
46204 // FIXME: We could allow negations of the lower element only.
46205 bool NegA = invertIfNegative(A);
46206 bool NegB = invertIfNegative(B);
46207 bool NegC = invertIfNegative(C);
46209 if (!NegA && !NegB && !NegC)
46212 unsigned NewOpcode =
46213 negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
46216 assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
46217 return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
46218 {N->getOperand(0), A, B, C});
46220 if (N->getNumOperands() == 4)
46221 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
46222 return DAG.getNode(NewOpcode, dl, VT, A, B, C);
46226 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
46227 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
46228 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
46229 TargetLowering::DAGCombinerInfo &DCI) {
46231 EVT VT = N->getValueType(0);
46232 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46233 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
46234 bool LegalOperations = !DCI.isBeforeLegalizeOps();
46236 SDValue N2 = N->getOperand(2);
46239 TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
46242 unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
46244 if (N->getNumOperands() == 4)
46245 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
46246 NegN2, N->getOperand(3));
46247 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
46248 NegN2);
46249 }
46251 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
46252 TargetLowering::DAGCombinerInfo &DCI,
46253 const X86Subtarget &Subtarget) {
46254 SDLoc dl(N);
46255 SDValue N0 = N->getOperand(0);
46256 EVT VT = N->getValueType(0);
46258 // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
46259 // FIXME: Is this needed? We don't seem to have any tests for it.
46260 if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
46261 N0.getOpcode() == X86ISD::SETCC_CARRY) {
46262 SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
46263 N0->getOperand(1));
46264 bool ReplaceOtherUses = !N0.hasOneUse();
46265 DCI.CombineTo(N, Setcc);
46266 // Replace other uses with a truncate of the widened setcc_carry.
46267 if (ReplaceOtherUses) {
46268 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
46269 N0.getValueType(), Setcc);
46270 DCI.CombineTo(N0.getNode(), Trunc);
46273 return SDValue(N, 0);
46276 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
46279 if (DCI.isBeforeLegalizeOps())
46280 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
46283 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
46287 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
46290 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
46293 if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
46296 // TODO: Combine with any target/faux shuffle.
46297 if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
46298 VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
46299 SDValue N00 = N0.getOperand(0);
46300 SDValue N01 = N0.getOperand(1);
46301 unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
46302 APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
46303 if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
46304 (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
46305 return concatSubVectors(N00, N01, DAG, dl);
46312 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
46313 /// recognizable memcmp expansion.
46314 static bool isOrXorXorTree(SDValue X, bool Root = true) {
46315 if (X.getOpcode() == ISD::OR)
46316 return isOrXorXorTree(X.getOperand(0), false) &&
46317 isOrXorXorTree(X.getOperand(1), false);
46318 if (Root)
46319 return false;
46320 return X.getOpcode() == ISD::XOR;
46321 }
46323 /// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
46324 /// expansion.
46325 template<typename F>
46326 static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
46327 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
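// SToV converts each scalar leaf to a vector. Depending on the target this builds
// AVX512 mask compares (CmpVT != VecVT, combined with OR and later tested via
// KORTEST), plain vector XOR/OR for a PTEST (HasPT), or PCMPEQ results combined
// with AND for the MOVMSK fallback.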
46328 SDValue Op0 = X.getOperand(0);
46329 SDValue Op1 = X.getOperand(1);
46330 if (X.getOpcode() == ISD::OR) {
46331 SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
46332 SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
46333 if (VecVT != CmpVT)
46334 return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
46335 if (HasPT)
46336 return DAG.getNode(ISD::OR, DL, VecVT, A, B);
46337 return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
46338 } else if (X.getOpcode() == ISD::XOR) {
46339 SDValue A = SToV(Op0);
46340 SDValue B = SToV(Op1);
46341 if (VecVT != CmpVT)
46342 return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
46343 if (HasPT)
46344 return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
46345 return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
46347 llvm_unreachable("Impossible");
46350 /// Try to map a 128-bit or larger integer comparison to vector instructions
46351 /// before type legalization splits it up into chunks.
46352 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
46353 const X86Subtarget &Subtarget) {
46354 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
46355 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
46357 // We're looking for an oversized integer equality comparison.
46358 SDValue X = SetCC->getOperand(0);
46359 SDValue Y = SetCC->getOperand(1);
46360 EVT OpVT = X.getValueType();
46361 unsigned OpSize = OpVT.getSizeInBits();
46362 if (!OpVT.isScalarInteger() || OpSize < 128)
46363 return SDValue();
46365 // Ignore a comparison with zero because that gets special treatment in
46366 // EmitTest(). But make an exception for the special case of a pair of
46367 // logically-combined vector-sized operands compared to zero. This pattern may
46368 // be generated by the memcmp expansion pass with oversized integer compares
46370 bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
46371 if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
46372 return SDValue();
46374 // Don't perform this combine if constructing the vector will be expensive.
46375 auto IsVectorBitCastCheap = [](SDValue X) {
46376 X = peekThroughBitcasts(X);
46377 return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
46378 X.getOpcode() == ISD::LOAD;
46379 };
46380 if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
46381 !IsOrXorXorTreeCCZero)
46382 return SDValue();
46384 EVT VT = SetCC->getValueType(0);
46385 SDLoc DL(SetCC);
46387 // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
46388 // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
46389 // Otherwise use PCMPEQ (plus AND) and mask testing.
46390 if ((OpSize == 128 && Subtarget.hasSSE2()) ||
46391 (OpSize == 256 && Subtarget.hasAVX()) ||
46392 (OpSize == 512 && Subtarget.useAVX512Regs())) {
46393 bool HasPT = Subtarget.hasSSE41();
46395 // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
46396 // vector registers are essentially free. (Technically, widening registers
46397 // prevents load folding, but the tradeoff is worth it.)
46398 bool PreferKOT = Subtarget.preferMaskRegisters();
46399 bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
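// Without AVX512VL a 128/256-bit compare cannot produce a mask register directly,
// so on mask-preferring targets the operands are zero-extended to 512 bits first.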
46401 EVT VecVT = MVT::v16i8;
46402 EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
46403 if (OpSize == 256) {
46404 VecVT = MVT::v32i8;
46405 CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
46407 EVT CastVT = VecVT;
46408 bool NeedsAVX512FCast = false;
46409 if (OpSize == 512 || NeedZExt) {
46410 if (Subtarget.hasBWI()) {
46411 VecVT = MVT::v64i8;
46412 CmpVT = MVT::v64i1;
46416 VecVT = MVT::v16i32;
46417 CmpVT = MVT::v16i1;
46418 CastVT = OpSize == 512 ? VecVT :
46419 OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
46420 NeedsAVX512FCast = true;
46424 auto ScalarToVector = [&](SDValue X) -> SDValue {
46425 bool TmpZext = false;
46426 EVT TmpCastVT = CastVT;
46427 if (X.getOpcode() == ISD::ZERO_EXTEND) {
46428 SDValue OrigX = X.getOperand(0);
46429 unsigned OrigSize = OrigX.getScalarValueSizeInBits();
46430 if (OrigSize < OpSize) {
46431 if (OrigSize == 128) {
46432 TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
46435 } else if (OrigSize == 256) {
46436 TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
46442 X = DAG.getBitcast(TmpCastVT, X);
46443 if (!NeedZExt && !TmpZext)
46444 return X;
46445 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
46446 DAG.getConstant(0, DL, VecVT), X,
46447 DAG.getVectorIdxConstant(0, DL));
46451 if (IsOrXorXorTreeCCZero) {
46452 // This is a bitwise-combined equality comparison of 2 pairs of vectors:
46453 // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
46454 // Use 2 vector equality compares and 'and' the results before doing a
46456 Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
46458 SDValue VecX = ScalarToVector(X);
46459 SDValue VecY = ScalarToVector(Y);
46460 if (VecVT != CmpVT) {
46461 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
46462 } else if (HasPT) {
46463 Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
46465 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
46468 // AVX512 should emit a setcc that will lower to kortest.
46469 if (VecVT != CmpVT) {
46470 EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
46471 CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
46472 return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
46473 DAG.getConstant(0, DL, KRegVT), CC);
46476 SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
46478 SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
46479 X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
46480 SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
46481 return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
46483 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
46484 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
46485 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
46486 assert(Cmp.getValueType() == MVT::v16i8 &&
46487 "Non 128-bit vector on pre-SSE41 target");
46488 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
46489 SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
46490 return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
46496 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
46497 const X86Subtarget &Subtarget) {
46498 const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
46499 const SDValue LHS = N->getOperand(0);
46500 const SDValue RHS = N->getOperand(1);
46501 EVT VT = N->getValueType(0);
46502 EVT OpVT = LHS.getValueType();
46503 SDLoc DL(N);
46505 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
46506 if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
46507 return V;
46509 if (VT == MVT::i1 && isNullConstant(RHS)) {
46510 X86::CondCode X86CC;
46511 if (SDValue V =
46512 MatchVectorAllZeroTest(LHS, CC, DL, Subtarget, DAG, X86CC))
46513 return DAG.getNode(ISD::TRUNCATE, DL, VT,
46514 DAG.getNode(X86ISD::SETCC, DL, MVT::i8, X86CC, V));
46518 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
46519 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
46520 // Using temporaries to avoid messing up operand ordering for later
46521 // transformations if this doesn't work.
46522 SDValue Op0 = LHS;
46523 SDValue Op1 = RHS;
46524 ISD::CondCode TmpCC = CC;
46525 // Put build_vector on the right.
46526 if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
46527 std::swap(Op0, Op1);
46528 TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
46529 }
46531 bool IsSEXT0 =
46532 (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
46533 (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
46534 bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
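// Each lane of sext(vXi1) is either 0 or -1, so the signed comparisons against the
// all-zeros vector below fold to all-false, all-true, the mask itself, or its NOT.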
46536 if (IsSEXT0 && IsVZero1) {
46537 assert(VT == Op0.getOperand(0).getValueType() &&
46538 "Unexpected operand type");
46539 if (TmpCC == ISD::SETGT)
46540 return DAG.getConstant(0, DL, VT);
46541 if (TmpCC == ISD::SETLE)
46542 return DAG.getConstant(1, DL, VT);
46543 if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
46544 return DAG.getNOT(DL, Op0.getOperand(0), VT);
46546 assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
46547 "Unexpected condition code!");
46548 return Op0.getOperand(0);
46552 // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
46553 // pre-promote its result type since vXi1 vectors don't get promoted
46554 // during type legalization.
46555 // NOTE: The element count check is to ignore operand types that need to
46556 // go through type promotion to a 128-bit vector.
46557 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
46558 VT.getVectorElementType() == MVT::i1 &&
46559 (OpVT.getVectorElementType() == MVT::i8 ||
46560 OpVT.getVectorElementType() == MVT::i16)) {
46561 SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
46562 return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
46565 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
46566 // to avoid scalarization via legalization because v4i32 is not a legal type.
46567 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
46568 LHS.getValueType() == MVT::v4f32)
46569 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
46574 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
46575 TargetLowering::DAGCombinerInfo &DCI,
46576 const X86Subtarget &Subtarget) {
46577 SDValue Src = N->getOperand(0);
46578 MVT SrcVT = Src.getSimpleValueType();
46579 MVT VT = N->getSimpleValueType(0);
46580 unsigned NumBits = VT.getScalarSizeInBits();
46581 unsigned NumElts = SrcVT.getVectorNumElements();
46583 // Perform constant folding.
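// MOVMSK packs the sign bit of every source element into the low bits of an i32,
// so a negative constant element contributes a set bit at its lane index.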
46584 if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
46585 assert(VT == MVT::i32 && "Unexpected result type");
46586 APInt Imm(32, 0);
46587 for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
46588 if (!Src.getOperand(Idx).isUndef() &&
46589 Src.getConstantOperandAPInt(Idx).isNegative())
46590 Imm.setBit(Idx);
46591 }
46592 return DAG.getConstant(Imm, SDLoc(N), VT);
46595 // Look through int->fp bitcasts that don't change the element width.
46596 unsigned EltWidth = SrcVT.getScalarSizeInBits();
46597 if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
46598 Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
46599 return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
46601 // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
46602 // with scalar comparisons.
46603 if (SDValue NotSrc = IsNOT(Src, DAG)) {
46604 SDLoc DL(N);
46605 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
46606 NotSrc = DAG.getBitcast(SrcVT, NotSrc);
46607 return DAG.getNode(ISD::XOR, DL, VT,
46608 DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
46609 DAG.getConstant(NotMask, DL, VT));
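// Only the low NumElts bits of the MOVMSK result can be set, so XOR'ing with that
// low-bits mask yields the same value as MOVMSK of the bitwise-NOT'd source.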
46612 // Simplify the inputs.
46613 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46614 APInt DemandedMask(APInt::getAllOnesValue(NumBits));
46615 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
46616 return SDValue(N, 0);
46621 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
46622 TargetLowering::DAGCombinerInfo &DCI) {
46623 // With vector masks we only demand the upper bit of the mask.
46624 SDValue Mask = cast<X86MaskedGatherScatterSDNode>(N)->getMask();
46625 if (Mask.getScalarValueSizeInBits() != 1) {
46626 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46627 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
46628 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
46629 if (N->getOpcode() != ISD::DELETED_NODE)
46630 DCI.AddToWorklist(N);
46631 return SDValue(N, 0);
46638 static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
46639 SDValue Index, SDValue Base, SDValue Scale,
46640 SelectionDAG &DAG) {
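// Rebuild the gather/scatter with the updated base/index/scale while preserving the
// chain, mask, pass-through/value operands, index type and original memory operand.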
46643 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
46644 SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
46645 Gather->getMask(), Base, Index, Scale } ;
46646 return DAG.getMaskedGather(Gather->getVTList(),
46647 Gather->getMemoryVT(), DL, Ops,
46648 Gather->getMemOperand(),
46649 Gather->getIndexType());
46651 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
46652 SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
46653 Scatter->getMask(), Base, Index, Scale };
46654 return DAG.getMaskedScatter(Scatter->getVTList(),
46655 Scatter->getMemoryVT(), DL,
46656 Ops, Scatter->getMemOperand(),
46657 Scatter->getIndexType());
46660 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
46661 TargetLowering::DAGCombinerInfo &DCI) {
46663 auto *GorS = cast<MaskedGatherScatterSDNode>(N);
46664 SDValue Index = GorS->getIndex();
46665 SDValue Base = GorS->getBasePtr();
46666 SDValue Scale = GorS->getScale();
46668 if (DCI.isBeforeLegalize()) {
46669 unsigned IndexWidth = Index.getScalarValueSizeInBits();
46671 // Shrink constant indices if they are larger than 32-bits.
46672 // Only do this before legalize types since v2i64 could become v2i32.
46673 // FIXME: We could check that the type is legal if we're after legalize
46674 // types, but then we would need to construct test cases where that happens.
46675 // FIXME: We could support more than just constant vectors, but we need to
46676 // be careful with costing. A truncate that can be optimized out would be fine.
46677 // Otherwise we might only want to create a truncate if it avoids a split.
46678 if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
46679 if (BV->isConstant() && IndexWidth > 32 &&
46680 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
46681 unsigned NumElts = Index.getValueType().getVectorNumElements();
46682 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
46683 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
46684 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
46688 // Shrink any sign/zero extends from 32 or smaller to larger than 32 if
46689 // there are sufficient sign bits. Only do this before legalize types to
46690 // avoid creating illegal types in truncate.
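// ComputeNumSignBits > IndexWidth - 32 means every bit above bit 31 is a copy of
// bit 31, so the index can be truncated to i32 and sign-extended back losslessly.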
46691 if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
46692 Index.getOpcode() == ISD::ZERO_EXTEND) &&
46694 Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
46695 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
46696 unsigned NumElts = Index.getValueType().getVectorNumElements();
46697 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
46698 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
46699 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
46703 if (DCI.isBeforeLegalizeOps()) {
46704 unsigned IndexWidth = Index.getScalarValueSizeInBits();
46706 // Make sure the index is either i32 or i64
46707 if (IndexWidth != 32 && IndexWidth != 64) {
46708 MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
46709 EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
46710 Index.getValueType().getVectorNumElements());
46711 Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
46712 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
46716 // With vector masks we only demand the upper bit of the mask.
46717 SDValue Mask = GorS->getMask();
46718 if (Mask.getScalarValueSizeInBits() != 1) {
46719 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46720 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
46721 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
46722 if (N->getOpcode() != ISD::DELETED_NODE)
46723 DCI.AddToWorklist(N);
46724 return SDValue(N, 0);
46731 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
46732 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
46733 const X86Subtarget &Subtarget) {
46734 SDLoc DL(N);
46735 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
46736 SDValue EFLAGS = N->getOperand(1);
46738 // Try to simplify the EFLAGS and condition code operands.
46739 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
46740 return getSETCC(CC, Flags, DL, DAG);
46745 /// Optimize branch condition evaluation.
46746 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
46747 const X86Subtarget &Subtarget) {
46748 SDLoc DL(N);
46749 SDValue EFLAGS = N->getOperand(3);
46750 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
46752 // Try to simplify the EFLAGS and condition code operands.
46753 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
46754 // RAUW them under us.
46755 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
46756 SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
46757 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
46758 N->getOperand(1), Cond, Flags);
46764 // TODO: Could we move this to DAGCombine?
46765 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
46766 SelectionDAG &DAG) {
46767 // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
46768 // to optimize away operation when it's from a constant.
46770 // The general transformation is:
46771 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
46772 // AND(VECTOR_CMP(x,y), constant2)
46773 // constant2 = UNARYOP(constant)
46775 // Early exit if this isn't a vector operation, the operand of the
46776 // unary operation isn't a bitwise AND, or if the sizes of the operations
46777 // aren't the same.
46778 EVT VT = N->getValueType(0);
46779 bool IsStrict = N->isStrictFPOpcode();
46780 unsigned NumEltBits = VT.getScalarSizeInBits();
46781 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
46782 if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
46783 DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
46784 VT.getSizeInBits() != Op0.getValueSizeInBits())
46787 // Now check that the other operand of the AND is a constant. We could
46788 // make the transformation for non-constant splats as well, but it's unclear
46789 // that would be a benefit as it would not eliminate any operations, just
46790 // perform one more step in scalar code before moving to the vector unit.
46791 if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
46792 // Bail out if the vector isn't a constant.
46793 if (!BV->isConstant())
46796 // Everything checks out. Build up the new and improved node.
46797 SDLoc DL(N);
46798 EVT IntVT = BV->getValueType(0);
46799 // Create a new constant of the appropriate type for the transformed
46800 // operation.
46801 SDValue SourceConst;
46802 if (IsStrict)
46803 SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
46804 {N->getOperand(0), SDValue(BV, 0)});
46805 else
46806 SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
46807 // The AND node needs bitcasts to/from an integer vector type around it.
46808 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
46809 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
46811 SDValue Res = DAG.getBitcast(VT, NewAnd);
46813 return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
46820 /// If we are converting a value to floating-point, try to replace scalar
46821 /// truncate of an extracted vector element with a bitcast. This tries to keep
46822 /// the sequence on XMM registers rather than moving between vector and GPRs.
46823 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
46824 // TODO: This is currently only used by combineSIntToFP, but it is generalized
46825 // to allow being called by any similar cast opcode.
46826 // TODO: Consider merging this into lowering: vectorizeExtractedCast().
46827 SDValue Trunc = N->getOperand(0);
46828 if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
46831 SDValue ExtElt = Trunc.getOperand(0);
46832 if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
46833 !isNullConstant(ExtElt.getOperand(1)))
46836 EVT TruncVT = Trunc.getValueType();
46837 EVT SrcVT = ExtElt.getValueType();
46838 unsigned DestWidth = TruncVT.getSizeInBits();
46839 unsigned SrcWidth = SrcVT.getSizeInBits();
46840 if (SrcWidth % DestWidth != 0)
46843 // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
46844 EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
46845 unsigned VecWidth = SrcVecVT.getSizeInBits();
46846 unsigned NumElts = VecWidth / DestWidth;
46847 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
46848 SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
46850 SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
46851 BitcastVec, ExtElt.getOperand(1));
46852 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
46855 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
46856 const X86Subtarget &Subtarget) {
46857 bool IsStrict = N->isStrictFPOpcode();
46858 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
46859 EVT VT = N->getValueType(0);
46860 EVT InVT = Op0.getValueType();
46862 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
46863 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
46864 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
46865 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
46866 SDLoc dl(N);
46867 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
46868 InVT.getVectorNumElements());
46869 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
46871 // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
46872 if (IsStrict)
46873 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
46874 {N->getOperand(0), P});
46875 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
46878 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
46879 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
46880 // the optimization here.
46881 if (DAG.SignBitIsZero(Op0)) {
46882 if (IsStrict)
46883 return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
46884 {N->getOperand(0), Op0});
46885 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
46886 }
46888 return SDValue();
46889 }
46891 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
46892 TargetLowering::DAGCombinerInfo &DCI,
46893 const X86Subtarget &Subtarget) {
46894 // First try to optimize away the conversion entirely when it's
46895 // conditionally from a constant. Vectors only.
46896 bool IsStrict = N->isStrictFPOpcode();
46897 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
46900 // Now move on to more general possibilities.
46901 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
46902 EVT VT = N->getValueType(0);
46903 EVT InVT = Op0.getValueType();
46905 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
46906 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
46907 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
46908 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
46909 SDLoc dl(N);
46910 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
46911 InVT.getVectorNumElements());
46912 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
46913 if (IsStrict)
46914 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
46915 {N->getOperand(0), P});
46916 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
46919 // Without AVX512DQ we only support i64 to float scalar conversion. For both
46920 // vectors and scalars, see if we know that the upper bits are all the sign
46921 // bit, in which case we can truncate the input to i32 and convert from that.
46922 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
46923 unsigned BitWidth = InVT.getScalarSizeInBits();
46924 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
46925 if (NumSignBits >= (BitWidth - 31)) {
46926 EVT TruncVT = MVT::i32;
46927 if (InVT.isVector())
46928 TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
46929 InVT.getVectorNumElements());
46930 SDLoc dl(N);
46931 if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
46932 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
46933 if (IsStrict)
46934 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
46935 {N->getOperand(0), Trunc});
46936 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
46938 // If we're after legalize and the type is v2i32 we need to shuffle and
46940 assert(InVT == MVT::v2i64 && "Unexpected VT!");
46941 SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
46942 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
46943 {0, 2, -1, -1});
46944 if (IsStrict)
46945 return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
46946 {N->getOperand(0), Shuf});
46947 return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
46951 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
46952 // a 32-bit target where SSE doesn't support i64->FP operations.
46953 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
46954 Op0.getOpcode() == ISD::LOAD) {
46955 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
46957 // This transformation is not supported if the result type is f16 or f128.
46958 if (VT == MVT::f16 || VT == MVT::f128)
46959 return SDValue();
46961 // If we have AVX512DQ we can use packed conversion instructions unless
46962 // the VT is f80.
46963 if (Subtarget.hasDQI() && VT != MVT::f80)
46964 return SDValue();
46966 if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
46967 Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
46968 std::pair<SDValue, SDValue> Tmp =
46969 Subtarget.getTargetLowering()->BuildFILD(
46970 VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
46971 Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
46972 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
46973 return Tmp.first;
46974 }
46975 }
46980 if (SDValue V = combineToFPTruncExtElt(N, DAG))
46981 return V;
46983 return SDValue();
46984 }
46986 static bool needCarryOrOverflowFlag(SDValue Flags) {
46987 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
46989 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
46991 SDNode *User = *UI;
46994 switch (User->getOpcode()) {
46996 // Be conservative.
46998 case X86ISD::SETCC:
46999 case X86ISD::SETCC_CARRY:
47000 CC = (X86::CondCode)User->getConstantOperandVal(0);
47002 case X86ISD::BRCOND:
47003 CC = (X86::CondCode)User->getConstantOperandVal(2);
47006 CC = (X86::CondCode)User->getConstantOperandVal(2);
47012 case X86::COND_A: case X86::COND_AE:
47013 case X86::COND_B: case X86::COND_BE:
47014 case X86::COND_O: case X86::COND_NO:
47015 case X86::COND_G: case X86::COND_GE:
47016 case X86::COND_L: case X86::COND_LE:
47024 static bool onlyZeroFlagUsed(SDValue Flags) {
47025 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
47027 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
47028 UI != UE; ++UI) {
47029 SDNode *User = *UI;
47031 unsigned CCOpNo;
47032 switch (User->getOpcode()) {
47033 default:
47034 // Be conservative.
47035 return false;
47036 case X86ISD::SETCC: CCOpNo = 0; break;
47037 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
47038 case X86ISD::BRCOND: CCOpNo = 2; break;
47039 case X86ISD::CMOV: CCOpNo = 2; break;
47040 }
47042 X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
47043 if (CC != X86::COND_E && CC != X86::COND_NE)
47044 return false;
47045 }
47047 return true;
47048 }
47050 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
47051 // Only handle test patterns.
47052 if (!isNullConstant(N->getOperand(1)))
47053 return SDValue();
47055 // If we have a CMP of a truncated binop, see if we can make a smaller binop
47056 // and use its flags directly.
47057 // TODO: Maybe we should try promoting compares that only use the zero flag
47058 // first if we can prove the upper bits with computeKnownBits?
47059 SDLoc dl(N);
47060 SDValue Op = N->getOperand(0);
47061 EVT VT = Op.getValueType();
47063 // If we have a constant logical shift that's only used in a comparison
47064 // against zero turn it into an equivalent AND. This allows turning it into
47065 // a TEST instruction later.
47066 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
47067 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
47068 onlyZeroFlagUsed(SDValue(N, 0))) {
47069 unsigned BitWidth = VT.getSizeInBits();
47070 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
47071 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
47072 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
47073 APInt Mask = Op.getOpcode() == ISD::SRL
47074 ? APInt::getHighBitsSet(BitWidth, MaskBits)
47075 : APInt::getLowBitsSet(BitWidth, MaskBits);
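// Only the zero flag is consumed here: (X >> C) == 0 tests the high bits of X and
// (X << C) == 0 tests the low bits, so an AND with exactly those surviving bits
// produces the same ZF and can later be selected as a TEST.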
47076 if (Mask.isSignedIntN(32)) {
47077 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
47078 DAG.getConstant(Mask, dl, VT));
47079 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
47080 DAG.getConstant(0, dl, VT));
47085 // Look for a truncate with a single use.
47086 if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
47087 return SDValue();
47089 Op = Op.getOperand(0);
47091 // Arithmetic op can only have one use.
47092 if (!Op.hasOneUse())
47093 return SDValue();
47095 unsigned NewOpc;
47096 switch (Op.getOpcode()) {
47097 default: return SDValue();
47098 case ISD::AND:
47099 // Skip AND with a constant. We have special handling for AND with an
47100 // immediate during isel to generate TEST instructions.
47101 if (isa<ConstantSDNode>(Op.getOperand(1)))
47102 return SDValue();
47103 NewOpc = X86ISD::AND;
47104 break;
47105 case ISD::OR: NewOpc = X86ISD::OR; break;
47106 case ISD::XOR: NewOpc = X86ISD::XOR; break;
47107 case ISD::ADD:
47108 // If the carry or overflow flag is used, we can't truncate.
47109 if (needCarryOrOverflowFlag(SDValue(N, 0)))
47110 return SDValue();
47111 NewOpc = X86ISD::ADD;
47112 break;
47113 case ISD::SUB:
47114 // If the carry or overflow flag is used, we can't truncate.
47115 if (needCarryOrOverflowFlag(SDValue(N, 0)))
47116 return SDValue();
47117 NewOpc = X86ISD::SUB;
47118 break;
47119 }
47121 // We found an op we can narrow. Truncate its inputs.
47122 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
47123 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
47125 // Use a X86 specific opcode to avoid DAG combine messing with it.
47126 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
47127 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
47129 // For AND, keep a CMP so that we can match the test pattern.
47130 if (NewOpc == X86ISD::AND)
47131 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
47132 DAG.getConstant(0, dl, VT));
47134 // Return the flags.
47135 return Op.getValue(1);
47138 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
47139 TargetLowering::DAGCombinerInfo &DCI) {
47140 assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
47141 "Expected X86ISD::ADD or X86ISD::SUB");
47143 SDLoc DL(N);
47144 SDValue LHS = N->getOperand(0);
47145 SDValue RHS = N->getOperand(1);
47146 MVT VT = LHS.getSimpleValueType();
47147 unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;
47149 // If we don't use the flag result, simplify back to a generic ADD/SUB.
47150 if (!N->hasAnyUseOfValue(1)) {
47151 SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
47152 return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
47155 // Fold any similar generic ADD/SUB opcodes to reuse this node.
47156 auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
47157 SDValue Ops[] = {N0, N1};
47158 SDVTList VTs = DAG.getVTList(N->getValueType(0));
47159 if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
47160 SDValue Op(GenericAddSub, 0);
47161 if (Negate)
47162 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
47163 DCI.CombineTo(GenericAddSub, Op);
47164 }
47165 };
47166 MatchGeneric(LHS, RHS, false);
47167 MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
47172 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
47173 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
47174 MVT VT = N->getSimpleValueType(0);
47175 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
47176 return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
47177 N->getOperand(0), N->getOperand(1),
47178 Flags);
47179 }
47181 // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
47182 // iff the flag result is dead.
47183 SDValue Op0 = N->getOperand(0);
47184 SDValue Op1 = N->getOperand(1);
47185 if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
47186 !N->hasAnyUseOfValue(1))
47187 return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
47188 Op0.getOperand(1), N->getOperand(2));
47193 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
47194 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
47195 TargetLowering::DAGCombinerInfo &DCI) {
47196 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
47197 // the result is either zero or one (depending on the input carry bit).
47198 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
47199 if (X86::isZeroNode(N->getOperand(0)) &&
47200 X86::isZeroNode(N->getOperand(1)) &&
47201 // We don't have a good way to replace an EFLAGS use, so only do this when
47202 // it is profitable.
47203 SDValue(N, 1).use_empty()) {
47204 SDLoc DL(N);
47205 EVT VT = N->getValueType(0);
47206 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
47207 SDValue Res1 =
47208 DAG.getNode(ISD::AND, DL, VT,
47209 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
47210 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
47211 N->getOperand(2)),
47212 DAG.getConstant(1, DL, VT));
47213 return DCI.CombineTo(N, Res1, CarryOut);
47216 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
47217 MVT VT = N->getSimpleValueType(0);
47218 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
47219 return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
47220 N->getOperand(0), N->getOperand(1),
47221 Flags);
47222 }
47224 return SDValue();
47225 }
47227 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
47228 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
47229 /// with CMP+{ADC, SBB}.
47230 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
47231 bool IsSub = N->getOpcode() == ISD::SUB;
47232 SDValue X = N->getOperand(0);
47233 SDValue Y = N->getOperand(1);
47235 // If this is an add, canonicalize a zext operand to the RHS.
47236 // TODO: Incomplete? What if both sides are zexts?
47237 if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
47238 Y.getOpcode() != ISD::ZERO_EXTEND)
47239 std::swap(X, Y);
47241 // Look through a one-use zext.
47242 bool PeekedThroughZext = false;
47243 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
47244 Y = Y.getOperand(0);
47245 PeekedThroughZext = true;
47248 // If this is an add, canonicalize a setcc operand to the RHS.
47249 // TODO: Incomplete? What if both sides are setcc?
47250 // TODO: Should we allow peeking through a zext of the other operand?
47251 if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
47252 Y.getOpcode() != X86ISD::SETCC)
47253 std::swap(X, Y);
47255 if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
47256 return SDValue();
47258 SDLoc DL(N);
47259 EVT VT = N->getValueType(0);
47260 X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
47262 // If X is -1 or 0, then we have an opportunity to avoid constants required in
47263 // the general case below.
47264 auto *ConstantX = dyn_cast<ConstantSDNode>(X);
47265 if (ConstantX) {
47266 if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
47267 (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
47268 // This is a complicated way to get -1 or 0 from the carry flag:
47269 // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
47270 // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
47271 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
47272 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
47276 if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
47277 (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
47278 SDValue EFLAGS = Y->getOperand(1);
47279 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
47280 EFLAGS.getValueType().isInteger() &&
47281 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
47282 // Swap the operands of a SUB, and we have the same pattern as above.
47283 // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
47284 // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
47285 SDValue NewSub = DAG.getNode(
47286 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
47287 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
47288 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
47289 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
47290 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
47296 if (CC == X86::COND_B) {
47297 // X + SETB Z --> adc X, 0
47298 // X - SETB Z --> sbb X, 0
47299 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
47300 DAG.getVTList(VT, MVT::i32), X,
47301 DAG.getConstant(0, DL, VT), Y.getOperand(1));
47304 if (CC == X86::COND_A) {
47305 SDValue EFLAGS = Y.getOperand(1);
47306 // Try to convert COND_A into COND_B in an attempt to facilitate
47307 // materializing "setb reg".
47309 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
47310 // cannot take an immediate as its first operand.
47312 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
47313 EFLAGS.getValueType().isInteger() &&
47314 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
47315 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
47316 EFLAGS.getNode()->getVTList(),
47317 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
47318 SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
47319 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
47320 DAG.getVTList(VT, MVT::i32), X,
47321 DAG.getConstant(0, DL, VT), NewEFLAGS);
47325 if (CC == X86::COND_AE) {
47326 // X + SETAE --> sbb X, -1
47327 // X - SETAE --> adc X, -1
47328 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
47329 DAG.getVTList(VT, MVT::i32), X,
47330 DAG.getConstant(-1, DL, VT), Y.getOperand(1));
47333 if (CC == X86::COND_BE) {
47334 // X + SETBE --> sbb X, -1
47335 // X - SETBE --> adc X, -1
47336 SDValue EFLAGS = Y.getOperand(1);
47337 // Try to convert COND_BE into COND_AE in an attempt to facilitate
47338 // materializing "setae reg".
47340 // Do not flip "e <= c", where "c" is a constant, because Cmp instruction
47341 // cannot take an immediate as its first operand.
47343 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
47344 EFLAGS.getValueType().isInteger() &&
47345 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
47346 SDValue NewSub = DAG.getNode(
47347 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
47348 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
47349 SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
47350 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
47351 DAG.getVTList(VT, MVT::i32), X,
47352 DAG.getConstant(-1, DL, VT), NewEFLAGS);
47356 if (CC != X86::COND_E && CC != X86::COND_NE)
47359 SDValue Cmp = Y.getOperand(1);
47360 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
47361 !X86::isZeroNode(Cmp.getOperand(1)) ||
47362 !Cmp.getOperand(0).getValueType().isInteger())
47365 SDValue Z = Cmp.getOperand(0);
47366 EVT ZVT = Z.getValueType();
47368 // If X is -1 or 0, then we have an opportunity to avoid constants required in
47369 // the general case below.
47370 if (ConstantX) {
47371 // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
47372 // fake operands:
47373 // 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
47374 // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
47375 if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
47376 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
47377 SDValue Zero = DAG.getConstant(0, DL, ZVT);
47378 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
47379 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
47380 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
47381 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
47382 SDValue(Neg.getNode(), 1));
47385 // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
47386 // with fake operands:
47387 // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
47388 // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
47389 if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
47390 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
47391 SDValue One = DAG.getConstant(1, DL, ZVT);
47392 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
47393 SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
47394 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
47395 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
47400 // (cmp Z, 1) sets the carry flag if Z is 0.
47401 SDValue One = DAG.getConstant(1, DL, ZVT);
47402 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
47403 SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
47405 // Add the flags type for ADC/SBB nodes.
47406 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
47408 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
47409 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
47410 if (CC == X86::COND_NE)
47411 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
47412 DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));
47414 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
47415 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
47416 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
47417 DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
47420 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
47421 const SDLoc &DL, EVT VT,
47422 const X86Subtarget &Subtarget) {
47423 // Example of pattern we try to detect:
47424 // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
47425 //(add (build_vector (extract_elt t, 0),
47426 // (extract_elt t, 2),
47427 // (extract_elt t, 4),
47428 // (extract_elt t, 6)),
47429 // (build_vector (extract_elt t, 1),
47430 // (extract_elt t, 3),
47431 // (extract_elt t, 5),
47432 // (extract_elt t, 7)))
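// This is exactly what PMADDWD computes: each i32 result lane is
// a[2*i]*b[2*i] + a[2*i+1]*b[2*i+1] over the i16 inputs.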
47434 if (!Subtarget.hasSSE2())
47437 if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
47438 Op1.getOpcode() != ISD::BUILD_VECTOR)
47441 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
47442 VT.getVectorNumElements() < 4 ||
47443 !isPowerOf2_32(VT.getVectorNumElements()))
47446 // Check if one of Op0,Op1 is of the form:
47447 // (build_vector (extract_elt Mul, 0),
47448 // (extract_elt Mul, 2),
47449 // (extract_elt Mul, 4),
47451 // the other is of the form:
47452 // (build_vector (extract_elt Mul, 1),
47453 // (extract_elt Mul, 3),
47454 // (extract_elt Mul, 5),
47456 // and identify Mul.
47457 SDValue Mul;
47458 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
47459 SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
47460 Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
47461 // TODO: Be more tolerant to undefs.
47462 if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47463 Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47464 Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47465 Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
47467 auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
47468 auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
47469 auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
47470 auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
47471 if (!Const0L || !Const1L || !Const0H || !Const1H)
47473 unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
47474 Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
47475 // Commutativity of mul allows factors of a product to reorder.
47476 if (Idx0L > Idx1L)
47477 std::swap(Idx0L, Idx1L);
47478 if (Idx0H > Idx1H)
47479 std::swap(Idx0H, Idx1H);
47480 // Commutativity of add allows pairs of factors to reorder.
47481 if (Idx0L > Idx0H) {
47482 std::swap(Idx0L, Idx0H);
47483 std::swap(Idx1L, Idx1H);
47485 if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
47486 Idx1H != 2 * i + 3)
47489 // First time an extract_elt's source vector is visited. Must be a MUL
47490 // with 2X number of vector elements than the BUILD_VECTOR.
47491 // Both extracts must be from same MUL.
47492 Mul = Op0L->getOperand(0);
47493 if (Mul->getOpcode() != ISD::MUL ||
47494 Mul.getValueType().getVectorNumElements() != 2 * e)
47497 // Check that the extract is from the same MUL previously seen.
47498 if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
47499 Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
47503 // Check if the Mul source can be safely shrunk.
47504 ShrinkMode Mode;
47505 if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
47506 Mode == ShrinkMode::MULU16)
47509 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
47510 VT.getVectorNumElements() * 2);
47511 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
47512 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));
47514 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47515 ArrayRef<SDValue> Ops) {
47516 EVT InVT = Ops[0].getValueType();
47517 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
47518 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
47519 InVT.getVectorNumElements() / 2);
47520 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
47522 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
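// SplitOpsAndApply breaks wide operands into subtarget-legal pieces, applies
// PMADDBuilder to each piece and concatenates the results, so larger-than-legal
// vectors still lower to PMADDWD.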
47525 // Attempt to turn this pattern into PMADDWD.
47526 // (add (mul (sext (build_vector)), (sext (build_vector))),
47527 // (mul (sext (build_vector)), (sext (build_vector)))
47528 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
47529 const SDLoc &DL, EVT VT,
47530 const X86Subtarget &Subtarget) {
47531 if (!Subtarget.hasSSE2())
47534 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
47537 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
47538 VT.getVectorNumElements() < 4 ||
47539 !isPowerOf2_32(VT.getVectorNumElements()))
47542 SDValue N00 = N0.getOperand(0);
47543 SDValue N01 = N0.getOperand(1);
47544 SDValue N10 = N1.getOperand(0);
47545 SDValue N11 = N1.getOperand(1);
47547 // All inputs need to be sign extends.
47548 // TODO: Support ZERO_EXTEND from known positive?
47549 if (N00.getOpcode() != ISD::SIGN_EXTEND ||
47550 N01.getOpcode() != ISD::SIGN_EXTEND ||
47551 N10.getOpcode() != ISD::SIGN_EXTEND ||
47552 N11.getOpcode() != ISD::SIGN_EXTEND)
47555 // Peek through the extends.
47556 N00 = N00.getOperand(0);
47557 N01 = N01.getOperand(0);
47558 N10 = N10.getOperand(0);
47559 N11 = N11.getOperand(0);
47561 // Must be extending from vXi16.
47562 EVT InVT = N00.getValueType();
47563 if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
47564 N10.getValueType() != InVT || N11.getValueType() != InVT)
47567 // All inputs should be build_vectors.
47568 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
47569 N01.getOpcode() != ISD::BUILD_VECTOR ||
47570 N10.getOpcode() != ISD::BUILD_VECTOR ||
47571 N11.getOpcode() != ISD::BUILD_VECTOR)
47574 // For each element, we need to ensure we have an odd element from one vector
47575 // multiplied by the odd element of another vector and the even element from
47576 // one of the same vectors being multiplied by the even element from the
47577 // other vector. So we need to make sure for each element i, this operator
47578 // is being performed:
47579 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
47580 SDValue In0, In1;
47581 for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
47582 SDValue N00Elt = N00.getOperand(i);
47583 SDValue N01Elt = N01.getOperand(i);
47584 SDValue N10Elt = N10.getOperand(i);
47585 SDValue N11Elt = N11.getOperand(i);
47586 // TODO: Be more tolerant to undefs.
47587 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47588 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47589 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47590 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
47592 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
47593 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
47594 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
47595 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
47596 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
47598 unsigned IdxN00 = ConstN00Elt->getZExtValue();
47599 unsigned IdxN01 = ConstN01Elt->getZExtValue();
47600 unsigned IdxN10 = ConstN10Elt->getZExtValue();
47601 unsigned IdxN11 = ConstN11Elt->getZExtValue();
47602 // Add is commutative so indices can be reordered.
47603 if (IdxN00 > IdxN10) {
47604 std::swap(IdxN00, IdxN10);
47605 std::swap(IdxN01, IdxN11);
47607 // N0 indices must be the even elements. N1 indices must be the next odd elements.
47608 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
47609 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
47611 SDValue N00In = N00Elt.getOperand(0);
47612 SDValue N01In = N01Elt.getOperand(0);
47613 SDValue N10In = N10Elt.getOperand(0);
47614 SDValue N11In = N11Elt.getOperand(0);
47615 // First time we find an input, capture it.
47616 if (!In0) {
47617 In0 = N00In;
47618 In1 = N01In;
47619 }
47620 // Mul is commutative so the input vectors can be in any order.
47621 // Canonicalize to make the compares easier.
47622 if (In0 != N00In)
47623 std::swap(N00In, N01In);
47624 if (In0 != N10In)
47625 std::swap(N10In, N11In);
47626 if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
47630 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47631 ArrayRef<SDValue> Ops) {
47632 // Shrink by adding truncate nodes and let DAGCombine fold with the
47634 EVT OpVT = Ops[0].getValueType();
47635 assert(OpVT.getScalarType() == MVT::i16 &&
47636 "Unexpected scalar element type");
47637 assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
47638 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
47639 OpVT.getVectorNumElements() / 2);
47640 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
47642 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
47646 static SDValue combineAddOrSubToHADDorHSUB(SDNode *N, SelectionDAG &DAG,
47647 const X86Subtarget &Subtarget) {
47648 EVT VT = N->getValueType(0);
47649 SDValue Op0 = N->getOperand(0);
47650 SDValue Op1 = N->getOperand(1);
47651 bool IsAdd = N->getOpcode() == ISD::ADD;
47652 assert((IsAdd || N->getOpcode() == ISD::SUB) && "Wrong opcode");
47654 SmallVector<int, 8> PostShuffleMask;
47655 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
47656 VT == MVT::v8i32) &&
47657 Subtarget.hasSSSE3() &&
47658 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, IsAdd, PostShuffleMask)) {
47659 auto HOpBuilder = [IsAdd](SelectionDAG &DAG, const SDLoc &DL,
47660 ArrayRef<SDValue> Ops) {
47661 return DAG.getNode(IsAdd ? X86ISD::HADD : X86ISD::HSUB, DL,
47662 Ops[0].getValueType(), Ops);
47664 SDValue HorizBinOp =
47665 SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1}, HOpBuilder);
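// If isHorizontalBinOp matched the inputs with their lanes permuted, PostShuffleMask
// is non-empty and restores the expected element order on the result.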
47666 if (!PostShuffleMask.empty())
47667 HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
47668 DAG.getUNDEF(VT), PostShuffleMask);
47675 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
47676 TargetLowering::DAGCombinerInfo &DCI,
47677 const X86Subtarget &Subtarget) {
47678 EVT VT = N->getValueType(0);
47679 SDValue Op0 = N->getOperand(0);
47680 SDValue Op1 = N->getOperand(1);
47682 if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
47683 return MAdd;
47684 if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
47685 return MAdd;
47687 // Try to synthesize horizontal adds from adds of shuffles.
47688 if (SDValue V = combineAddOrSubToHADDorHSUB(N, DAG, Subtarget))
47691 // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
47692 // (sub Y, (sext (vXi1 X))).
47693 // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
47694 // generic DAG combine without a legal type check, but adding this there
47695 // caused regressions.
47696 if (VT.isVector()) {
47697 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47698 if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
47699 Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
47700 TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
47702 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
47703 return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
47706 if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
47707 Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
47708 TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
47710 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
47711 return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
47715 return combineAddOrSubToADCOrSBB(N, DAG);
47718 static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
47719 const X86Subtarget &Subtarget) {
47720 SDValue Op0 = N->getOperand(0);
47721 SDValue Op1 = N->getOperand(1);
47722 EVT VT = N->getValueType(0);
47724 if (!VT.isVector())
47727 // PSUBUS is supported, starting from SSE2, but truncation for v8i32
47728 // is only worth it with SSSE3 (PSHUFB).
47729 EVT EltVT = VT.getVectorElementType();
47730 if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) &&
47731 !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
47732 !(Subtarget.useBWIRegs() && (VT == MVT::v16i32)))
47735 SDValue SubusLHS, SubusRHS;
47736 // Try to find umax(a,b) - b or a - umin(a,b) patterns
47737 // they may be converted to subus(a,b).
47738 // TODO: Need to add IR canonicalization for this code.
47739 if (Op0.getOpcode() == ISD::UMAX) {
47741 SDValue MaxLHS = Op0.getOperand(0);
47742 SDValue MaxRHS = Op0.getOperand(1);
47745 else if (MaxRHS == Op1)
47749 } else if (Op1.getOpcode() == ISD::UMIN) {
47751 SDValue MinLHS = Op1.getOperand(0);
47752 SDValue MinRHS = Op1.getOperand(1);
47755 else if (MinRHS == Op0)
47759 } else if (Op1.getOpcode() == ISD::TRUNCATE &&
47760 Op1.getOperand(0).getOpcode() == ISD::UMIN &&
47761 (EltVT == MVT::i8 || EltVT == MVT::i16)) {
47762 // Special case where the UMIN has been truncated. Try to push the truncate
47763 // further up. This is similar to the i32/i64 special processing.
47765 SDValue MinLHS = Op1.getOperand(0).getOperand(0);
47766 SDValue MinRHS = Op1.getOperand(0).getOperand(1);
47767 EVT TruncVT = Op1.getOperand(0).getValueType();
47768 if (!(Subtarget.hasSSSE3() && (TruncVT == MVT::v8i32 ||
47769 TruncVT == MVT::v8i64)) &&
47770 !(Subtarget.useBWIRegs() && (TruncVT == MVT::v16i32)))
47772 SDValue OpToSaturate;
47773 if (MinLHS.getOpcode() == ISD::ZERO_EXTEND &&
47774 MinLHS.getOperand(0) == Op0)
47775 OpToSaturate = MinRHS;
47776 else if (MinRHS.getOpcode() == ISD::ZERO_EXTEND &&
47777 MinRHS.getOperand(0) == Op0)
47778 OpToSaturate = MinLHS;
47782 // Saturate the non-extended input and then truncate it.
47784 SDValue SaturationConst =
47785 DAG.getConstant(APInt::getLowBitsSet(TruncVT.getScalarSizeInBits(),
47786 VT.getScalarSizeInBits()),
47788 SDValue UMin = DAG.getNode(ISD::UMIN, DL, TruncVT, OpToSaturate,
47790 SubusRHS = DAG.getNode(ISD::TRUNCATE, DL, VT, UMin);
47794 // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
47795 // special preprocessing in some cases.
47796 if (EltVT == MVT::i8 || EltVT == MVT::i16)
47797 return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);
47799 assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
47802 // The special preprocessing case can only be applied
47803 // if the value was zero extended from 16 bits,
47804 // so we require the leading 16 bits to be zero for 32-bit
47805 // values, or the leading 48 bits for 64-bit values.
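// For example, a v8i32 SubusLHS whose top 16 bits are known zero behaves like
// a zero-extended v8i16 value, so the subtraction can be narrowed to a v8i16
// usubsat and the result zero extended back afterwards.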
47806 KnownBits Known = DAG.computeKnownBits(SubusLHS);
47807 unsigned NumZeros = Known.countMinLeadingZeros();
47808 if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
47811 EVT ExtType = SubusLHS.getValueType();
47813 if (VT == MVT::v8i32 || VT == MVT::v8i64)
47814 ShrinkedType = MVT::v8i16;
47816 ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
47818 // If SubusLHS is zero extended, clamp SubusRHS to the narrower
47819 // size first: SubusRHS = umin(0xFFF.., SubusRHS).
47820 SDValue SaturationConst =
47821 DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
47822 ShrinkedType.getScalarSizeInBits()),
47823 SDLoc(SubusLHS), ExtType);
47824 SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
47826 SDValue NewSubusLHS =
47827 DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
47828 SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
47829 SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
47830 NewSubusLHS, NewSubusRHS);
47832 // Zero extend the result; it may be used somewhere as 32 bit.
47833 // If not, the zext and the following trunc will simply fold away.
47834 return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
47837 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
47838 TargetLowering::DAGCombinerInfo &DCI,
47839 const X86Subtarget &Subtarget) {
47840 SDValue Op0 = N->getOperand(0);
47841 SDValue Op1 = N->getOperand(1);
47843 // X86 can't encode an immediate LHS of a sub. See if we can push the
47844 // negation into a preceding instruction.
47845 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
47846 // If the RHS of the sub is a XOR with one use and a constant, invert the
47847 // immediate. Then add one to the LHS of the sub so we can turn
47848 // X-Y -> X+~Y+1, saving one register.
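// For example, C - (Y ^ K) is rewritten as (Y ^ ~K) + (C + 1), using the
// identity -(Y ^ K) == (Y ^ ~K) + 1, so the constant ends up on the RHS of an
// add where it can be encoded as an immediate.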
47849 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
47850 isa<ConstantSDNode>(Op1.getOperand(1))) {
47851 const APInt &XorC = Op1.getConstantOperandAPInt(1);
47852 EVT VT = Op0.getValueType();
47853 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
47855 DAG.getConstant(~XorC, SDLoc(Op1), VT));
47856 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
47857 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
47861 // Try to synthesize horizontal subs from subs of shuffles.
47862 if (SDValue V = combineAddOrSubToHADDorHSUB(N, DAG, Subtarget))
47865 // Try to create PSUBUS if SUB's argument is max/min
47866 if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
47869 return combineAddOrSubToADCOrSBB(N, DAG);
47872 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
47873 const X86Subtarget &Subtarget) {
47874 MVT VT = N->getSimpleValueType(0);
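// Comparing a vector against itself has a known result: PCMPEQ produces
// all-ones and PCMPGT produces all-zeros, so fold to a constant directly.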
47877 if (N->getOperand(0) == N->getOperand(1)) {
47878 if (N->getOpcode() == X86ISD::PCMPEQ)
47879 return DAG.getConstant(-1, DL, VT);
47880 if (N->getOpcode() == X86ISD::PCMPGT)
47881 return DAG.getConstant(0, DL, VT);
47887 /// Helper that combines an array of subvector ops as if they were the operands
47888 /// of an ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
47889 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
47890 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
47891 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
47892 TargetLowering::DAGCombinerInfo &DCI,
47893 const X86Subtarget &Subtarget) {
47894 assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
47895 unsigned EltSizeInBits = VT.getScalarSizeInBits();
47897 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
47898 return DAG.getUNDEF(VT);
47900 if (llvm::all_of(Ops, [](SDValue Op) {
47901 return ISD::isBuildVectorAllZeros(Op.getNode());
47903 return getZeroVector(VT, Subtarget, DAG, DL);
47905 SDValue Op0 = Ops[0];
47906 bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
47908 // Fold subvector loads into one.
47909 // If needed, look through bitcasts to get to the load.
47910 if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
47912 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
47913 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
47914 *FirstLd->getMemOperand(), &Fast) &&
47917 EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
47922 // Repeated subvectors.
47924 // If this broadcast/subv_broadcast is inserted into both halves, use a
47925 // larger broadcast/subv_broadcast.
47926 if (Op0.getOpcode() == X86ISD::VBROADCAST ||
47927 Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
47928 return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
47930 // If this broadcast_load is inserted into both halves, use a larger
47931 // broadcast_load. Update other uses to use an extracted subvector.
47932 if (Op0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
47933 auto *MemIntr = cast<MemIntrinsicSDNode>(Op0);
47934 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
47935 SDValue Ops[] = {MemIntr->getChain(), MemIntr->getBasePtr()};
47936 SDValue BcastLd = DAG.getMemIntrinsicNode(
47937 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
47938 MemIntr->getMemOperand());
47939 DAG.ReplaceAllUsesOfValueWith(
47940 Op0, extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits()));
47941 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
47945 // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
47946 if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
47947 (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
47948 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
47949 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
47951 DAG.getIntPtrConstant(0, DL)));
47953 // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
47954 if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
47955 (Subtarget.hasAVX2() ||
47956 (EltSizeInBits >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
47957 Op0.getOperand(0).getValueType() == VT.getScalarType())
47958 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
47960 // concat_vectors(extract_subvector(broadcast(x)),
47961 // extract_subvector(broadcast(x))) -> broadcast(x)
47962 if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
47963 Op0.getOperand(0).getValueType() == VT) {
47964 if (Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST ||
47965 Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST_LOAD)
47966 return Op0.getOperand(0);
47970 // Repeated opcode.
47971 // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
47972 // but it currently struggles with different vector widths.
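// If every operand uses the same opcode (and, where relevant, the same
// immediate), the per-lane nodes can often be concatenated into a single wide
// node, e.g. two 128-bit PSHUFDs with the same shuffle immediate can become
// one 256-bit PSHUFD on AVX2.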
47973 if (llvm::all_of(Ops, [Op0](SDValue Op) {
47974 return Op.getOpcode() == Op0.getOpcode();
47976 unsigned NumOps = Ops.size();
47977 switch (Op0.getOpcode()) {
47978 case X86ISD::SHUFP: {
47979 // Add SHUFPD support if/when necessary.
47980 if (!IsSplat && VT.getScalarType() == MVT::f32 &&
47981 llvm::all_of(Ops, [Op0](SDValue Op) {
47982 return Op.getOperand(2) == Op0.getOperand(2);
47984 SmallVector<SDValue, 2> LHS, RHS;
47985 for (unsigned i = 0; i != NumOps; ++i) {
47986 LHS.push_back(Ops[i].getOperand(0));
47987 RHS.push_back(Ops[i].getOperand(1));
47989 return DAG.getNode(Op0.getOpcode(), DL, VT,
47990 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS),
47991 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, RHS),
47992 Op0.getOperand(2));
47996 case X86ISD::PSHUFHW:
47997 case X86ISD::PSHUFLW:
47998 case X86ISD::PSHUFD:
47999 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
48000 Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
48001 SmallVector<SDValue, 2> Src;
48002 for (unsigned i = 0; i != NumOps; ++i)
48003 Src.push_back(Ops[i].getOperand(0));
48004 return DAG.getNode(Op0.getOpcode(), DL, VT,
48005 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
48006 Op0.getOperand(1));
48009 case X86ISD::VPERMILPI:
48010 // TODO - add support for vXf64/vXi64 shuffles.
48011 if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
48012 Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
48013 SmallVector<SDValue, 2> Src;
48014 for (unsigned i = 0; i != NumOps; ++i)
48015 Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
48016 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
48017 Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
48018 Op0.getOperand(1));
48019 return DAG.getBitcast(VT, Res);
48022 case X86ISD::VSHLI:
48023 case X86ISD::VSRAI:
48024 case X86ISD::VSRLI:
48025 if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
48026 (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
48027 (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
48028 llvm::all_of(Ops, [Op0](SDValue Op) {
48029 return Op0.getOperand(1) == Op.getOperand(1);
48031 SmallVector<SDValue, 2> Src;
48032 for (unsigned i = 0; i != NumOps; ++i)
48033 Src.push_back(Ops[i].getOperand(0));
48034 return DAG.getNode(Op0.getOpcode(), DL, VT,
48035 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
48036 Op0.getOperand(1));
48039 case X86ISD::VPERMI:
48040 case X86ISD::VROTLI:
48041 case X86ISD::VROTRI:
48042 if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
48043 llvm::all_of(Ops, [Op0](SDValue Op) {
48044 return Op0.getOperand(1) == Op.getOperand(1);
48046 SmallVector<SDValue, 2> Src;
48047 for (unsigned i = 0; i != NumOps; ++i)
48048 Src.push_back(Ops[i].getOperand(0));
48049 return DAG.getNode(Op0.getOpcode(), DL, VT,
48050 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
48051 Op0.getOperand(1));
48054 case X86ISD::PACKSS:
48055 case X86ISD::PACKUS:
48056 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
48057 Subtarget.hasInt256()) {
48058 SmallVector<SDValue, 2> LHS, RHS;
48059 for (unsigned i = 0; i != NumOps; ++i) {
48060 LHS.push_back(Ops[i].getOperand(0));
48061 RHS.push_back(Ops[i].getOperand(1));
48063 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
48064 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
48065 NumOps * SrcVT.getVectorNumElements());
48066 return DAG.getNode(Op0.getOpcode(), DL, VT,
48067 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
48068 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
48071 case X86ISD::PALIGNR:
48073 ((VT.is256BitVector() && Subtarget.hasInt256()) ||
48074 (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
48075 llvm::all_of(Ops, [Op0](SDValue Op) {
48076 return Op0.getOperand(2) == Op.getOperand(2);
48078 SmallVector<SDValue, 2> LHS, RHS;
48079 for (unsigned i = 0; i != NumOps; ++i) {
48080 LHS.push_back(Ops[i].getOperand(0));
48081 RHS.push_back(Ops[i].getOperand(1));
48083 return DAG.getNode(Op0.getOpcode(), DL, VT,
48084 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS),
48085 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, RHS),
48086 Op0.getOperand(2));
48095 static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
48096 TargetLowering::DAGCombinerInfo &DCI,
48097 const X86Subtarget &Subtarget) {
48098 EVT VT = N->getValueType(0);
48099 EVT SrcVT = N->getOperand(0).getValueType();
48100 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48102 // Don't do anything for i1 vectors.
48103 if (VT.getVectorElementType() == MVT::i1)
48106 if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
48107 SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
48108 if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
48116 static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
48117 TargetLowering::DAGCombinerInfo &DCI,
48118 const X86Subtarget &Subtarget) {
48119 if (DCI.isBeforeLegalizeOps())
48122 MVT OpVT = N->getSimpleValueType(0);
48124 bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
48127 SDValue Vec = N->getOperand(0);
48128 SDValue SubVec = N->getOperand(1);
48130 uint64_t IdxVal = N->getConstantOperandVal(2);
48131 MVT SubVecVT = SubVec.getSimpleValueType();
48133 if (Vec.isUndef() && SubVec.isUndef())
48134 return DAG.getUNDEF(OpVT);
48136 // Inserting undefs/zeros into zeros/undefs is a zero vector.
48137 if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
48138 (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
48139 return getZeroVector(OpVT, Subtarget, DAG, dl);
48141 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
48142 // If we're inserting into a zero vector and then into a larger zero vector,
48143 // just insert into the larger zero vector directly.
48144 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
48145 ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
48146 uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
48147 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
48148 getZeroVector(OpVT, Subtarget, DAG, dl),
48149 SubVec.getOperand(1),
48150 DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
48153 // If we're inserting into a zero vector, our input was extracted from an
48154 // insert into a zero vector of the same type, and the extraction was at
48155 // least as large as the original insertion, just insert the original
48156 // subvector into a zero vector.
48157 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
48158 isNullConstant(SubVec.getOperand(1)) &&
48159 SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
48160 SDValue Ins = SubVec.getOperand(0);
48161 if (isNullConstant(Ins.getOperand(2)) &&
48162 ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
48163 Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
48164 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
48165 getZeroVector(OpVT, Subtarget, DAG, dl),
48166 Ins.getOperand(1), N->getOperand(2));
48170 // Stop here if this is an i1 vector.
48174 // If this is an insert of an extract, combine to a shuffle. Don't do this
48175 // if the insert or extract can be represented with a subregister operation.
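// For example, inserting extract_subvector(Y, 4) into X cannot in general be
// expressed with subregister copies when the extract index is non-zero, so it
// is rewritten as a two-input shuffle of X and Y instead.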
48176 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
48177 SubVec.getOperand(0).getSimpleValueType() == OpVT &&
48179 !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
48180 int ExtIdxVal = SubVec.getConstantOperandVal(1);
48181 if (ExtIdxVal != 0) {
48182 int VecNumElts = OpVT.getVectorNumElements();
48183 int SubVecNumElts = SubVecVT.getVectorNumElements();
48184 SmallVector<int, 64> Mask(VecNumElts);
48185 // First create an identity shuffle mask.
48186 for (int i = 0; i != VecNumElts; ++i)
48188 // Now insert the extracted portion.
48189 for (int i = 0; i != SubVecNumElts; ++i)
48190 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
48192 return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
48196 // Match concat_vector style patterns.
48197 SmallVector<SDValue, 2> SubVectorOps;
48198 if (collectConcatOps(N, SubVectorOps)) {
48200 combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
48203 // If we're inserting all zeros into the upper half, change this to
48204 // a concat with zero. We will match this to a move
48205 // with implicit upper bit zeroing during isel.
48206 // We do this here because we don't want combineConcatVectorOps to
48207 // create INSERT_SUBVECTOR from CONCAT_VECTORS.
48208 if (SubVectorOps.size() == 2 &&
48209 ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
48210 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
48211 getZeroVector(OpVT, Subtarget, DAG, dl),
48212 SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
48215 // If this is a broadcast insert into an upper undef, use a larger broadcast.
48216 if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
48217 return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
48219 // If this is a broadcast load inserted into an upper undef, use a larger
48221 if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
48222 SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
48223 auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
48224 SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
48225 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
48227 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
48228 MemIntr->getMemoryVT(),
48229 MemIntr->getMemOperand());
48230 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
48237 /// If we are extracting a subvector of a vector select and the select condition
48238 /// is composed of concatenated vectors, try to narrow the select width. This
48239 /// is a common pattern for AVX1 integer code because 256-bit selects may be
48240 /// legal, but there is almost no integer math/logic available for 256-bit.
48241 /// This function should only be called with legal types (otherwise, the calls
48242 /// to get simple value types will assert).
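/// For example, extracting the low 128 bits of a v8i32 vselect whose condition
/// is a concat of two v4i32 masks can instead be formed as a v4i32 vselect of
/// the corresponding 128-bit halves of the operands.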
48243 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
48244 SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
48245 SmallVector<SDValue, 4> CatOps;
48246 if (Sel.getOpcode() != ISD::VSELECT ||
48247 !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
48250 // Note: We assume simple value types because this should only be called with
48251 // legal operations/types.
48252 // TODO: This can be extended to handle extraction to 256-bits.
48253 MVT VT = Ext->getSimpleValueType(0);
48254 if (!VT.is128BitVector())
48257 MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
48258 if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
48261 MVT WideVT = Ext->getOperand(0).getSimpleValueType();
48262 MVT SelVT = Sel.getSimpleValueType();
48263 assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
48264 "Unexpected vector type with legal operations");
48266 unsigned SelElts = SelVT.getVectorNumElements();
48267 unsigned CastedElts = WideVT.getVectorNumElements();
48268 unsigned ExtIdx = Ext->getConstantOperandVal(1);
48269 if (SelElts % CastedElts == 0) {
48270 // The select has the same or more (narrower) elements than the extract
48271 // operand. The extraction index gets scaled by that factor.
48272 ExtIdx *= (SelElts / CastedElts);
48273 } else if (CastedElts % SelElts == 0) {
48274 // The select has fewer (wider) elements than the extract operand. Make sure
48275 // that the extraction index can be divided evenly.
48276 unsigned IndexDivisor = CastedElts / SelElts;
48277 if (ExtIdx % IndexDivisor != 0)
48279 ExtIdx /= IndexDivisor;
48281 llvm_unreachable("Element count of simple vector types are not divisible?");
48284 unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
48285 unsigned NarrowElts = SelElts / NarrowingFactor;
48286 MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
48288 SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
48289 SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
48290 SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
48291 SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
48292 return DAG.getBitcast(VT, NarrowSel);
48295 static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
48296 TargetLowering::DAGCombinerInfo &DCI,
48297 const X86Subtarget &Subtarget) {
48298 // For AVX1 only, if we are extracting from a 256-bit and+not (which will
48299 // eventually get combined/lowered into ANDNP) with a concatenated operand,
48300 // split the 'and' into 128-bit ops to avoid the concatenate and extract.
48301 // We let generic combining take over from there to simplify the
48302 // insert/extract and 'not'.
48303 // This pattern emerges during AVX1 legalization. We handle it before lowering
48304 // to avoid complications like splitting constant vector loads.
48306 // Capture the original wide type in the likely case that we need to bitcast
48307 // back to this type.
48308 if (!N->getValueType(0).isSimple())
48311 MVT VT = N->getSimpleValueType(0);
48312 SDValue InVec = N->getOperand(0);
48313 unsigned IdxVal = N->getConstantOperandVal(1);
48314 SDValue InVecBC = peekThroughBitcasts(InVec);
48315 EVT InVecVT = InVec.getValueType();
48316 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48318 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
48319 TLI.isTypeLegal(InVecVT) &&
48320 InVecVT.getSizeInBits() == 256 && InVecBC.getOpcode() == ISD::AND) {
48321 auto isConcatenatedNot = [] (SDValue V) {
48322 V = peekThroughBitcasts(V);
48323 if (!isBitwiseNot(V))
48325 SDValue NotOp = V->getOperand(0);
48326 return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
48328 if (isConcatenatedNot(InVecBC.getOperand(0)) ||
48329 isConcatenatedNot(InVecBC.getOperand(1))) {
48330 // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
48331 SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
48332 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
48333 DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
48337 if (DCI.isBeforeLegalizeOps())
48340 if (SDValue V = narrowExtractedVectorSelect(N, DAG))
48343 if (ISD::isBuildVectorAllZeros(InVec.getNode()))
48344 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
48346 if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
48347 if (VT.getScalarType() == MVT::i1)
48348 return DAG.getConstant(1, SDLoc(N), VT);
48349 return getOnesVector(VT, DAG, SDLoc(N));
48352 if (InVec.getOpcode() == ISD::BUILD_VECTOR)
48353 return DAG.getBuildVector(
48355 InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
48357 // If we are extracting from an insert into a zero vector, replace with a
48358 // smaller insert into zero as long as the extraction covers at least as much
48359 // as the originally inserted subvector. Don't do this for i1 vectors.
48360 if (VT.getVectorElementType() != MVT::i1 &&
48361 InVec.getOpcode() == ISD::INSERT_SUBVECTOR && IdxVal == 0 &&
48362 InVec.hasOneUse() && isNullConstant(InVec.getOperand(2)) &&
48363 ISD::isBuildVectorAllZeros(InVec.getOperand(0).getNode()) &&
48364 InVec.getOperand(1).getValueSizeInBits() <= VT.getSizeInBits()) {
48366 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
48367 getZeroVector(VT, Subtarget, DAG, DL),
48368 InVec.getOperand(1), InVec.getOperand(2));
48371 // If we're extracting from a broadcast then we're better off just
48372 // broadcasting to the smaller type directly, assuming this is the only use.
48373 // As it's a broadcast we don't care about the extraction index.
48374 if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
48375 InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
48376 return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
48378 if (InVec.getOpcode() == X86ISD::VBROADCAST_LOAD && InVec.hasOneUse()) {
48379 auto *MemIntr = cast<MemIntrinsicSDNode>(InVec);
48380 if (MemIntr->getMemoryVT().getSizeInBits() <= VT.getSizeInBits()) {
48381 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
48382 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
48384 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
48385 MemIntr->getMemoryVT(),
48386 MemIntr->getMemOperand());
48387 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
48392 // If we're extracting an upper subvector from a broadcast we should just
48393 // extract the lowest subvector instead, which should allow
48394 // SimplifyDemandedVectorElts to do more simplifications.
48395 if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
48396 InVec.getOpcode() == X86ISD::VBROADCAST_LOAD))
48397 return extractSubVector(InVec, 0, DAG, SDLoc(N), VT.getSizeInBits());
48399 // If we're extracting a broadcasted subvector, just use the source.
48400 if (InVec.getOpcode() == X86ISD::SUBV_BROADCAST &&
48401 InVec.getOperand(0).getValueType() == VT)
48402 return InVec.getOperand(0);
48404 // Attempt to extract from the source of a shuffle vector.
48405 if ((InVecVT.getSizeInBits() % VT.getSizeInBits()) == 0 &&
48406 (IdxVal % VT.getVectorNumElements()) == 0) {
48407 SmallVector<int, 32> ShuffleMask;
48408 SmallVector<int, 32> ScaledMask;
48409 SmallVector<SDValue, 2> ShuffleInputs;
48410 unsigned NumSubVecs = InVecVT.getSizeInBits() / VT.getSizeInBits();
48411 // Decode the shuffle mask and scale it so that it shuffles whole subvectors.
48412 if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
48413 scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
48414 unsigned SubVecIdx = IdxVal / VT.getVectorNumElements();
48415 if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
48416 return DAG.getUNDEF(VT);
48417 if (ScaledMask[SubVecIdx] == SM_SentinelZero)
48418 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
48419 SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
48420 if (Src.getValueSizeInBits() == InVecVT.getSizeInBits()) {
48421 unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
48422 unsigned SrcEltIdx = SrcSubVecIdx * VT.getVectorNumElements();
48423 return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
48424 SDLoc(N), VT.getSizeInBits());
48429 // If we're extracting the lowest subvector and the source has only one use,
48430 // we may be able to perform this with a smaller vector width.
48431 if (IdxVal == 0 && InVec.hasOneUse()) {
48432 unsigned InOpcode = InVec.getOpcode();
48433 if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
48434 // v2f64 CVTDQ2PD(v4i32).
48435 if (InOpcode == ISD::SINT_TO_FP &&
48436 InVec.getOperand(0).getValueType() == MVT::v4i32) {
48437 return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
48439 // v2f64 CVTUDQ2PD(v4i32).
48440 if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
48441 InVec.getOperand(0).getValueType() == MVT::v4i32) {
48442 return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
48444 // v2f64 CVTPS2PD(v4f32).
48445 if (InOpcode == ISD::FP_EXTEND &&
48446 InVec.getOperand(0).getValueType() == MVT::v4f32) {
48447 return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
48450 if ((InOpcode == ISD::ANY_EXTEND ||
48451 InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
48452 InOpcode == ISD::ZERO_EXTEND ||
48453 InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
48454 InOpcode == ISD::SIGN_EXTEND ||
48455 InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
48456 VT.is128BitVector() &&
48457 InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
48458 unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
48459 return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
48461 if (InOpcode == ISD::VSELECT &&
48462 InVec.getOperand(0).getValueType().is256BitVector() &&
48463 InVec.getOperand(1).getValueType().is256BitVector() &&
48464 InVec.getOperand(2).getValueType().is256BitVector()) {
48466 SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
48467 SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
48468 SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
48469 return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
48476 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
48477 EVT VT = N->getValueType(0);
48478 SDValue Src = N->getOperand(0);
48481 // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
48482 // This occurs frequently in our masked scalar intrinsic code and our
48483 // floating point select lowering with AVX512.
48484 // TODO: SimplifyDemandedBits instead?
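// For a v1i1 result only bit 0 of the source is ever observed, so
// (and X, 1) and X are interchangeable here.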
48485 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
48486 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
48487 if (C->getAPIntValue().isOneValue())
48488 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
48489 Src.getOperand(0));
48491 // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
48492 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
48493 Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
48494 Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
48495 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
48496 if (C->isNullValue())
48497 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
48498 Src.getOperand(1));
48500 // Reduce v2i64 to v4i32 if we don't need the upper bits.
48501 // TODO: Move to DAGCombine/SimplifyDemandedBits?
48502 if (VT == MVT::v2i64 || VT == MVT::v2f64) {
48503 auto IsAnyExt64 = [](SDValue Op) {
48504 if (Op.getValueType() != MVT::i64 || !Op.hasOneUse())
48506 if (Op.getOpcode() == ISD::ANY_EXTEND &&
48507 Op.getOperand(0).getScalarValueSizeInBits() <= 32)
48508 return Op.getOperand(0);
48509 if (auto *Ld = dyn_cast<LoadSDNode>(Op))
48510 if (Ld->getExtensionType() == ISD::EXTLOAD &&
48511 Ld->getMemoryVT().getScalarSizeInBits() <= 32)
48515 if (SDValue ExtSrc = IsAnyExt64(peekThroughOneUseBitcasts(Src)))
48516 return DAG.getBitcast(
48517 VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
48518 DAG.getAnyExtOrTrunc(ExtSrc, DL, MVT::i32)));
48521 // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
48522 if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
48523 Src.getOperand(0).getValueType() == MVT::x86mmx)
48524 return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));
48529 // Simplify PMULDQ and PMULUDQ operations.
48530 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
48531 TargetLowering::DAGCombinerInfo &DCI,
48532 const X86Subtarget &Subtarget) {
48533 SDValue LHS = N->getOperand(0);
48534 SDValue RHS = N->getOperand(1);
48536 // Canonicalize constant to RHS.
48537 if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
48538 !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
48539 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
48541 // Multiply by zero.
48542 // Don't return RHS as it may contain UNDEFs.
48543 if (ISD::isBuildVectorAllZeros(RHS.getNode()))
48544 return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
48546 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
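// Note: demanding all 64 result bits still lets SimplifyDemandedBits narrow
// the operands, because the target handling of PMULDQ/PMULUDQ knows that only
// the low 32 bits of each input element are read.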
48547 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48548 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
48549 return SDValue(N, 0);
48551 // If the input is an extend_invec and the SimplifyDemandedBits call didn't
48552 // convert it to any_extend_invec, due to the LegalOperations check, do the
48553 // conversion directly to a vector shuffle manually. This exposes combine
48554 // opportunities missed by combineExtInVec not calling
48555 // combineX86ShufflesRecursively on SSE4.1 targets.
48556 // FIXME: This is basically a hack around several other issues related to
48557 // ANY_EXTEND_VECTOR_INREG.
48558 if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
48559 (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
48560 LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
48561 LHS.getOperand(0).getValueType() == MVT::v4i32) {
48563 LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
48564 LHS.getOperand(0), { 0, -1, 1, -1 });
48565 LHS = DAG.getBitcast(MVT::v2i64, LHS);
48566 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
48568 if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
48569 (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
48570 RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
48571 RHS.getOperand(0).getValueType() == MVT::v4i32) {
48573 RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
48574 RHS.getOperand(0), { 0, -1, 1, -1 });
48575 RHS = DAG.getBitcast(MVT::v2i64, RHS);
48576 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
48582 static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
48583 TargetLowering::DAGCombinerInfo &DCI,
48584 const X86Subtarget &Subtarget) {
48585 EVT VT = N->getValueType(0);
48586 SDValue In = N->getOperand(0);
48587 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48589 // Try to merge vector loads and extend_inreg to an extload.
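// For example, (zero_extend_vector_inreg (v16i8 load)) producing v8i16 can
// become a single v8i16 zextload from v8i8 memory when that extending load is
// legal for the target.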
48590 if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
48592 auto *Ld = cast<LoadSDNode>(In);
48593 if (Ld->isSimple()) {
48594 MVT SVT = In.getSimpleValueType().getVectorElementType();
48595 ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG
48599 EVT::getVectorVT(*DAG.getContext(), SVT, VT.getVectorNumElements());
48600 if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
48602 DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
48603 Ld->getPointerInfo(), MemVT,
48604 Ld->getOriginalAlign(),
48605 Ld->getMemOperand()->getFlags());
48606 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
48612 // Attempt to combine as a shuffle.
48613 // TODO: SSE41 support
48614 if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
48616 if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
48617 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48624 static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
48625 TargetLowering::DAGCombinerInfo &DCI) {
48626 EVT VT = N->getValueType(0);
48628 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
48629 return DAG.getConstant(0, SDLoc(N), VT);
48631 APInt KnownUndef, KnownZero;
48632 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48633 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
48634 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
48636 return SDValue(N, 0);
48641 // Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
48642 // Done as a combine because the lowerings for fp16_to_fp and fp_to_fp16 produce
48643 // extra instructions between the conversions due to going to scalar and back.
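// Roughly: the scalar f32 round-trip is performed in lane 0 of a vector, as
// v4f32 -> CVTPS2PH (imm 4, i.e. use the current MXCSR rounding) -> v8i16 ->
// CVTPH2PS -> v4f32, and the scalar result is extracted from lane 0.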
48644 static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
48645 const X86Subtarget &Subtarget) {
48646 if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
48649 if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
48652 if (N->getValueType(0) != MVT::f32 ||
48653 N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
48657 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
48658 N->getOperand(0).getOperand(0));
48659 Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
48660 DAG.getTargetConstant(4, dl, MVT::i32));
48661 Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
48662 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
48663 DAG.getIntPtrConstant(0, dl));
48666 static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
48667 const X86Subtarget &Subtarget) {
48668 if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
48671 bool IsStrict = N->isStrictFPOpcode();
48672 EVT VT = N->getValueType(0);
48673 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
48674 EVT SrcVT = Src.getValueType();
48676 if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
48679 if (VT.getVectorElementType() != MVT::f32 &&
48680 VT.getVectorElementType() != MVT::f64)
48683 unsigned NumElts = VT.getVectorNumElements();
48684 if (NumElts == 1 || !isPowerOf2_32(NumElts))
48689 // Convert the input to vXi16.
48690 EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
48691 Src = DAG.getBitcast(IntVT, Src);
48693 // Widen to at least 8 input elements.
48695 unsigned NumConcats = 8 / NumElts;
48696 SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
48697 : DAG.getConstant(0, dl, IntVT);
48698 SmallVector<SDValue, 4> Ops(NumConcats, Fill);
48700 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
48703 // Destination is vXf32 with at least 4 elements.
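// For example, a v2f16 -> v2f64 extend is performed as: widen to v8i16 (two
// real lanes plus zero fill), CVTPH2PS to v4f32, extract the low v2f32, then
// fp_extend to v2f64.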
48704 EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
48705 std::max(4U, NumElts));
48706 SDValue Cvt, Chain;
48708 Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
48709 {N->getOperand(0), Src});
48710 Chain = Cvt.getValue(1);
48712 Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
48716 assert(NumElts == 2 && "Unexpected size");
48717 Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
48718 DAG.getIntPtrConstant(0, dl));
48722 // Extend to the original VT if necessary.
48723 if (Cvt.getValueType() != VT) {
48724 Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
48726 Chain = Cvt.getValue(1);
48728 return DAG.getMergeValues({Cvt, Chain}, dl);
48731 // Extend to the original VT if necessary.
48732 return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
48735 // Try to find a larger VBROADCAST_LOAD that we can extract from. Limit this to
48736 // cases where the loads have the same input chain and the output chains are
48737 // unused. This avoids any memory ordering issues.
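// For example, if the same address feeds both a v4f32 and a v8f32 broadcast
// load on the same chain, the v4f32 user can be re-expressed as the low half
// of the v8f32 broadcast, leaving a single load.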
48738 static SDValue combineVBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
48739 TargetLowering::DAGCombinerInfo &DCI) {
48740 // Only do this if the chain result is unused.
48741 if (N->hasAnyUseOfValue(1))
48744 auto *MemIntrin = cast<MemIntrinsicSDNode>(N);
48746 SDValue Ptr = MemIntrin->getBasePtr();
48747 SDValue Chain = MemIntrin->getChain();
48748 EVT VT = N->getSimpleValueType(0);
48749 EVT MemVT = MemIntrin->getMemoryVT();
48751 // Look at other users of our base pointer and try to find a wider broadcast.
48752 // The input chain and the size of the memory VT must match.
48753 for (SDNode *User : Ptr->uses())
48754 if (User != N && User->getOpcode() == X86ISD::VBROADCAST_LOAD &&
48755 cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
48756 cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
48757 cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
48758 MemVT.getSizeInBits() &&
48759 !User->hasAnyUseOfValue(1) &&
48760 User->getValueSizeInBits(0) > VT.getSizeInBits()) {
48761 SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
48762 VT.getSizeInBits());
48763 Extract = DAG.getBitcast(VT, Extract);
48764 return DCI.CombineTo(N, Extract, SDValue(User, 1));
48770 static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
48771 const X86Subtarget &Subtarget) {
48772 if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
48775 EVT VT = N->getValueType(0);
48776 SDValue Src = N->getOperand(0);
48777 EVT SrcVT = Src.getValueType();
48779 if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
48780 SrcVT.getVectorElementType() != MVT::f32)
48783 unsigned NumElts = VT.getVectorNumElements();
48784 if (NumElts == 1 || !isPowerOf2_32(NumElts))
48789 // Widen to at least 4 input elements.
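// For example, a v2f32 -> v2f16 round is performed by padding the source to
// v4f32 with zeros, converting with CVTPS2PH to v8i16, then extracting the low
// v2i16 and bitcasting it back to v2f16.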
48791 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
48792 DAG.getConstantFP(0.0, dl, SrcVT));
48794 // Destination is v8i16 with at least 8 elements.
48795 EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
48796 std::max(8U, NumElts));
48797 SDValue Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src,
48798 DAG.getTargetConstant(4, dl, MVT::i32));
48800 // Extract down to the real number of elements.
48802 EVT IntVT = VT.changeVectorElementTypeToInteger();
48803 Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
48804 DAG.getIntPtrConstant(0, dl));
48807 return DAG.getBitcast(VT, Cvt);
48810 static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
48811 SDValue Src = N->getOperand(0);
48813 // Turn MOVDQ2Q+simple_load into an mmx load.
48814 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
48815 LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());
48817 if (LN->isSimple()) {
48818 SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
48820 LN->getPointerInfo(),
48821 LN->getOriginalAlign(),
48822 LN->getMemOperand()->getFlags());
48823 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
48831 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
48832 DAGCombinerInfo &DCI) const {
48833 SelectionDAG &DAG = DCI.DAG;
48834 switch (N->getOpcode()) {
48836 case ISD::SCALAR_TO_VECTOR:
48837 return combineScalarToVector(N, DAG);
48838 case ISD::EXTRACT_VECTOR_ELT:
48839 case X86ISD::PEXTRW:
48840 case X86ISD::PEXTRB:
48841 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
48842 case ISD::CONCAT_VECTORS:
48843 return combineConcatVectors(N, DAG, DCI, Subtarget);
48844 case ISD::INSERT_SUBVECTOR:
48845 return combineInsertSubvector(N, DAG, DCI, Subtarget);
48846 case ISD::EXTRACT_SUBVECTOR:
48847 return combineExtractSubvector(N, DAG, DCI, Subtarget);
48850 case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
48851 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
48852 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
48853 case X86ISD::CMP: return combineCMP(N, DAG);
48854 case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
48855 case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
48857 case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
48858 case X86ISD::SBB: return combineSBB(N, DAG);
48859 case X86ISD::ADC: return combineADC(N, DAG, DCI);
48860 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
48861 case ISD::SHL: return combineShiftLeft(N, DAG);
48862 case ISD::SRA: return combineShiftRightArithmetic(N, DAG, Subtarget);
48863 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI, Subtarget);
48864 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
48865 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
48866 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
48867 case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
48868 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
48869 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
48870 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
48871 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
48872 case X86ISD::VEXTRACT_STORE:
48873 return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
48874 case ISD::SINT_TO_FP:
48875 case ISD::STRICT_SINT_TO_FP:
48876 return combineSIntToFP(N, DAG, DCI, Subtarget);
48877 case ISD::UINT_TO_FP:
48878 case ISD::STRICT_UINT_TO_FP:
48879 return combineUIntToFP(N, DAG, Subtarget);
48881 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
48882 case ISD::FNEG: return combineFneg(N, DAG, DCI, Subtarget);
48883 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
48884 case X86ISD::VTRUNC: return combineVTRUNC(N, DAG, DCI);
48885 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
48886 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
48887 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
48889 case X86ISD::FOR: return combineFOr(N, DAG, DCI, Subtarget);
48891 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
48893 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
48894 case X86ISD::CVTSI2P:
48895 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
48896 case X86ISD::CVTP2SI:
48897 case X86ISD::CVTP2UI:
48898 case X86ISD::STRICT_CVTTP2SI:
48899 case X86ISD::CVTTP2SI:
48900 case X86ISD::STRICT_CVTTP2UI:
48901 case X86ISD::CVTTP2UI:
48902 return combineCVTP2I_CVTTP2I(N, DAG, DCI);
48903 case X86ISD::STRICT_CVTPH2PS:
48904 case X86ISD::CVTPH2PS: return combineCVTPH2PS(N, DAG, DCI);
48905 case X86ISD::BT: return combineBT(N, DAG, DCI);
48906 case ISD::ANY_EXTEND:
48907 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
48908 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
48909 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
48910 case ISD::ANY_EXTEND_VECTOR_INREG:
48911 case ISD::SIGN_EXTEND_VECTOR_INREG:
48912 case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
48914 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
48915 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
48916 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
48917 case X86ISD::PACKSS:
48918 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
48922 return combineVectorShiftVar(N, DAG, DCI, Subtarget);
48923 case X86ISD::VSHLI:
48924 case X86ISD::VSRAI:
48925 case X86ISD::VSRLI:
48926 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
48927 case ISD::INSERT_VECTOR_ELT:
48928 case X86ISD::PINSRB:
48929 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
48930 case X86ISD::SHUFP: // Handle all target specific shuffles
48931 case X86ISD::INSERTPS:
48932 case X86ISD::EXTRQI:
48933 case X86ISD::INSERTQI:
48934 case X86ISD::VALIGN:
48935 case X86ISD::PALIGNR:
48936 case X86ISD::VSHLDQ:
48937 case X86ISD::VSRLDQ:
48938 case X86ISD::BLENDI:
48939 case X86ISD::UNPCKH:
48940 case X86ISD::UNPCKL:
48941 case X86ISD::MOVHLPS:
48942 case X86ISD::MOVLHPS:
48943 case X86ISD::PSHUFB:
48944 case X86ISD::PSHUFD:
48945 case X86ISD::PSHUFHW:
48946 case X86ISD::PSHUFLW:
48947 case X86ISD::MOVSHDUP:
48948 case X86ISD::MOVSLDUP:
48949 case X86ISD::MOVDDUP:
48950 case X86ISD::MOVSS:
48951 case X86ISD::MOVSD:
48952 case X86ISD::VBROADCAST:
48953 case X86ISD::VPPERM:
48954 case X86ISD::VPERMI:
48955 case X86ISD::VPERMV:
48956 case X86ISD::VPERMV3:
48957 case X86ISD::VPERMIL2:
48958 case X86ISD::VPERMILPI:
48959 case X86ISD::VPERMILPV:
48960 case X86ISD::VPERM2X128:
48961 case X86ISD::SHUF128:
48962 case X86ISD::VZEXT_MOVL:
48963 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
48964 case X86ISD::FMADD_RND:
48965 case X86ISD::FMSUB:
48966 case X86ISD::STRICT_FMSUB:
48967 case X86ISD::FMSUB_RND:
48968 case X86ISD::FNMADD:
48969 case X86ISD::STRICT_FNMADD:
48970 case X86ISD::FNMADD_RND:
48971 case X86ISD::FNMSUB:
48972 case X86ISD::STRICT_FNMSUB:
48973 case X86ISD::FNMSUB_RND:
48975 case ISD::STRICT_FMA: return combineFMA(N, DAG, DCI, Subtarget);
48976 case X86ISD::FMADDSUB_RND:
48977 case X86ISD::FMSUBADD_RND:
48978 case X86ISD::FMADDSUB:
48979 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, DCI);
48980 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
48981 case X86ISD::MGATHER:
48982 case X86ISD::MSCATTER: return combineX86GatherScatter(N, DAG, DCI);
48984 case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
48985 case X86ISD::PCMPEQ:
48986 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
48987 case X86ISD::PMULDQ:
48988 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
48989 case X86ISD::KSHIFTL:
48990 case X86ISD::KSHIFTR: return combineKSHIFT(N, DAG, DCI);
48991 case ISD::FP16_TO_FP: return combineFP16_TO_FP(N, DAG, Subtarget);
48992 case ISD::STRICT_FP_EXTEND:
48993 case ISD::FP_EXTEND: return combineFP_EXTEND(N, DAG, Subtarget);
48994 case ISD::FP_ROUND: return combineFP_ROUND(N, DAG, Subtarget);
48995 case X86ISD::VBROADCAST_LOAD: return combineVBROADCAST_LOAD(N, DAG, DCI);
48996 case X86ISD::MOVDQ2Q: return combineMOVDQ2Q(N, DAG);
49002 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
49003 if (!isTypeLegal(VT))
49006 // There are no vXi8 shifts.
49007 if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
49010 // TODO: Almost no 8-bit ops are desirable because they have no actual
49011 // size/speed advantages vs. 32-bit ops, but they do have a major
49012 // potential disadvantage by causing partial register stalls.
49014 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
49015 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
49016 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
49017 // check for a constant operand to the multiply.
49018 if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
49021 // i16 instruction encodings are longer and some i16 instructions are slow,
49022 // so those are not desirable.
49023 if (VT == MVT::i16) {
49028 case ISD::SIGN_EXTEND:
49029 case ISD::ZERO_EXTEND:
49030 case ISD::ANY_EXTEND:
49044 // Any legal type not explicitly accounted for above is desirable.
49048 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
49049 SDValue Value, SDValue Addr,
49050 SelectionDAG &DAG) const {
49051 const Module *M = DAG.getMachineFunction().getMMI().getModule();
49052 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
49053 if (IsCFProtectionSupported) {
49054 // If control-flow branch protection is enabled, we need to add a
49055 // notrack prefix to the indirect branch.
49056 // To do that we create an NT_BRIND SDNode.
49057 // Upon ISEL, the pattern will convert it to a jmp with a NoTrack prefix.
49058 return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
49061 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
49064 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
49065 EVT VT = Op.getValueType();
49066 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
49067 isa<ConstantSDNode>(Op.getOperand(1));
49069 // i16 is legal, but undesirable since i16 instruction encodings are longer
49070 // and some i16 instructions are slow.
49071 // 8-bit multiply-by-constant can usually be expanded to something cheaper
49072 // using LEA and/or other ALU ops.
49073 if (VT != MVT::i16 && !Is8BitMulByConstant)
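// The IsFoldable* helpers below recognize (store (op (load p), x), p) style
// patterns; if the op can be folded into a read-modify-write memory
// instruction at its current width, promotion is skipped so that folding
// opportunity is not lost.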
49076 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
49077 if (!Op.hasOneUse())
49079 SDNode *User = *Op->use_begin();
49080 if (!ISD::isNormalStore(User))
49082 auto *Ld = cast<LoadSDNode>(Load);
49083 auto *St = cast<StoreSDNode>(User);
49084 return Ld->getBasePtr() == St->getBasePtr();
49087 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
49088 if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
49090 if (!Op.hasOneUse())
49092 SDNode *User = *Op->use_begin();
49093 if (User->getOpcode() != ISD::ATOMIC_STORE)
49095 auto *Ld = cast<AtomicSDNode>(Load);
49096 auto *St = cast<AtomicSDNode>(User);
49097 return Ld->getBasePtr() == St->getBasePtr();
49100 bool Commute = false;
49101 switch (Op.getOpcode()) {
49102 default: return false;
49103 case ISD::SIGN_EXTEND:
49104 case ISD::ZERO_EXTEND:
49105 case ISD::ANY_EXTEND:
49110 SDValue N0 = Op.getOperand(0);
49111 // Look out for (store (shl (load), x)).
49112 if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
49124 SDValue N0 = Op.getOperand(0);
49125 SDValue N1 = Op.getOperand(1);
49126 // Avoid disabling potential load folding opportunities.
49127 if (MayFoldLoad(N1) &&
49128 (!Commute || !isa<ConstantSDNode>(N0) ||
49129 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
49131 if (MayFoldLoad(N0) &&
49132 ((Commute && !isa<ConstantSDNode>(N1)) ||
49133 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
49135 if (IsFoldableAtomicRMW(N0, Op) ||
49136 (Commute && IsFoldableAtomicRMW(N1, Op)))
49145 //===----------------------------------------------------------------------===//
49146 // X86 Inline Assembly Support
49147 //===----------------------------------------------------------------------===//
49149 // Helper to match an asm string against a list of pieces separated by whitespace.
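// For example, matchAsm("bswap $0", {"bswap", "$0"}) succeeds; leading
// whitespace and the whitespace between pieces is skipped.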
49150 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
49151 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
49153 for (StringRef Piece : Pieces) {
49154 if (!S.startswith(Piece)) // Check if the piece matches.
49157 S = S.substr(Piece.size());
49158 StringRef::size_type Pos = S.find_first_not_of(" \t");
49159 if (Pos == 0) // We matched a prefix.
49168 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
49170 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
49171 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
49172 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
49173 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
49175 if (AsmPieces.size() == 3)
49177 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
49184 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
49185 InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
49187 const std::string &AsmStr = IA->getAsmString();
49189 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
49190 if (!Ty || Ty->getBitWidth() % 16 != 0)
49193 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
49194 SmallVector<StringRef, 4> AsmPieces;
49195 SplitString(AsmStr, AsmPieces, ";\n");
49197 switch (AsmPieces.size()) {
49198 default: return false;
49200 // FIXME: this should verify that we are targeting a 486 or better. If not,
49201 // we will turn this bswap into something that will be lowered to logical
49202 // ops instead of emitting the bswap asm. For now, we don't support 486 or
49203 // lower so don't worry about this.
49205 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
49206 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
49207 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
49208 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
49209 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
49210 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
49211 // No need to check constraints, nothing other than the equivalent of
49212 // "=r,0" would be valid here.
49213 return IntrinsicLowering::LowerToByteSwap(CI);
49216 // rorw $$8, ${0:w} --> llvm.bswap.i16
49217 if (CI->getType()->isIntegerTy(16) &&
49218 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
49219 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
49220 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
49222 StringRef ConstraintsStr = IA->getConstraintString();
49223 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
49224 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
49225 if (clobbersFlagRegisters(AsmPieces))
49226 return IntrinsicLowering::LowerToByteSwap(CI);
49230 if (CI->getType()->isIntegerTy(32) &&
49231 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
49232 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
49233 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
49234 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
49236 StringRef ConstraintsStr = IA->getConstraintString();
49237 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
49238 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
49239 if (clobbersFlagRegisters(AsmPieces))
49240 return IntrinsicLowering::LowerToByteSwap(CI);
49243 if (CI->getType()->isIntegerTy(64)) {
49244 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
49245 if (Constraints.size() >= 2 &&
49246 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
49247 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
49248 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
49249 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
49250 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
49251 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
49252 return IntrinsicLowering::LowerToByteSwap(CI);
49260 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
49261 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
49262 .Case("{@cca}", X86::COND_A)
49263 .Case("{@ccae}", X86::COND_AE)
49264 .Case("{@ccb}", X86::COND_B)
49265 .Case("{@ccbe}", X86::COND_BE)
49266 .Case("{@ccc}", X86::COND_B)
49267 .Case("{@cce}", X86::COND_E)
49268 .Case("{@ccz}", X86::COND_E)
49269 .Case("{@ccg}", X86::COND_G)
49270 .Case("{@ccge}", X86::COND_GE)
49271 .Case("{@ccl}", X86::COND_L)
49272 .Case("{@ccle}", X86::COND_LE)
49273 .Case("{@ccna}", X86::COND_BE)
49274 .Case("{@ccnae}", X86::COND_B)
49275 .Case("{@ccnb}", X86::COND_AE)
49276 .Case("{@ccnbe}", X86::COND_A)
49277 .Case("{@ccnc}", X86::COND_AE)
49278 .Case("{@ccne}", X86::COND_NE)
49279 .Case("{@ccnz}", X86::COND_NE)
49280 .Case("{@ccng}", X86::COND_LE)
49281 .Case("{@ccnge}", X86::COND_L)
49282 .Case("{@ccnl}", X86::COND_GE)
49283 .Case("{@ccnle}", X86::COND_G)
49284 .Case("{@ccno}", X86::COND_NO)
49285 .Case("{@ccnp}", X86::COND_P)
49286 .Case("{@ccns}", X86::COND_NS)
49287 .Case("{@cco}", X86::COND_O)
49288 .Case("{@ccp}", X86::COND_P)
49289 .Case("{@ccs}", X86::COND_S)
49290 .Default(X86::COND_INVALID);
/// Given a constraint letter, return the type of constraint for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'k': // AVX512 masking registers.
      return C_RegisterClass;
      return C_Immediate;
  else if (Constraint.size() == 2) {
    switch (Constraint[0]) {
      switch (Constraint[1]) {
        return C_RegisterClass;
  } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
  return TargetLowering::getConstraintType(Constraint);
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    if (type->isX86_MMXTy() && Subtarget.hasMMX())
      weight = CW_SpecificReg;
    if (StringRef(constraint).size() != 2)
    switch (constraint[1]) {
      if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
          ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
          ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
        return CW_SpecificReg;
      // Conditional OpMask regs (AVX512)
      if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
        return CW_Register;
      if (type->isX86_MMXTy() && Subtarget.hasMMX())
      // Any SSE reg when ISA >= SSE2, same as 'x'
      if (!Subtarget.hasSSE2())
    if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
      weight = CW_Register;
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
      weight = CW_Register;
    // Enable conditional vector operations using %k<#> registers.
    if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
      weight = CW_Register;
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    if (isa<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
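// Illustrative note (not from the original source): for an operand constrained
// with 'I' whose value is the constant 7, the check above reports CW_Constant
// (the best weight), so an "Ir" multi-alternative constraint prefers the
// immediate form; a constant outside 0..31 fails the 'I' test and the operand
// does not match that alternative.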
/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget.hasSSE1())
  return TargetLowering::LowerXConstraint(ConstraintVT);
// Lower @cc targets via setcc.
SDValue X86TargetLowering::LowerAsmOutputForConstraint(
    SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
    SelectionDAG &DAG) const {
  X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
  if (Cond == X86::COND_INVALID)
  // Check that return type is valid.
  if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
      OpInfo.ConstraintVT.getSizeInBits() < 8)
    report_fatal_error("Flag output operand is of invalid type");
  // Get EFLAGS register. Only update chain when copyfrom is glued.
  if (Flag.getNode()) {
    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
    Chain = Flag.getValue(1);
    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
  // Extract CC code.
  SDValue CC = getSETCC(Cond, Flag, DL, DAG);
  // Extend to 32-bits
  SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
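// Illustrative example (not from the original source; hypothetical variables):
// for a flag output such as
//   bool Overflow;
//   asm("addl %2, %1" : "=@cco"(Overflow), "+r"(Sum) : "r"(Addend));
// the code above copies EFLAGS out of the asm node, materializes the condition
// with getSETCC(COND_O), and zero-extends the i8 result to the integer type
// the front end chose for the output.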
/// Lower the specified operand into the Ops vector.
/// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;
  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
      // FIXME gcc accepts some relocatable values here too, but only in certain
      // memory models; it's complicated.
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
      // FIXME gcc accepts some relocatable values here too, but only in certain
      // memory models; it's complicated.
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
      BooleanContent BCont = getBooleanContents(MVT::i64);
      ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
                                    : ISD::SIGN_EXTEND;
      int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
                                                  : CST->getSExtValue();
      Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
      // If we require an extra load to get this address, as in PIC mode, we
      // can't accept it.
      if (isGlobalStubReference(
              Subtarget.classifyGlobalReference(GA->getGlobal())))
  if (Result.getNode()) {
    Ops.push_back(Result);
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
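// Illustrative examples (not from the original source; hypothetical variables)
// of the immediate constraints handled above:
//   asm("shll %1, %0" : "+r"(X) : "I"(3));                // 'I': count 0..31
//   asm volatile("outb %0, %1" : : "a"(Byte), "N"(0x80)); // 'N': 8-bit port
// A constant outside the accepted range is not pushed into Ops, so the operand
// is rejected for that constraint.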
/// Check if \p RC is a general purpose register class.
/// I.e., GR* or one of their variant.
static bool isGRClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::GR8RegClass) ||
         RC.hasSuperClassEq(&X86::GR16RegClass) ||
         RC.hasSuperClassEq(&X86::GR32RegClass) ||
         RC.hasSuperClassEq(&X86::GR64RegClass) ||
         RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
/// Check if \p RC is a vector register class.
/// I.e., FR* / VR* or one of their variant.
static bool isFRClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
         RC.hasSuperClassEq(&X86::FR64XRegClass) ||
         RC.hasSuperClassEq(&X86::VR128XRegClass) ||
         RC.hasSuperClassEq(&X86::VR256XRegClass) ||
         RC.hasSuperClassEq(&X86::VR512RegClass);
/// Check if \p RC is a mask register class.
/// I.e., VK* or one of their variant.
static bool isVKClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::VK1RegClass) ||
         RC.hasSuperClassEq(&X86::VK2RegClass) ||
         RC.hasSuperClassEq(&X86::VK4RegClass) ||
         RC.hasSuperClassEq(&X86::VK8RegClass) ||
         RC.hasSuperClassEq(&X86::VK16RegClass) ||
         RC.hasSuperClassEq(&X86::VK32RegClass) ||
         RC.hasSuperClassEq(&X86::VK64RegClass);
std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
  // First, see if this is a constraint that directly corresponds to an LLVM
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    // 'A' means [ER]AX + [ER]DX.
      if (Subtarget.is64Bit())
        return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
      assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
             "Expecting 64, 32 or 16 bit subtarget");
      return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
      if (Subtarget.hasAVX512()) {
          return std::make_pair(0U, &X86::VK1RegClass);
          return std::make_pair(0U, &X86::VK8RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16RegClass);
      if (Subtarget.hasBWI()) {
        if (VT == MVT::i32)
          return std::make_pair(0U, &X86::VK32RegClass);
        if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64RegClass);
    case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget.is64Bit()) {
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT != MVT::f80)
          return std::make_pair(0U, &X86::GR64RegClass);
      // 32-bit fallthrough
    case 'Q': // Q_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT != MVT::f80)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
    case 'r': // GENERAL_REGS
    case 'l': // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      if (VT != MVT::f80)
        return std::make_pair(0U, &X86::GR64RegClass);
    case 'R': // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      if (VT != MVT::f80)
        return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f': // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
        return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y': // MMX_REGS if MMX allowed.
      if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget.hasSSE1()) break;
      bool VConstraint = (Constraint[0] == 'v');
      switch (VT.SimpleTy) {
      // Scalar SSE types.
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::FR32XRegClass);
        return std::make_pair(0U, &X86::FR32RegClass);
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::FR64XRegClass);
        return std::make_pair(0U, &X86::FR64RegClass);
        if (Subtarget.is64Bit()) {
          if (VConstraint && Subtarget.hasVLX())
            return std::make_pair(0U, &X86::VR128XRegClass);
          return std::make_pair(0U, &X86::VR128RegClass);
      // Vector types and fp128.
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::VR128XRegClass);
        return std::make_pair(0U, &X86::VR128RegClass);
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::VR256XRegClass);
        if (Subtarget.hasAVX())
          return std::make_pair(0U, &X86::VR256RegClass);
        if (!Subtarget.hasAVX512()) break;
          return std::make_pair(0U, &X86::VR512RegClass);
        return std::make_pair(0U, &X86::VR512_0_15RegClass);
  } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
    switch (Constraint[1]) {
      return getRegForInlineAsmConstraint(TRI, "x", VT);
      if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
      if (!Subtarget.hasSSE1()) break;
      switch (VT.SimpleTy) {
      // Scalar SSE types.
        return std::make_pair(X86::XMM0, &X86::FR32RegClass);
        return std::make_pair(X86::XMM0, &X86::FR64RegClass);
        return std::make_pair(X86::XMM0, &X86::VR128RegClass);
        if (Subtarget.hasAVX())
          return std::make_pair(X86::YMM0, &X86::VR256RegClass);
        if (Subtarget.hasAVX512())
          return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
      // This register class doesn't allocate k0 for masked vector operation.
      if (Subtarget.hasAVX512()) {
          return std::make_pair(0U, &X86::VK1WMRegClass);
          return std::make_pair(0U, &X86::VK8WMRegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16WMRegClass);
      if (Subtarget.hasBWI()) {
        if (VT == MVT::i32)
          return std::make_pair(0U, &X86::VK32WMRegClass);
        if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64WMRegClass);
  if (parseConstraintCode(Constraint) != X86::COND_INVALID)
    return std::make_pair(0U, &X86::GR32RegClass);
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<Register, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  // Not found as a standard register?
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' && Constraint[6] == '}') {
      // st(7) is not allocatable and thus not a member of RFP80. Return
      // singleton class in cases where we have a reference to it.
      if (Constraint[4] == '7')
        return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
      return std::make_pair(X86::FP0 + Constraint[4] - '0',
                            &X86::RFP80RegClass);
    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint))
      return std::make_pair(X86::FP0, &X86::RFP80RegClass);
    if (StringRef("{flags}").equals_lower(Constraint))
      return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
    if (StringRef("{dirflag}").equals_lower(Constraint))
      return std::make_pair(X86::DF, &X86::DFCCRRegClass);
    if (StringRef("{fpsr}").equals_lower(Constraint))
      return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
  // Make sure it isn't a register that requires 64-bit mode.
  if (!Subtarget.is64Bit() &&
      (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
      TRI->getEncodingValue(Res.first) >= 8) {
    // Register requires REX prefix, but we're in 32-bit mode.
    return std::make_pair(0, nullptr);
  // Make sure it isn't a register that requires AVX512.
  if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
      TRI->getEncodingValue(Res.first) & 0x10) {
    // Register requires EVEX prefix.
    return std::make_pair(0, nullptr);
  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  // MVT::Other is used to specify clobber names.
  if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
    return Res; // Correct type already, nothing to do.
  // Get a matching integer of the correct size. i.e. "ax" with MVT::i32 should
  // return "eax". This should even work for things like getting 64-bit integer
  // registers when given an f64 type.
  const TargetRegisterClass *Class = Res.second;
  // The generic code will match the first register class that contains the
  // given register. Thus, based on the ordering of the tablegened file,
  // the "plain" GR classes might not come first.
  // Therefore, use a helper method.
  if (isGRClass(*Class)) {
    unsigned Size = VT.getSizeInBits();
    if (Size == 1) Size = 8;
    Register DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
      bool is64Bit = Subtarget.is64Bit();
      const TargetRegisterClass *RC =
          Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
          : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
          : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
          : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
      if (Size == 64 && !is64Bit) {
        // Model GCC's behavior here and select a fixed pair of 32-bit
        // registers.
          return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
          return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
          return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
          return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
          return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
          return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
          return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
          return std::make_pair(0, nullptr);
      if (RC && RC->contains(DestReg))
        return std::make_pair(DestReg, RC);
    // No register found/type mismatch.
    return std::make_pair(0, nullptr);
  } else if (isFRClass(*Class)) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32XRegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
      Res.second = &X86::VR128XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
      Res.second = &X86::VR256XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
      Res.second = &X86::VR512RegClass;
      // Type mismatch and not a clobber: return an error.
      Res.second = nullptr;
  } else if (isVKClass(*Class)) {
      Res.second = &X86::VK1RegClass;
    else if (VT == MVT::i8)
      Res.second = &X86::VK8RegClass;
    else if (VT == MVT::i16)
      Res.second = &X86::VK16RegClass;
    else if (VT == MVT::i32)
      Res.second = &X86::VK32RegClass;
    else if (VT == MVT::i64)
      Res.second = &X86::VK64RegClass;
      // Type mismatch and not a clobber: return an error.
      Res.second = nullptr;
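// Illustrative note (not from the original source): with the remapping above,
// an IR-level constraint such as "{ax}" on an i32 operand resolves to EAX in a
// GR32 class, and "{xmm0}" on a <4 x float> operand keeps XMM0 but is moved
// into a 128-bit vector register class instead of the first class the generic
// matcher happened to find.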
int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
                                            const AddrMode &AM, Type *Ty,
                                            unsigned AS) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(DL, AM, Ty, AS))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
  // Integer division on x86 is expensive. However, when aggressively optimizing
  // for code size, we prefer to use a div instruction, as it is usually smaller
  // than the alternative sequence.
  // The exception to this is vector division. Since x86 doesn't have vector
  // integer division, leaving the division as-is is a loss even in terms of
  // size, because it will have to be scalarized, while the alternative code
  // sequence can be performed in vector form.
  bool OptSize = Attr.hasFnAttribute(Attribute::MinSize);
  return OptSize && !VT.isVector();
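// Illustrative note (not from the original source): under minsize a scalar
// 'udiv i32 %x, 10' is left as a single divl, whereas at other optimization
// levels a constant divisor is normally expanded into a longer multiply/shift
// sequence that avoids the slow divide.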
void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (!Subtarget.is64Bit())
  // Update IsSplitCSR in X86MachineFunctionInfo.
  X86MachineFunctionInfo *AFI =
      Entry->getParent()->getInfo<X86MachineFunctionInfo>();
  AFI->setIsSplitCSR(true);
void X86TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (X86::GR64RegClass.contains(*I))
      RC = &X86::GR64RegClass;
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
        Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
        "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
bool X86TargetLowering::supportSwiftError() const {
  return Subtarget.is64Bit();
/// Returns true if stack probing through a function call is requested.
bool X86TargetLowering::hasStackProbeSymbol(MachineFunction &MF) const {
  return !getStackProbeSymbolName(MF).empty();
/// Returns true if stack probing through inline assembly is requested.
bool X86TargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
  // No inline stack probe for Windows; it has its own mechanism.
  if (Subtarget.isOSWindows() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
/// Returns the name of the symbol used to emit stack probes or the empty
/// string if not applicable.
X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
  // Inline stack probes disable the stack probe call.
  if (hasInlineStackProbe(MF))
  // If the function specifically requests stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
  // Generally, if we aren't on Windows, the platform ABI does not include
  // support for stack probes, so don't emit them.
  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
  // We need a stack probe to conform to the Windows ABI. Choose the right
  // symbol.
  if (Subtarget.is64Bit())
    return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
  // The default stack probe size is 4096 if the function has no
  // "stack-probe-size" attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return StackProbeSize;
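// Illustrative note (not from the original source): these hooks are driven by
// IR function attributes. A function whose "probe-stack" attribute names a
// symbol (hypothetical example: "probe-stack"="__my_probe") has that symbol
// returned by getStackProbeSymbolName, and "stack-probe-size"="8192" widens
// the probing interval from the default of 4096 bytes.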